+++ /dev/null
-## ------------------------------------------------------------------------
-##
-## SPDX-License-Identifier: LGPL-2.1-or-later
-## Copyright (C) 2022 - 2023 by the deal.II authors
-##
-## This file is part of the deal.II library.
-##
-## Part of the source code is dual licensed under Apache-2.0 WITH
-## LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-## governing the source code and code contributions can be found in
-## LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-##
-## ------------------------------------------------------------------------
-
-set(src_kokkos
- #tpls/desul/src/Lock_Array_CUDA.cpp
- #tpls/desul/src/Lock_Array_HIP.cpp
- core/src/impl/Kokkos_Profiling.cpp
- core/src/impl/Kokkos_NumericTraits.cpp
- core/src/impl/Kokkos_HostSpace_deepcopy.cpp
- core/src/impl/Kokkos_Error.cpp
- core/src/impl/Kokkos_Spinwait.cpp
- core/src/impl/Kokkos_MemoryPool.cpp
- core/src/impl/Kokkos_HostThreadTeam.cpp
- core/src/impl/Kokkos_hwloc.cpp
- core/src/impl/Kokkos_MemorySpace.cpp
- core/src/impl/Kokkos_HBWSpace.cpp
- core/src/impl/Kokkos_HostBarrier.cpp
- core/src/impl/Kokkos_Core.cpp
- core/src/impl/Kokkos_HostSpace.cpp
- core/src/impl/Kokkos_CPUDiscovery.cpp
- core/src/impl/Kokkos_Stacktrace.cpp
- core/src/impl/Kokkos_SharedAlloc.cpp
- core/src/impl/Kokkos_Command_Line_Parsing.cpp
- core/src/impl/Kokkos_ExecPolicy.cpp
- #core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp
- #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp
- #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp
- #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp
- #core/src/OpenACC/Kokkos_OpenACCSpace.cpp
- #core/src/OpenACC/Kokkos_OpenACC.cpp
- #core/src/OpenACC/Kokkos_OpenACC_Instance.cpp
- #core/src/Cuda/Kokkos_Cuda_Instance.cpp
- #core/src/Cuda/Kokkos_Cuda_Task.cpp
- #core/src/Cuda/Kokkos_CudaSpace.cpp
- #core/src/Cuda/Kokkos_Cuda_Locks.cpp
- #core/src/HPX/Kokkos_HPX_Task.cpp
- #core/src/HPX/Kokkos_HPX.cpp
- core/src/dummy.cpp
- #core/src/Threads/Kokkos_ThreadsExec.cpp
- #core/src/HIP/Kokkos_HIP_Instance.cpp
- #core/src/HIP/Kokkos_HIP_Space.cpp
- #core/src/HIP/Kokkos_HIP_Locks.cpp
- #core/src/SYCL/Kokkos_SYCL_Space.cpp
- #core/src/SYCL/Kokkos_SYCL.cpp
- #core/src/SYCL/Kokkos_SYCL_Instance.cpp
- core/src/Serial/Kokkos_Serial.cpp
- core/src/Serial/Kokkos_Serial_Task.cpp
- #core/src/OpenMP/Kokkos_OpenMP_Instance.cpp
- #core/src/OpenMP/Kokkos_OpenMP_Task.cpp
- algorithms/src/KokkosAlgorithms_dummy.cpp
- simd/src/Kokkos_SIMD_dummy.cpp
- containers/src/impl/Kokkos_UnorderedMap_impl.cpp
-)
-
-enable_if_supported(DEAL_II_WARNING_FLAGS -Wno-float-conversion)
-enable_if_supported(DEAL_II_WARNING_FLAGS -Wno-missing-field-initializers)
-enable_if_supported(DEAL_II_WARNING_FLAGS -Wno-suggest-override)
-enable_if_supported(DEAL_II_WARNING_FLAGS -Wno-unused-but-set-parameter)
-
-include_directories(
- ${CMAKE_CURRENT_SOURCE_DIR}/algorithms/src
- ${CMAKE_CURRENT_SOURCE_DIR}/containers/src
- ${CMAKE_CURRENT_SOURCE_DIR}/core/src
- ${CMAKE_CURRENT_SOURCE_DIR}/simd/src
- ${CMAKE_CURRENT_SOURCE_DIR}/tpls/desul/include
- )
-
-define_object_library(bundled_kokkos OBJECT ${src_kokkos})
+++ /dev/null
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
+++ /dev/null
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Kokkos is licensed under 3-clause BSD terms of use:
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
+++ /dev/null
-void KOKKOS_ALGORITHMS_SRC_DUMMY_PREVENT_LINK_ERROR() {}
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SORT_HPP_
-#define KOKKOS_SORT_HPP_
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
-#endif
-
-#include <Kokkos_Core.hpp>
-
-#include <algorithm>
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <class DstViewType, class SrcViewType, int Rank = DstViewType::Rank>
-struct CopyOp;
-
-template <class DstViewType, class SrcViewType>
-struct CopyOp<DstViewType, SrcViewType, 1> {
- KOKKOS_INLINE_FUNCTION
- static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
- size_t i_src) {
- dst(i_dst) = src(i_src);
- }
-};
-
-template <class DstViewType, class SrcViewType>
-struct CopyOp<DstViewType, SrcViewType, 2> {
- KOKKOS_INLINE_FUNCTION
- static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
- size_t i_src) {
- for (int j = 0; j < (int)dst.extent(1); j++) dst(i_dst, j) = src(i_src, j);
- }
-};
-
-template <class DstViewType, class SrcViewType>
-struct CopyOp<DstViewType, SrcViewType, 3> {
- KOKKOS_INLINE_FUNCTION
- static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
- size_t i_src) {
-    for (int j = 0; j < (int)dst.extent(1); j++)
-      for (int k = 0; k < (int)dst.extent(2); k++)
-        dst(i_dst, j, k) = src(i_src, j, k);
- }
-};
-} // namespace Impl
-
-//----------------------------------------------------------------------------
-
-template <class KeyViewType, class BinSortOp,
- class Space = typename KeyViewType::device_type,
- class SizeType = typename KeyViewType::memory_space::size_type>
-class BinSort {
- public:
- template <class DstViewType, class SrcViewType>
- struct copy_functor {
- using src_view_type = typename SrcViewType::const_type;
-
- using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
-
- DstViewType dst_values;
- src_view_type src_values;
- int dst_offset;
-
- copy_functor(DstViewType const& dst_values_, int const& dst_offset_,
- SrcViewType const& src_values_)
- : dst_values(dst_values_),
- src_values(src_values_),
- dst_offset(dst_offset_) {}
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const int& i) const {
- copy_op::copy(dst_values, i + dst_offset, src_values, i);
- }
- };
-
- template <class DstViewType, class PermuteViewType, class SrcViewType>
- struct copy_permute_functor {
-    // If the source is a Kokkos::View we can use a const, random-access
-    // view type; otherwise we can only use its const type.
-
- using src_view_type = std::conditional_t<
- Kokkos::is_view<SrcViewType>::value,
- Kokkos::View<typename SrcViewType::const_data_type,
- typename SrcViewType::array_layout,
- typename SrcViewType::device_type,
- Kokkos::MemoryTraits<Kokkos::RandomAccess> >,
- typename SrcViewType::const_type>;
-
- using perm_view_type = typename PermuteViewType::const_type;
-
- using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
-
- DstViewType dst_values;
- perm_view_type sort_order;
- src_view_type src_values;
- int src_offset;
-
- copy_permute_functor(DstViewType const& dst_values_,
- PermuteViewType const& sort_order_,
- SrcViewType const& src_values_, int const& src_offset_)
- : dst_values(dst_values_),
- sort_order(sort_order_),
- src_values(src_values_),
- src_offset(src_offset_) {}
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const int& i) const {
- copy_op::copy(dst_values, i, src_values, src_offset + sort_order(i));
- }
- };
-
- // Naming this alias "execution_space" would be problematic since it would be
- // considered as execution space for the various functors which might use
- // another execution space through sort() or create_permute_vector().
- using exec_space = typename Space::execution_space;
- using bin_op_type = BinSortOp;
-
- struct bin_count_tag {};
- struct bin_offset_tag {};
- struct bin_binning_tag {};
- struct bin_sort_bins_tag {};
-
- public:
- using size_type = SizeType;
- using value_type = size_type;
-
- using offset_type = Kokkos::View<size_type*, Space>;
- using bin_count_type = Kokkos::View<const int*, Space>;
-
- using const_key_view_type = typename KeyViewType::const_type;
-
-  // If the keys are a Kokkos::View we can use a const, random-access
-  // view type; otherwise we can only use their const type.
-
- using const_rnd_key_view_type = std::conditional_t<
- Kokkos::is_view<KeyViewType>::value,
- Kokkos::View<typename KeyViewType::const_data_type,
- typename KeyViewType::array_layout,
- typename KeyViewType::device_type,
- Kokkos::MemoryTraits<Kokkos::RandomAccess> >,
- const_key_view_type>;
-
- using non_const_key_scalar = typename KeyViewType::non_const_value_type;
- using const_key_scalar = typename KeyViewType::const_value_type;
-
- using bin_count_atomic_type =
- Kokkos::View<int*, Space, Kokkos::MemoryTraits<Kokkos::Atomic> >;
-
- private:
- const_key_view_type keys;
- const_rnd_key_view_type keys_rnd;
-
- public:
- BinSortOp bin_op;
- offset_type bin_offsets;
- bin_count_atomic_type bin_count_atomic;
- bin_count_type bin_count_const;
- offset_type sort_order;
-
- int range_begin;
- int range_end;
- bool sort_within_bins;
-
- public:
- BinSort() = default;
-
- //----------------------------------------
-  // Constructor: takes the keys, the binning operator, and optionally whether
-  // to sort within bins (default: false).
- template <typename ExecutionSpace>
- BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
- int range_begin_, int range_end_, BinSortOp bin_op_,
- bool sort_within_bins_ = false)
- : keys(keys_),
- keys_rnd(keys_),
- bin_op(bin_op_),
- bin_offsets(),
- bin_count_atomic(),
- bin_count_const(),
- sort_order(),
- range_begin(range_begin_),
- range_end(range_end_),
- sort_within_bins(sort_within_bins_) {
- static_assert(
- Kokkos::SpaceAccessibility<ExecutionSpace,
- typename Space::memory_space>::accessible,
- "The provided execution space must be able to access the memory space "
- "BinSort was initialized with!");
- if (bin_op.max_bins() <= 0)
- Kokkos::abort(
- "The number of bins in the BinSortOp object must be greater than 0!");
- bin_count_atomic = Kokkos::View<int*, Space>(
- "Kokkos::SortImpl::BinSortFunctor::bin_count", bin_op.max_bins());
- bin_count_const = bin_count_atomic;
- bin_offsets =
- offset_type(view_alloc(exec, WithoutInitializing,
- "Kokkos::SortImpl::BinSortFunctor::bin_offsets"),
- bin_op.max_bins());
- sort_order =
- offset_type(view_alloc(exec, WithoutInitializing,
- "Kokkos::SortImpl::BinSortFunctor::sort_order"),
- range_end - range_begin);
- }
-
- BinSort(const_key_view_type keys_, int range_begin_, int range_end_,
- BinSortOp bin_op_, bool sort_within_bins_ = false)
- : BinSort(exec_space{}, keys_, range_begin_, range_end_, bin_op_,
- sort_within_bins_) {}
-
- template <typename ExecutionSpace>
- BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
- BinSortOp bin_op_, bool sort_within_bins_ = false)
- : BinSort(exec, keys_, 0, keys_.extent(0), bin_op_, sort_within_bins_) {}
-
- BinSort(const_key_view_type keys_, BinSortOp bin_op_,
- bool sort_within_bins_ = false)
- : BinSort(exec_space{}, keys_, bin_op_, sort_within_bins_) {}
-
- //----------------------------------------
-  // Create the permutation vector, the bin_offset array, and the bin_count
-  // array. Can be called again if the keys change.
- template <class ExecutionSpace = exec_space>
- void create_permute_vector(const ExecutionSpace& exec = exec_space{}) {
- static_assert(
- Kokkos::SpaceAccessibility<ExecutionSpace,
- typename Space::memory_space>::accessible,
- "The provided execution space must be able to access the memory space "
- "BinSort was initialized with!");
-
- const size_t len = range_end - range_begin;
- Kokkos::parallel_for(
- "Kokkos::Sort::BinCount",
- Kokkos::RangePolicy<ExecutionSpace, bin_count_tag>(exec, 0, len),
- *this);
- Kokkos::parallel_scan("Kokkos::Sort::BinOffset",
- Kokkos::RangePolicy<ExecutionSpace, bin_offset_tag>(
- exec, 0, bin_op.max_bins()),
- *this);
-
- Kokkos::deep_copy(exec, bin_count_atomic, 0);
- Kokkos::parallel_for(
- "Kokkos::Sort::BinBinning",
- Kokkos::RangePolicy<ExecutionSpace, bin_binning_tag>(exec, 0, len),
- *this);
-
- if (sort_within_bins)
- Kokkos::parallel_for(
- "Kokkos::Sort::BinSort",
- Kokkos::RangePolicy<ExecutionSpace, bin_sort_bins_tag>(
- exec, 0, bin_op.max_bins()),
- *this);
- }
-
- // Sort a subset of a view with respect to the first dimension using the
- // permutation array
- template <class ExecutionSpace, class ValuesViewType>
- void sort(const ExecutionSpace& exec, ValuesViewType const& values,
- int values_range_begin, int values_range_end) const {
- static_assert(
- Kokkos::SpaceAccessibility<ExecutionSpace,
- typename Space::memory_space>::accessible,
- "The provided execution space must be able to access the memory space "
- "BinSort was initialized with!");
- static_assert(
- Kokkos::SpaceAccessibility<
- ExecutionSpace, typename ValuesViewType::memory_space>::accessible,
- "The provided execution space must be able to access the memory space "
- "of the View argument!");
-
- using scratch_view_type =
- Kokkos::View<typename ValuesViewType::data_type,
- typename ValuesViewType::array_layout,
- typename ValuesViewType::device_type>;
-
- const size_t len = range_end - range_begin;
- const size_t values_len = values_range_end - values_range_begin;
- if (len != values_len) {
- Kokkos::abort(
- "BinSort::sort: values range length != permutation vector length");
- }
-
- scratch_view_type sorted_values(
- view_alloc(exec, WithoutInitializing,
- "Kokkos::SortImpl::BinSortFunctor::sorted_values"),
- values.rank_dynamic > 0 ? len : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 1 ? values.extent(1)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 2 ? values.extent(2)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 3 ? values.extent(3)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 4 ? values.extent(4)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 5 ? values.extent(5)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 6 ? values.extent(6)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- values.rank_dynamic > 7 ? values.extent(7)
- : KOKKOS_IMPL_CTOR_DEFAULT_ARG);
-
- {
-      copy_permute_functor<scratch_view_type /* DstViewType */,
-                           offset_type /* PermuteViewType */,
-                           ValuesViewType /* SrcViewType */>
- functor(sorted_values, sort_order, values,
- values_range_begin - range_begin);
-
- parallel_for("Kokkos::Sort::CopyPermute",
- Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
- }
-
- {
- copy_functor<ValuesViewType, scratch_view_type> functor(
- values, range_begin, sorted_values);
-
- parallel_for("Kokkos::Sort::Copy",
- Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
- }
- }
-
- // Sort a subset of a view with respect to the first dimension using the
- // permutation array
- template <class ValuesViewType>
- void sort(ValuesViewType const& values, int values_range_begin,
- int values_range_end) const {
- exec_space exec;
- sort(exec, values, values_range_begin, values_range_end);
- exec.fence("Kokkos::Sort: fence after sorting");
- }
-
- template <class ExecutionSpace, class ValuesViewType>
- void sort(ExecutionSpace const& exec, ValuesViewType const& values) const {
- this->sort(exec, values, 0, /*values.extent(0)*/ range_end - range_begin);
- }
-
- template <class ValuesViewType>
- void sort(ValuesViewType const& values) const {
- this->sort(values, 0, /*values.extent(0)*/ range_end - range_begin);
- }
-
- // Get the permutation vector
- KOKKOS_INLINE_FUNCTION
- offset_type get_permute_vector() const { return sort_order; }
-
- // Get the start offsets for each bin
- KOKKOS_INLINE_FUNCTION
- offset_type get_bin_offsets() const { return bin_offsets; }
-
- // Get the count for each bin
- KOKKOS_INLINE_FUNCTION
- bin_count_type get_bin_count() const { return bin_count_const; }
-
- public:
- KOKKOS_INLINE_FUNCTION
- void operator()(const bin_count_tag& /*tag*/, const int i) const {
- const int j = range_begin + i;
- bin_count_atomic(bin_op.bin(keys, j))++;
- }
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const bin_offset_tag& /*tag*/, const int i,
- value_type& offset, const bool& final) const {
- if (final) {
- bin_offsets(i) = offset;
- }
- offset += bin_count_const(i);
- }
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const bin_binning_tag& /*tag*/, const int i) const {
- const int j = range_begin + i;
- const int bin = bin_op.bin(keys, j);
- const int count = bin_count_atomic(bin)++;
-
- sort_order(bin_offsets(bin) + count) = j;
- }
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const bin_sort_bins_tag& /*tag*/, const int i) const {
- auto bin_size = bin_count_const(i);
- if (bin_size <= 1) return;
- int upper_bound = bin_offsets(i) + bin_size;
- bool sorted = false;
- while (!sorted) {
- sorted = true;
- int old_idx = sort_order(bin_offsets(i));
- int new_idx = 0;
- for (int k = bin_offsets(i) + 1; k < upper_bound; k++) {
- new_idx = sort_order(k);
-
- if (!bin_op(keys_rnd, old_idx, new_idx)) {
- sort_order(k - 1) = new_idx;
- sort_order(k) = old_idx;
- sorted = false;
- } else {
- old_idx = new_idx;
- }
- }
- upper_bound--;
- }
- }
-};
-
-//----------------------------------------------------------------------------
-
-template <class KeyViewType>
-struct BinOp1D {
- int max_bins_ = {};
- double mul_ = {};
- double min_ = {};
-
- BinOp1D() = default;
-
- // Construct BinOp with number of bins, minimum value and maximum value
- BinOp1D(int max_bins__, typename KeyViewType::const_value_type min,
- typename KeyViewType::const_value_type max)
- : max_bins_(max_bins__ + 1),
-      // Cast to double to avoid possible overflow when using integer types
- mul_(static_cast<double>(max_bins__) /
- (static_cast<double>(max) - static_cast<double>(min))),
- min_(static_cast<double>(min)) {
-    // For integral types the number of bins may be larger than the range,
-    // in which case we can have exactly one unique value per bin
-    // and then do not need to sort within bins.
- if (std::is_integral<typename KeyViewType::const_value_type>::value &&
- (static_cast<double>(max) - static_cast<double>(min)) <=
- static_cast<double>(max_bins__)) {
- mul_ = 1.;
- }
- }
-
- // Determine bin index from key value
- template <class ViewType>
- KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
- return static_cast<int>(mul_ * (static_cast<double>(keys(i)) - min_));
- }
-
- // Return maximum bin index + 1
- KOKKOS_INLINE_FUNCTION
- int max_bins() const { return max_bins_; }
-
-  // Compare two keys within a bin; if true, new_val will be put before old_val
- template <class ViewType, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
- iType2& i2) const {
- return keys(i1) < keys(i2);
- }
-};
-
-template <class KeyViewType>
-struct BinOp3D {
- int max_bins_[3] = {};
- double mul_[3] = {};
- double min_[3] = {};
-
- BinOp3D() = default;
-
- BinOp3D(int max_bins__[], typename KeyViewType::const_value_type min[],
- typename KeyViewType::const_value_type max[]) {
- max_bins_[0] = max_bins__[0];
- max_bins_[1] = max_bins__[1];
- max_bins_[2] = max_bins__[2];
- mul_[0] = static_cast<double>(max_bins__[0]) /
- (static_cast<double>(max[0]) - static_cast<double>(min[0]));
- mul_[1] = static_cast<double>(max_bins__[1]) /
- (static_cast<double>(max[1]) - static_cast<double>(min[1]));
- mul_[2] = static_cast<double>(max_bins__[2]) /
- (static_cast<double>(max[2]) - static_cast<double>(min[2]));
- min_[0] = static_cast<double>(min[0]);
- min_[1] = static_cast<double>(min[1]);
- min_[2] = static_cast<double>(min[2]);
- }
-
- template <class ViewType>
- KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
- return int((((int(mul_[0] * (keys(i, 0) - min_[0])) * max_bins_[1]) +
- int(mul_[1] * (keys(i, 1) - min_[1]))) *
- max_bins_[2]) +
- int(mul_[2] * (keys(i, 2) - min_[2])));
- }
-
- KOKKOS_INLINE_FUNCTION
- int max_bins() const { return max_bins_[0] * max_bins_[1] * max_bins_[2]; }
-
- template <class ViewType, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
- iType2& i2) const {
- if (keys(i1, 0) > keys(i2, 0))
- return true;
- else if (keys(i1, 0) == keys(i2, 0)) {
- if (keys(i1, 1) > keys(i2, 1))
- return true;
- else if (keys(i1, 1) == keys(i2, 1)) {
- if (keys(i1, 2) > keys(i2, 2)) return true;
- }
- }
- return false;
- }
-};
-
-namespace Impl {
-
-template <class ViewType, class ExecutionSpace>
-bool try_std_sort(ViewType view, const ExecutionSpace& exec) {
- bool possible = true;
- size_t stride[8] = {view.stride_0(), view.stride_1(), view.stride_2(),
- view.stride_3(), view.stride_4(), view.stride_5(),
- view.stride_6(), view.stride_7()};
- possible = possible &&
- SpaceAccessibility<HostSpace,
- typename ViewType::memory_space>::accessible;
- possible = possible && (ViewType::Rank == 1);
- possible = possible && (stride[0] == 1);
- if (possible) {
- exec.fence("Kokkos::sort: Fence before sorting on the host");
- std::sort(view.data(), view.data() + view.extent(0));
- }
- return possible;
-}
-
-template <class ViewType>
-struct min_max_functor {
- using minmax_scalar =
- Kokkos::MinMaxScalar<typename ViewType::non_const_value_type>;
-
- ViewType view;
- min_max_functor(const ViewType& view_) : view(view_) {}
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const size_t& i, minmax_scalar& minmax) const {
- if (view(i) < minmax.min_val) minmax.min_val = view(i);
- if (view(i) > minmax.max_val) minmax.max_val = view(i);
- }
-};
-
-} // namespace Impl
-
-template <class ExecutionSpace, class ViewType>
-std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
- const ExecutionSpace& exec, ViewType const& view) {
- using CompType = BinOp1D<ViewType>;
-
- Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
- Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
- parallel_reduce("Kokkos::Sort::FindExtent",
- Kokkos::RangePolicy<typename ViewType::execution_space>(
- exec, 0, view.extent(0)),
- Impl::min_max_functor<ViewType>(view), reducer);
- if (result.min_val == result.max_val) return;
-  // For integral types the number of bins may be larger than the range,
-  // in which case we can have exactly one unique value per bin
-  // and then do not need to sort within bins.
- bool sort_in_bins = true;
-  // TODO: figure out a better max_bins than this ...
- int64_t max_bins = view.extent(0) / 2;
- if (std::is_integral<typename ViewType::non_const_value_type>::value) {
-    // Cast to double to avoid possible overflow when using integer types
- auto const max_val = static_cast<double>(result.max_val);
- auto const min_val = static_cast<double>(result.min_val);
- // using 10M as the cutoff for special behavior (roughly 40MB for the count
- // array)
- if ((max_val - min_val) < 10000000) {
- max_bins = max_val - min_val + 1;
- sort_in_bins = false;
- }
- }
- if (std::is_floating_point<typename ViewType::non_const_value_type>::value) {
- KOKKOS_ASSERT(std::isfinite(static_cast<double>(result.max_val) -
- static_cast<double>(result.min_val)));
- }
-
- BinSort<ViewType, CompType> bin_sort(
- view, CompType(max_bins, result.min_val, result.max_val), sort_in_bins);
- bin_sort.create_permute_vector(exec);
- bin_sort.sort(exec, view);
-}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class ExecutionSpace, class ViewType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload not taking bool always_use_kokkos_sort")
-std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
- const ExecutionSpace& exec, ViewType const& view,
- bool const always_use_kokkos_sort) {
- if (!always_use_kokkos_sort && Impl::try_std_sort(view, exec)) {
- return;
- } else {
- sort(exec, view);
- }
-}
-#endif
-
-template <class ViewType>
-void sort(ViewType const& view) {
- typename ViewType::execution_space exec;
- sort(exec, view);
- exec.fence("Kokkos::Sort: fence after sorting");
-}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class ViewType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload not taking bool always_use_kokkos_sort")
-void sort(ViewType const& view, bool const always_use_kokkos_sort) {
- typename ViewType::execution_space exec;
- sort(exec, view, always_use_kokkos_sort);
- exec.fence("Kokkos::Sort: fence after sorting");
-}
-#endif
-
-template <class ExecutionSpace, class ViewType>
-std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
- const ExecutionSpace& exec, ViewType view, size_t const begin,
- size_t const end) {
- using range_policy = Kokkos::RangePolicy<typename ViewType::execution_space>;
- using CompType = BinOp1D<ViewType>;
-
- Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
- Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
-
- parallel_reduce("Kokkos::Sort::FindExtent", range_policy(exec, begin, end),
- Impl::min_max_functor<ViewType>(view), reducer);
-
- if (result.min_val == result.max_val) return;
-
- BinSort<ViewType, CompType> bin_sort(
- exec, view, begin, end,
- CompType((end - begin) / 2, result.min_val, result.max_val), true);
-
- bin_sort.create_permute_vector(exec);
- bin_sort.sort(exec, view, begin, end);
-}
-
-template <class ViewType>
-void sort(ViewType view, size_t const begin, size_t const end) {
- typename ViewType::execution_space exec;
- sort(exec, view, begin, end);
- exec.fence("Kokkos::Sort: fence after sorting");
-}
-
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
-#endif
-#endif
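
For reference, a minimal usage sketch of the sorting facilities deleted above (Kokkos::sort, BinSort, BinOp1D). The view name, extent, and bin count are illustrative, and the sketch assumes a configured Kokkos installation that provides these headers:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_Sort.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::initialize(argc, argv);
      {
        const int n = 1000;
        Kokkos::View<double*> keys("keys", n);

        // Fill with descending values so the view starts out unsorted.
        Kokkos::parallel_for(
            "fill", n, KOKKOS_LAMBDA(const int i) { keys(i) = double(n - i); });

        // Convenience overload: sorts the whole view in place
        // (internally builds a BinSort with a BinOp1D over [min, max]).
        Kokkos::sort(keys);

        // Driving BinSort directly, e.g. to reuse the permutation vector:
        using BinOp = Kokkos::BinOp1D<decltype(keys)>;
        Kokkos::BinSort<decltype(keys), BinOp> bin_sort(
            keys, BinOp(n / 2, 1., double(n)), /*sort_within_bins_=*/true);
        bin_sort.create_permute_vector();
        bin_sort.sort(keys);
        auto permutation = bin_sort.get_permute_vector();
        (void)permutation;
      }
      Kokkos::finalize();
    }

Note that the view-only overloads fence after sorting, while the overloads taking an execution-space instance are asynchronous with respect to the caller.
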
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
-#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
-
-#include "impl/Kokkos_AdjacentDifference.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType>
-std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
- OutputIteratorType>
-adjacent_difference(const ExecutionSpace& ex, InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest) {
- using value_type1 = typename InputIteratorType::value_type;
- using value_type2 = typename OutputIteratorType::value_type;
- using binary_op =
- Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
- value_type2>;
- return Impl::adjacent_difference_impl(
- "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
- first_dest, binary_op());
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp>
-std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
- OutputIteratorType>
-adjacent_difference(const ExecutionSpace& ex, InputIteratorType first_from,
- InputIteratorType last_from, OutputIteratorType first_dest,
- BinaryOp bin_op) {
- return Impl::adjacent_difference_impl(
- "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
- first_dest, bin_op);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType>
-std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
- OutputIteratorType>
-adjacent_difference(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest) {
- using value_type1 = typename InputIteratorType::value_type;
- using value_type2 = typename OutputIteratorType::value_type;
- using binary_op =
- Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
- value_type2>;
- return Impl::adjacent_difference_impl(label, ex, first_from, last_from,
- first_dest, binary_op());
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp>
-std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
- OutputIteratorType>
-adjacent_difference(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest, BinaryOp bin_op) {
- return Impl::adjacent_difference_impl(label, ex, first_from, last_from,
- first_dest, bin_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto adjacent_difference(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- using view_type1 = ::Kokkos::View<DataType1, Properties1...>;
- using view_type2 = ::Kokkos::View<DataType2, Properties2...>;
- using value_type1 = typename view_type1::value_type;
- using value_type2 = typename view_type2::value_type;
- using binary_op =
- Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
- value_type2>;
- return Impl::adjacent_difference_impl(
- "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
- KE::cend(view_from), KE::begin(view_dest), binary_op());
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp>
-auto adjacent_difference(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp bin_op) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- return Impl::adjacent_difference_impl(
- "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
- KE::cend(view_from), KE::begin(view_dest), bin_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto adjacent_difference(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- using view_type1 = ::Kokkos::View<DataType1, Properties1...>;
- using view_type2 = ::Kokkos::View<DataType2, Properties2...>;
- using value_type1 = typename view_type1::value_type;
- using value_type2 = typename view_type2::value_type;
- using binary_op =
- Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
- value_type2>;
-
- return Impl::adjacent_difference_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from),
- KE::begin(view_dest), binary_op());
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp>
-auto adjacent_difference(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp bin_op) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- return Impl::adjacent_difference_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from),
- KE::begin(view_dest), bin_op);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
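
For context, a minimal sketch of calling the view-based overloads above. The function name, views, and custom binary operation are illustrative, and the umbrella header <Kokkos_StdAlgorithms.hpp> is assumed to be available:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void adjacent_difference_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> in("in", 10);
      Kokkos::View<int*> out("out", 10);
      Kokkos::parallel_for(
          "fill", 10, KOKKOS_LAMBDA(const int i) { in(i) = i * i; });

      // Default binary op: out(0) = in(0), out(i) = in(i) - in(i - 1).
      KE::adjacent_difference(exec, in, out);

      // Overload with a custom binary operation (here a sum instead).
      KE::adjacent_difference(
          exec, in, out,
          KOKKOS_LAMBDA(int cur, int prev) { return cur + prev; });
      exec.fence();
    }
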
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
-#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
-
-#include "impl/Kokkos_AdjacentFind.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1
-template <class ExecutionSpace, class IteratorType>
-IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::adjacent_find_impl("Kokkos::adjacent_find_iterator_api_default",
- ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::adjacent_find_impl(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto adjacent_find(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::adjacent_find_impl("Kokkos::adjacent_find_view_api_default", ex,
- KE::begin(v), KE::end(v));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::adjacent_find_impl(label, ex, KE::begin(v), KE::end(v));
-}
-
-// overload set 2
-template <class ExecutionSpace, class IteratorType, class BinaryPredicateType>
-IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, BinaryPredicateType pred) {
- return Impl::adjacent_find_impl("Kokkos::adjacent_find_iterator_api_default",
- ex, first, last, pred);
-}
-
-template <class ExecutionSpace, class IteratorType, class BinaryPredicateType>
-IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- BinaryPredicateType pred) {
- return Impl::adjacent_find_impl(label, ex, first, last, pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class BinaryPredicateType>
-auto adjacent_find(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- BinaryPredicateType pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::adjacent_find_impl("Kokkos::adjacent_find_view_api_default", ex,
- KE::begin(v), KE::end(v), pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class BinaryPredicateType>
-auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- BinaryPredicateType pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::adjacent_find_impl(label, ex, KE::begin(v), KE::end(v), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
-#define KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
-
-#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool all_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
- Predicate predicate) {
- return Impl::all_of_impl("Kokkos::all_of_iterator_api_default", ex, first,
- last, predicate);
-}
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool all_of(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, Predicate predicate) {
- return Impl::all_of_impl(label, ex, first, last, predicate);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool all_of(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::all_of_impl("Kokkos::all_of_view_api_default", ex, KE::cbegin(v),
- KE::cend(v), std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool all_of(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::all_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
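
As a usage note, a short sketch of the labeled overload above; the function name, label, and predicate are illustrative:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    bool all_positive(const Kokkos::View<double*>& v) {
      namespace KE = Kokkos::Experimental;
      // The predicate is evaluated inside a parallel reduction on the given
      // execution space, so it must be device-callable (hence KOKKOS_LAMBDA).
      return KE::all_of("all_positive_check", Kokkos::DefaultExecutionSpace(),
                        v, KOKKOS_LAMBDA(double x) { return x > 0.0; });
    }
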
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
-#define KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
-
-#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool any_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
- Predicate predicate) {
- return Impl::any_of_impl("Kokkos::any_of_view_api_default", ex, first, last,
- predicate);
-}
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool any_of(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, Predicate predicate) {
- return Impl::any_of_impl(label, ex, first, last, predicate);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool any_of(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::any_of_impl("Kokkos::any_of_view_api_default", ex, KE::cbegin(v),
- KE::cend(v), std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool any_of(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::any_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_HPP
-
-#include "impl/Kokkos_CopyCopyN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator copy(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first) {
- return Impl::copy_impl("Kokkos::copy_iterator_api_default", ex, first, last,
- d_first);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first) {
- return Impl::copy_impl(label, ex, first, last, d_first);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::copy_impl("Kokkos::copy_view_api_default", ex,
- KE::cbegin(source), KE::cend(source), KE::begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::copy_impl(label, ex, KE::cbegin(source), KE::cend(source),
- KE::begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
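
And a corresponding sketch for the view overloads of copy above. The names are illustrative, and it is assumed both views are accessible from the chosen execution space:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void copy_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> src("src", 100);
      Kokkos::View<int*> dst("dst", 100);

      // Element-wise copy launched as a kernel on `exec`; unlike
      // Kokkos::deep_copy, this runs as a regular parallel algorithm over
      // the two views.
      KE::copy(exec, src, dst);
      exec.fence();
    }
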
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
-
-#include "impl/Kokkos_CopyBackward.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 copy_backward(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 d_last) {
- return Impl::copy_backward_impl("Kokkos::copy_backward_iterator_api_default",
- ex, first, last, d_last);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 copy_backward(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 d_last) {
- return Impl::copy_backward_impl(label, ex, first, last, d_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto copy_backward(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::copy_backward_impl("Kokkos::copy_backward_view_api_default", ex,
- cbegin(source), cend(source), end(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto copy_backward(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::copy_backward_impl(label, ex, cbegin(source), cend(source),
- end(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
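
A similar hedged sketch for copy_backward(); the destination argument is an
iterator to the *end* of the output range, since elements are written
back-to-front. Names and sizes are again illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void copy_backward_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> src("src", 10), dst("dst", 10);
      // Copies src into the range *ending* at end(dst), proceeding backwards.
      KE::copy_backward(exec, src, dst);
      KE::copy_backward(exec, KE::begin(src), KE::end(src), KE::end(dst));
    }
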
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
-
-#include "impl/Kokkos_CopyIf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class Predicate>
-OutputIterator copy_if(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first,
- Predicate pred) {
- return Impl::copy_if_impl("Kokkos::copy_if_iterator_api_default", ex, first,
- last, d_first, std::move(pred));
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class Predicate>
-OutputIterator copy_if(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first, Predicate pred) {
- return Impl::copy_if_impl(label, ex, first, last, d_first, std::move(pred));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class Predicate>
-auto copy_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest, Predicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::copy_if_impl("Kokkos::copy_if_view_api_default", ex,
- cbegin(source), cend(source), begin(dest),
- std::move(pred));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class Predicate>
-auto copy_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest, Predicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::copy_if_impl(label, ex, cbegin(source), cend(source),
- begin(dest), std::move(pred));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
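
A sketch of copy_if() under the same assumptions; the predicate has to be
callable on the execution space, hence KOKKOS_LAMBDA.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void copy_if_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> src("src", 10), dst("dst", 10);
      // Copies only elements satisfying the predicate; returns an iterator
      // one past the last element written into dst.
      auto end_it =
          KE::copy_if(exec, src, dst, KOKKOS_LAMBDA(int v) { return v > 0; });
      (void)end_it;
    }
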
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_N_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_N_HPP
-
-#include "impl/Kokkos_CopyCopyN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class Size,
- class OutputIterator>
-OutputIterator copy_n(const ExecutionSpace& ex, InputIterator first, Size count,
- OutputIterator result) {
- return Impl::copy_n_impl("Kokkos::copy_n_iterator_api_default", ex, first,
- count, result);
-}
-
-template <class ExecutionSpace, class InputIterator, class Size,
- class OutputIterator>
-OutputIterator copy_n(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, Size count, OutputIterator result) {
- return Impl::copy_n_impl(label, ex, first, count, result);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class Size, class DataType2, class... Properties2>
-auto copy_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::copy_n_impl("Kokkos::copy_n_view_api_default", ex,
- KE::cbegin(source), count, KE::begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class Size, class DataType2, class... Properties2>
-auto copy_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::copy_n_impl(label, ex, KE::cbegin(source), count,
- KE::begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
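
copy_n() in the same vein; the count of 5 is an arbitrary illustration.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void copy_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> src("src", 10), dst("dst", 10);
      KE::copy_n(exec, src, 5, dst);  // copy the first 5 elements of src
    }
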
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COUNT_HPP
-#define KOKKOS_STD_ALGORITHMS_COUNT_HPP
-
-#include "impl/Kokkos_CountCountIf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class T>
-typename IteratorType::difference_type count(const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last,
- const T& value) {
- return Impl::count_impl("Kokkos::count_iterator_api_default", ex, first, last,
- value);
-}
-
-template <class ExecutionSpace, class IteratorType, class T>
-typename IteratorType::difference_type count(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last,
- const T& value) {
- return Impl::count_impl(label, ex, first, last, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-auto count(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::count_impl("Kokkos::count_view_api_default", ex, KE::cbegin(v),
- KE::cend(v), value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-auto count(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::count_impl(label, ex, KE::cbegin(v), KE::cend(v), value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
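
Usage sketch for count(), same assumptions as the sketches above.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void count_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      // Number of elements equal to 3; returns the iterator difference_type.
      auto n = KE::count(exec, v, 3);
      (void)n;
    }
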
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
-
-#include "impl/Kokkos_CountCountIf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-typename IteratorType::difference_type count_if(const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last,
- Predicate predicate) {
- return Impl::count_if_impl("Kokkos::count_if_iterator_api_default", ex, first,
- last, std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-typename IteratorType::difference_type count_if(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last,
- Predicate predicate) {
- return Impl::count_if_impl(label, ex, first, last, std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto count_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::count_if_impl("Kokkos::count_if_view_api_default", ex,
- KE::cbegin(v), KE::cend(v), std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto count_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::count_if_impl(label, ex, KE::cbegin(v), KE::cend(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
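
And count_if() with a device-callable predicate (names illustrative).

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void count_if_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      // Counts the even entries of v.
      auto n = KE::count_if(exec, v,
                            KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
      (void)n;
    }
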
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
-#define KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
-
-#include "impl/Kokkos_Constraints.hpp"
-#include "impl/Kokkos_RandomAccessIterator.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class IteratorType>
-KOKKOS_INLINE_FUNCTION constexpr typename IteratorType::difference_type
-distance(IteratorType first, IteratorType last) {
- static_assert(
- ::Kokkos::Experimental::Impl::are_random_access_iterators<
- IteratorType>::value,
- "Kokkos::Experimental::distance: only implemented for random access "
- "iterators.");
-
- return last - first;
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
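
distance() is marked KOKKOS_INLINE_FUNCTION and constexpr, so unlike the
preceding algorithms it takes no execution space and is usable inside kernels
as well; a host-side sketch with an illustrative view:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void distance_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      // Only random-access iterators are supported, so this is effectively
      // the subtraction last - first (here: 10).
      auto d = KE::distance(KE::begin(v), KE::end(v));
      (void)d;
    }
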
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_HPP
-#define KOKKOS_STD_ALGORITHMS_EQUAL_HPP
-
-#include "impl/Kokkos_Equal.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2) {
- return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
- last1, first2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2) {
- return Impl::equal_impl(label, ex, first1, last1, first2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, BinaryPredicateType predicate) {
- return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
- last1, first2, std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2,
- BinaryPredicateType predicate) {
- return Impl::equal_impl(label, ex, first1, last1, first2,
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-bool equal(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::equal_impl("Kokkos::equal_view_api_default", ex,
- KE::cbegin(view1), KE::cend(view1),
- KE::cbegin(view2));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-bool equal(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::equal_impl(label, ex, KE::cbegin(view1), KE::cend(view1),
- KE::cbegin(view2));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-bool equal(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2,
- BinaryPredicateType predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::equal_impl("Kokkos::equal_view_api_default", ex,
- KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-bool equal(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2,
- BinaryPredicateType predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::equal_impl(label, ex, KE::cbegin(view1), KE::cend(view1),
- KE::cbegin(view2), std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2) {
- return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
- last1, first2, last2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
- return Impl::equal_impl(label, ex, first1, last1, first2, last2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2,
- BinaryPredicateType predicate) {
- return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
- last1, first2, last2, std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- IteratorType1, IteratorType2>::value,
- bool>
-equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
- BinaryPredicateType predicate) {
- return Impl::equal_impl(label, ex, first1, last1, first2, last2,
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
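
A sketch for equal(); note that the view overloads bind the second view to a
non-const reference, so it must be a named lvalue. Names are illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void equal_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> a("a", 10);
      Kokkos::View<int*> b("b", 10);      // lvalue: bound to View&
      bool same = KE::equal(exec, a, b);  // element-wise operator==
      bool same2 = KE::equal(exec, a, b,  // custom binary predicate
                             KOKKOS_LAMBDA(int x, int y) { return x == y; });
      (void)same; (void)same2;
    }
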
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
-#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
-
-#include "impl/Kokkos_ExclusiveScan.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- ValueType init_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::exclusive_scan_default_op_impl(
- "Kokkos::exclusive_scan_default_functors_iterator_api", ex, first, last,
- first_dest, init_value);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-exclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, ValueType init_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::exclusive_scan_default_op_impl(label, ex, first, last,
- first_dest, init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto exclusive_scan(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::exclusive_scan_default_op_impl(
- "Kokkos::exclusive_scan_default_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::exclusive_scan_default_op_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from),
- KE::begin(view_dest), init_value);
-}
-
-// overload set 2
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- ValueType init_value, BinaryOpType bop) {
- Impl::static_assert_is_not_openmptarget(ex);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::exclusive_scan_custom_op_impl(
- "Kokkos::exclusive_scan_custom_functors_iterator_api", ex, first, last,
- first_dest, init_value, bop);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-exclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, ValueType init_value,
- BinaryOpType bop) {
- Impl::static_assert_is_not_openmptarget(ex);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::exclusive_scan_custom_op_impl(label, ex, first, last, first_dest,
- init_value, bop);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryOpType>
-auto exclusive_scan(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value, BinaryOpType bop) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::exclusive_scan_custom_op_impl(
- "Kokkos::exclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- init_value, bop);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryOpType>
-auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value, BinaryOpType bop) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::exclusive_scan_custom_op_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), init_value, bop);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
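
An exclusive_scan() sketch covering both overload sets; the init values and
the multiply op are arbitrary choices for illustration.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void exclusive_scan_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> in("in", 10), out("out", 10);
      // out(i) = 0 + in(0) + ... + in(i-1): the i-th input is excluded.
      KE::exclusive_scan(exec, in, out, 0);
      // Overload set 2: custom associative op (rejected on OpenMPTarget).
      KE::exclusive_scan(exec, in, out, 1,
                         KOKKOS_LAMBDA(int a, int b) { return a * b; });
    }
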
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FILL_HPP
-#define KOKKOS_STD_ALGORITHMS_FILL_HPP
-
-#include "impl/Kokkos_FillFillN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class T>
-void fill(const ExecutionSpace& ex, IteratorType first, IteratorType last,
- const T& value) {
- Impl::fill_impl("Kokkos::fill_iterator_api_default", ex, first, last, value);
-}
-
-template <class ExecutionSpace, class IteratorType, class T>
-void fill(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, const T& value) {
- Impl::fill_impl(label, ex, first, last, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-void fill(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- Impl::fill_impl("Kokkos::fill_view_api_default", ex, begin(view), end(view),
- value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-void fill(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- Impl::fill_impl(label, ex, begin(view), end(view), value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
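
fill() via both the view and iterator APIs; the "fill_label" string is
illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void fill_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      KE::fill(exec, v, 7);                                        // view API
      KE::fill("fill_label", exec, KE::begin(v), KE::end(v), 7);   // iterators
    }
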
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FILL_N_HPP
-#define KOKKOS_STD_ALGORITHMS_FILL_N_HPP
-
-#include "impl/Kokkos_FillFillN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class SizeType, class T>
-IteratorType fill_n(const ExecutionSpace& ex, IteratorType first, SizeType n,
- const T& value) {
- return Impl::fill_n_impl("Kokkos::fill_n_iterator_api_default", ex, first, n,
- value);
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType, class T>
-IteratorType fill_n(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, SizeType n, const T& value) {
- return Impl::fill_n_impl(label, ex, first, n, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class T>
-auto fill_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
- const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::fill_n_impl("Kokkos::fill_n_view_api_default", ex, begin(view),
- n, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class T>
-auto fill_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
- const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::fill_n_impl(label, ex, begin(view), n, value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
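
fill_n(), same assumptions as above.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void fill_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      // Fills the first 5 entries; returns an iterator past the last one.
      auto it = KE::fill_n(exec, v, 5, 7);
      (void)it;
    }
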
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FIND_HPP
-#define KOKKOS_STD_ALGORITHMS_FIND_HPP
-
-#include "impl/Kokkos_FindIfOrNot.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class T>
-InputIterator find(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, const T& value) {
- return Impl::find_impl("Kokkos::find_iterator_api_default", ex, first, last,
- value);
-}
-
-template <class ExecutionSpace, class InputIterator, class T>
-InputIterator find(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, const T& value) {
- return Impl::find_impl(label, ex, first, last, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-auto find(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_impl("Kokkos::find_view_api_default", ex, KE::begin(view),
- KE::end(view), value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class T>
-auto find(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_impl(label, ex, KE::begin(view), KE::end(view), value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
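
A find() sketch; end(view) is the "not found" sentinel. The value 42 is
arbitrary.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      auto it = KE::find(exec, v, 42);
      bool found = (it != KE::end(v));
      (void)found;
    }
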
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FIND_END_HPP
-#define KOKKOS_STD_ALGORITHMS_FIND_END_HPP
-
-#include "impl/Kokkos_FindEnd.hpp"
-#include "Kokkos_Equal.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1: no binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last) {
- return Impl::find_end_impl("Kokkos::find_end_iterator_api_default", ex, first,
- last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last) {
- return Impl::find_end_impl(label, ex, first, last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto find_end(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_end_impl("Kokkos::find_end_view_api_default", ex,
- KE::begin(view), KE::end(view), KE::begin(s_view),
- KE::end(s_view));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto find_end(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_end_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view));
-}
-
-// overload set 2: binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last, const BinaryPredicateType& pred) {
- return Impl::find_end_impl("Kokkos::find_end_iterator_api_default", ex, first,
- last, s_first, s_last, pred);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last,
- const BinaryPredicateType& pred) {
- return Impl::find_end_impl(label, ex, first, last, s_first, s_last, pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto find_end(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_end_impl("Kokkos::find_end_view_api_default", ex,
- KE::begin(view), KE::end(view), KE::begin(s_view),
- KE::end(s_view), pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto find_end(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_end_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
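
find_end() locates the *last* occurrence of a subsequence; a sketch with
hypothetical views.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_end_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      Kokkos::View<int*> v("v", 10);
      Kokkos::View<int*> seq("seq", 3);
      // Iterator to the start of the last occurrence of seq within v,
      // or end(v) if the subsequence does not occur.
      auto it = KE::find_end(exec, v, seq);
      (void)it;
    }
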
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS), identical to the one above; omitted for brevity */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
-#define KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
-
-#include "impl/Kokkos_FindFirstOf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1: no binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last) {
- return Impl::find_first_of_impl("Kokkos::find_first_of_iterator_api_default",
- ex, first, last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last) {
- return Impl::find_first_of_impl(label, ex, first, last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto find_first_of(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_first_of_impl("Kokkos::find_first_of_view_api_default", ex,
- KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto find_first_of(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_first_of_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view));
-}
-
-// overload set 2: binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last,
- const BinaryPredicateType& pred) {
- return Impl::find_first_of_impl("Kokkos::find_first_of_iterator_api_default",
- ex, first, last, s_first, s_last, pred);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last,
- const BinaryPredicateType& pred) {
- return Impl::find_first_of_impl(label, ex, first, last, s_first, s_last,
- pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto find_first_of(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_first_of_impl("Kokkos::find_first_of_view_api_default", ex,
- KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view), pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto find_first_of(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_first_of_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
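
As context for the overload set just removed, a small usage sketch: the call
returns an iterator to the first element of data that equals any element of
candidates (or matches under the optional predicate). All names here are
hypothetical, and Kokkos is assumed to be initialized.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_first_of_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> data("data", 100);
      Kokkos::View<int*> candidates("candidates", 3);
      auto it = KE::find_first_of(Kokkos::DefaultExecutionSpace(),
                                  data, candidates);
      const bool found = (it != KE::end(data));
      (void)found;
    }
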
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
-
-#include "impl/Kokkos_FindIfOrNot.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class PredicateType>
-IteratorType find_if(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, PredicateType predicate) {
- return Impl::find_if_or_not_impl<true>("Kokkos::find_if_iterator_api_default",
- ex, first, last, std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType, class PredicateType>
-IteratorType find_if(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- PredicateType predicate) {
- return Impl::find_if_or_not_impl<true>(label, ex, first, last,
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto find_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_if_or_not_impl<true>("Kokkos::find_if_view_api_default", ex,
- KE::begin(v), KE::end(v),
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto find_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_if_or_not_impl<true>(label, ex, KE::begin(v), KE::end(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
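
A minimal sketch of the view-based find_if overload above; the predicate must
be callable on the device, hence KOKKOS_LAMBDA. Values and names are
illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_if_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 100);
      Kokkos::deep_copy(v, 1);  // fill with ones
      auto it = KE::find_if(Kokkos::DefaultExecutionSpace(), v,
                            KOKKOS_LAMBDA(int x) { return x > 1; });
      const bool found = (it != KE::end(v));  // false: every entry is 1
      (void)found;
    }
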
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
-#define KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
-
-#include "impl/Kokkos_FindIfOrNot.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-IteratorType find_if_not(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, Predicate predicate) {
- return Impl::find_if_or_not_impl<false>(
- "Kokkos::find_if_not_iterator_api_default", ex, first, last,
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-IteratorType find_if_not(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- Predicate predicate) {
- return Impl::find_if_or_not_impl<false>(label, ex, first, last,
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto find_if_not(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_if_or_not_impl<false>(
- "Kokkos::find_if_not_view_api_default", ex, KE::begin(v), KE::end(v),
- std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-auto find_if_not(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::find_if_or_not_impl<false>(label, ex, KE::begin(v), KE::end(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
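
Analogously to find_if, a hedged sketch for find_if_not, which returns an
iterator to the first element for which the predicate is false.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_if_not_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      Kokkos::deep_copy(v, 2);
      auto it = KE::find_if_not(Kokkos::DefaultExecutionSpace(), v,
                                KOKKOS_LAMBDA(int x) { return x > 0; });
      // it == KE::end(v) here, since every entry satisfies the predicate.
      (void)it;
    }
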
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
-#define KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
-
-#include "impl/Kokkos_ForEachForEachN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
-UnaryFunctorType for_each(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- UnaryFunctorType functor) {
- return Impl::for_each_impl(label, ex, first, last, std::move(functor));
-}
-
-template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
-UnaryFunctorType for_each(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, UnaryFunctorType functor) {
- return Impl::for_each_impl("Kokkos::for_each_iterator_api_default", ex, first,
- last, std::move(functor));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class UnaryFunctorType>
-UnaryFunctorType for_each(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- UnaryFunctorType functor) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::for_each_impl(label, ex, KE::begin(v), KE::end(v),
- std::move(functor));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class UnaryFunctorType>
-UnaryFunctorType for_each(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- UnaryFunctorType functor) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::for_each_impl("Kokkos::for_each_view_api_default", ex,
- KE::begin(v), KE::end(v), std::move(functor));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
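
A usage sketch for the overloads above. Mirroring std::for_each, the functor
is returned to the caller; to mutate elements, the functor takes its argument
by reference. Names and values are illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void for_each_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      Kokkos::deep_copy(v, 3);
      KE::for_each(Kokkos::DefaultExecutionSpace(), v,
                   KOKKOS_LAMBDA(int& x) { x *= 2; });  // v now holds 6s
    }
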
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
-#define KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
-
-#include "impl/Kokkos_ForEachForEachN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class UnaryFunctorType>
-IteratorType for_each_n(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, SizeType n,
- UnaryFunctorType functor) {
- return Impl::for_each_n_impl(label, ex, first, n, std::move(functor));
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class UnaryFunctorType>
-IteratorType for_each_n(const ExecutionSpace& ex, IteratorType first,
- SizeType n, UnaryFunctorType functor) {
- return Impl::for_each_n_impl("Kokkos::for_each_n_iterator_api_default", ex,
- first, n, std::move(functor));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class UnaryFunctorType>
-auto for_each_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
- UnaryFunctorType functor) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::for_each_n_impl(label, ex, KE::begin(v), n, std::move(functor));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class UnaryFunctorType>
-auto for_each_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
- UnaryFunctorType functor) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::for_each_n_impl("Kokkos::for_each_n_view_api_default", ex,
- KE::begin(v), n, std::move(functor));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
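
The for_each_n variant applies the functor to the first n elements only and
returns an iterator past the last element visited; a sketch under the same
assumptions as the earlier ones.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void for_each_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      auto it = KE::for_each_n(Kokkos::DefaultExecutionSpace(), v, 5,
                               KOKKOS_LAMBDA(int& x) { x = 1; });
      // The first five entries are now 1; it points past the fifth element.
      (void)it;
    }
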
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_HPP
-#define KOKKOS_STD_ALGORITHMS_GENERATE_HPP
-
-#include "impl/Kokkos_GenerateGenerateN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class Generator>
-void generate(const ExecutionSpace& ex, IteratorType first, IteratorType last,
- Generator g) {
- Impl::generate_impl("Kokkos::generate_iterator_api_default", ex, first, last,
- std::move(g));
-}
-
-template <class ExecutionSpace, class IteratorType, class Generator>
-void generate(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, Generator g) {
- Impl::generate_impl(label, ex, first, last, std::move(g));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Generator>
-void generate(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- Generator g) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- Impl::generate_impl("Kokkos::generate_view_api_default", ex, begin(view),
- end(view), std::move(g));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Generator>
-void generate(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- Generator g) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- Impl::generate_impl(label, ex, begin(view), end(view), std::move(g));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
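
A sketch for generate: the generator takes no arguments, must be callable on
the device, and its return value is assigned to every element of the view.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void generate_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      KE::generate(Kokkos::DefaultExecutionSpace(), v,
                   KOKKOS_LAMBDA() { return 42; });  // v now holds 42s
    }
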
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
-#define KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
-
-#include "impl/Kokkos_GenerateGenerateN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class Size, class Generator>
-IteratorType generate_n(const ExecutionSpace& ex, IteratorType first,
- Size count, Generator g) {
- Impl::generate_n_impl("Kokkos::generate_n_iterator_api_default", ex, first,
- count, std::move(g));
- return first + count;
-}
-
-template <class ExecutionSpace, class IteratorType, class Size, class Generator>
-IteratorType generate_n(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, Size count, Generator g) {
- Impl::generate_n_impl(label, ex, first, count, std::move(g));
- return first + count;
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class Size,
- class Generator>
-auto generate_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, Size count,
- Generator g) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::generate_n_impl("Kokkos::generate_n_view_api_default", ex,
- begin(view), count, std::move(g));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties, class Size,
- class Generator>
-auto generate_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view, Size count,
- Generator g) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::generate_n_impl(label, ex, begin(view), count, std::move(g));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
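
generate_n assigns the generator's value to the first count elements and, as
the iterator overloads above show, returns an iterator advanced by count. A
sketch under the same assumptions:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void generate_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      auto it = KE::generate_n(Kokkos::DefaultExecutionSpace(), v, 3,
                               KOKKOS_LAMBDA() { return 7; });
      (void)it;  // points past the third element
    }
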
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
-#define KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
-
-#include "impl/Kokkos_InclusiveScan.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest) {
- return Impl::inclusive_scan_default_op_impl(
- "Kokkos::inclusive_scan_default_functors_iterator_api", ex, first, last,
- first_dest);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest) {
- return Impl::inclusive_scan_default_op_impl(label, ex, first, last,
- first_dest);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto inclusive_scan(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_default_op_impl(
- "Kokkos::inclusive_scan_default_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto inclusive_scan(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_default_op_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from),
- KE::begin(view_dest));
-}
-
-// overload set 2 (accepting custom binary op)
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- BinaryOp binary_op) {
- return Impl::inclusive_scan_custom_binary_op_impl(
- "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
- first_dest, binary_op);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, BinaryOp binary_op) {
- return Impl::inclusive_scan_custom_binary_op_impl(label, ex, first, last,
- first_dest, binary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp>
-auto inclusive_scan(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp binary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_custom_binary_op_impl(
- "Kokkos::inclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- binary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp>
-auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp binary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_custom_binary_op_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), binary_op);
-}
-
-// overload set 3
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp, class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- BinaryOp binary_op, ValueType init_value) {
- return Impl::inclusive_scan_custom_binary_op_impl(
- "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
- first_dest, binary_op, init_value);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp, class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, BinaryOp binary_op,
- ValueType init_value) {
- return Impl::inclusive_scan_custom_binary_op_impl(
- label, ex, first, last, first_dest, binary_op, init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp,
- class ValueType>
-auto inclusive_scan(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp binary_op, ValueType init_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_custom_binary_op_impl(
- "Kokkos::inclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- binary_op, init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOp,
- class ValueType>
-auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOp binary_op, ValueType init_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::inclusive_scan_custom_binary_op_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), binary_op, init_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
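
A sketch of the simplest case, overload set 1, which computes a prefix sum
with the default (plus) operator; sets 2 and 3 additionally take a custom
binary operator and an initial value. Views and sizes are illustrative.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void inclusive_scan_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> in("in", 5), out("out", 5);
      Kokkos::deep_copy(in, 1);
      KE::inclusive_scan(Kokkos::DefaultExecutionSpace(), in, out);
      // out now holds the running sums {1, 2, 3, 4, 5}
    }
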
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
-#define KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
-
-#include "impl/Kokkos_IsPartitioned.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class PredicateType>
-bool is_partitioned(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, PredicateType p) {
- return Impl::is_partitioned_impl(
- "Kokkos::is_partitioned_iterator_api_default", ex, first, last,
- std::move(p));
-}
-
-template <class ExecutionSpace, class IteratorType, class PredicateType>
-bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, PredicateType p) {
- return Impl::is_partitioned_impl(label, ex, first, last, std::move(p));
-}
-
-template <class ExecutionSpace, class PredicateType, class DataType,
- class... Properties>
-bool is_partitioned(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- PredicateType p) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::is_partitioned_impl("Kokkos::is_partitioned_view_api_default",
- ex, cbegin(v), cend(v), std::move(p));
-}
-
-template <class ExecutionSpace, class PredicateType, class DataType,
- class... Properties>
-bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- PredicateType p) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::is_partitioned_impl(label, ex, cbegin(v), cend(v), std::move(p));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
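
A sketch for is_partitioned, which returns true when every element satisfying
the predicate precedes every element that does not. The data shown in the
comment is hypothetical.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void is_partitioned_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 6);  // suppose it holds {2, 4, 6, 1, 3, 5}
      const bool ok = KE::is_partitioned(
          Kokkos::DefaultExecutionSpace(), v,
          KOKKOS_LAMBDA(int x) { return x % 2 == 0; });  // true for that data
      (void)ok;
    }
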
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
-#define KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
-
-#include "impl/Kokkos_IsSorted.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-bool is_sorted(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::is_sorted_impl("Kokkos::is_sorted_iterator_api_default", ex,
- first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-bool is_sorted(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::is_sorted_impl(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-bool is_sorted(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_impl("Kokkos::is_sorted_view_api_default", ex,
- KE::cbegin(view), KE::cend(view));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-bool is_sorted(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_impl(label, ex, KE::cbegin(view), KE::cend(view));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-bool is_sorted(const ExecutionSpace& ex, IteratorType first, IteratorType last,
- ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
- return Impl::is_sorted_impl("Kokkos::is_sorted_iterator_api_default", ex,
- first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-bool is_sorted(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
- return Impl::is_sorted_impl(label, ex, first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ComparatorType>
-bool is_sorted(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_not_openmptarget(ex);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_impl("Kokkos::is_sorted_view_api_default", ex,
- KE::cbegin(view), KE::cend(view),
- std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ComparatorType>
-bool is_sorted(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_not_openmptarget(ex);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_impl(label, ex, KE::cbegin(view), KE::cend(view),
- std::move(comp));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
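
A sketch for is_sorted; note from the overloads above that the
custom-comparator variants are statically rejected on OpenMPTarget builds.

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void is_sorted_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      Kokkos::deep_copy(v, 0);  // constant data is trivially sorted
      const bool a = KE::is_sorted(Kokkos::DefaultExecutionSpace(), v);
      const bool b = KE::is_sorted(
          Kokkos::DefaultExecutionSpace(), v,
          KOKKOS_LAMBDA(int x, int y) { return x < y; });
      (void)a; (void)b;
    }
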
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
-#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
-
-#include "impl/Kokkos_IsSortedUntil.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::is_sorted_until_impl(
- "Kokkos::is_sorted_until_iterator_api_default", ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::is_sorted_until_impl(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto is_sorted_until(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_until_impl("Kokkos::is_sorted_until_view_api_default",
- ex, KE::begin(view), KE::end(view));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_until_impl(label, ex, KE::begin(view), KE::end(view));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
- return Impl::is_sorted_until_impl(
- "Kokkos::is_sorted_until_iterator_api_default", ex, first, last,
- std::move(comp));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::is_sorted_until_impl(label, ex, first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ComparatorType>
-auto is_sorted_until(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_not_openmptarget(ex);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_until_impl("Kokkos::is_sorted_until_view_api_default",
- ex, KE::begin(view), KE::end(view),
- std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ComparatorType>
-auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_not_openmptarget(ex);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::is_sorted_until_impl(label, ex, KE::begin(view), KE::end(view),
- std::move(comp));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
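
is_sorted_until returns an iterator to the first element that breaks the
ordering, or end(view) if the whole range is sorted; a sketch under the same
assumptions as before:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void is_sorted_until_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      auto it = KE::is_sorted_until(Kokkos::DefaultExecutionSpace(), v);
      const auto sorted_prefix_length = it - KE::begin(v);
      (void)sorted_prefix_length;
    }
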
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
-#define KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
-
-#include <Kokkos_Core.hpp>
-#include "impl/Kokkos_Constraints.hpp"
-#include "Kokkos_Swap.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IteratorType1, class IteratorType2>
-struct StdIterSwapFunctor {
- IteratorType1 m_a;
- IteratorType2 m_b;
-
- KOKKOS_FUNCTION
- void operator()(int i) const {
- (void)i;
- ::Kokkos::Experimental::swap(*m_a, *m_b);
- }
-
- KOKKOS_FUNCTION
- StdIterSwapFunctor(IteratorType1 _a, IteratorType2 _b)
- : m_a(std::move(_a)), m_b(std::move(_b)) {}
-};
-
-template <class IteratorType1, class IteratorType2>
-void iter_swap_impl(IteratorType1 a, IteratorType2 b) {
- // A one-element parallel_for executes the swap on the device where the
- // iterators can be dereferenced; the fence below makes the result visible
- // to the calling host thread.
- ::Kokkos::parallel_for(
- 1, StdIterSwapFunctor<IteratorType1, IteratorType2>(a, b));
- Kokkos::DefaultExecutionSpace().fence(
- "Kokkos::iter_swap: fence after operation");
-}
-} // namespace Impl
-//----------------------------------------------------------------------------
-
-// iter_swap
-template <class IteratorType1, class IteratorType2>
-void iter_swap(IteratorType1 a, IteratorType2 b) {
- Impl::iter_swap_impl(a, b);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
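
Unlike the other algorithms here, iter_swap takes no execution space or
label: as the implementation above shows, it launches a one-element
parallel_for on the default execution space and fences. A sketch, assuming
the view's memory is accessible from that space:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void iter_swap_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      // Swap the first and last elements, dereferencing on the device.
      KE::iter_swap(KE::begin(v), KE::end(v) - 1);
    }
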
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
-#define KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
-
-#include "impl/Kokkos_LexicographicalCompare.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2,
- IteratorType2 last2) {
- return Impl::lexicographical_compare_impl(
- "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
- first2, last2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2) {
- return Impl::lexicographical_compare_impl(label, ex, first1, last1, first2,
- last2);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-bool lexicographical_compare(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- const ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::lexicographical_compare_impl(
- "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
- KE::cend(view1), KE::cbegin(view2), KE::cend(view2));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-bool lexicographical_compare(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::lexicographical_compare_impl(label, ex, KE::cbegin(view1),
- KE::cend(view1), KE::cbegin(view2),
- KE::cend(view2));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ComparatorType>
-bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2,
- IteratorType2 last2, ComparatorType comp) {
- return Impl::lexicographical_compare_impl(
- "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
- first2, last2, comp);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ComparatorType>
-bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2,
- ComparatorType comp) {
- return Impl::lexicographical_compare_impl(label, ex, first1, last1, first2,
- last2, comp);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ComparatorType>
-bool lexicographical_compare(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2, ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::lexicographical_compare_impl(
- "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
- KE::cend(view1), KE::cbegin(view2), KE::cend(view2), comp);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ComparatorType>
-bool lexicographical_compare(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- ::Kokkos::View<DataType2, Properties2...>& view2, ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::lexicographical_compare_impl(label, ex, KE::cbegin(view1),
- KE::cend(view1), KE::cbegin(view2),
- KE::cend(view2), comp);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
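// Usage sketch (not part of the deleted file; assumes Kokkos is initialized
// and <Kokkos_StdAlgorithms.hpp> is included, and all names below are
// illustrative):
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*> a("a", 100);
//   Kokkos::View<int*> b("b", 100);
//   // true if the elements of a compare lexicographically less than those of b
//   const bool a_less = KE::lexicographical_compare(
//       Kokkos::DefaultExecutionSpace(), a, b);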
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
-#define KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
-
-#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-auto max_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::min_or_max_element_impl<MaxFirstLoc>(
- "Kokkos::max_element_iterator_api_default", ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-auto max_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::min_or_max_element_impl<MaxFirstLoc>(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto max_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
- "Kokkos::max_element_iterator_api_default", ex, first, last,
- std::move(comp));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto max_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
- label, ex, first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto max_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::min_or_max_element_impl<MaxFirstLoc>(
- "Kokkos::max_element_view_api_default", ex, begin(v), end(v));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto max_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::min_or_max_element_impl<MaxFirstLoc>(label, ex, begin(v),
- end(v));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto max_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
- "Kokkos::max_element_view_api_default", ex, begin(v), end(v),
- std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto max_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
- label, ex, begin(v), end(v), std::move(comp));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
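// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime and a host-accessible view so the returned iterator can be
// dereferenced on the host):
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<double*, Kokkos::HostSpace> v("v", 100);
//   // iterator to the first largest element
//   auto it = KE::max_element(Kokkos::DefaultHostExecutionSpace(), v);
//   const double largest = *it;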
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
-#define KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
-
-#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-auto min_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::min_or_max_element_impl<MinFirstLoc>(
- "Kokkos::min_element_iterator_api_default", ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-auto min_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::min_or_max_element_impl<MinFirstLoc>(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto min_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
- "Kokkos::min_element_iterator_api_default", ex, first, last,
- std::move(comp));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto min_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
- label, ex, first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto min_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::min_or_max_element_impl<MinFirstLoc>(
- "Kokkos::min_element_view_api_default", ex, begin(v), end(v));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto min_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
- "Kokkos::min_element_view_api_default", ex, begin(v), end(v),
- std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto min_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::min_or_max_element_impl<MinFirstLoc>(label, ex, begin(v),
- end(v));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto min_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
- label, ex, begin(v), end(v), std::move(comp));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
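// Usage sketch with a custom comparator (not part of the deleted file;
// assumes an initialized Kokkos runtime). The comparator must be device
// callable, hence KOKKOS_FUNCTION, and custom comparators are rejected for
// the OpenMPTarget backend by the static_assert above:
//
//   struct Greater {
//     KOKKOS_FUNCTION bool operator()(double a, double b) const { return a > b; }
//   };
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<double*> v("v", 100);
//   // with a reversed ordering, min_element locates the largest value
//   auto it = KE::min_element(Kokkos::DefaultExecutionSpace(), v, Greater{});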
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
-#define KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
-
-#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-auto minmax_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::minmax_element_impl<MinMaxFirstLastLoc>(
- "Kokkos::minmax_element_iterator_api_default", ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-auto minmax_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
- return Impl::minmax_element_impl<MinMaxFirstLastLoc>(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto minmax_element(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
- "Kokkos::minmax_element_iterator_api_default", ex, first, last,
- std::move(comp));
-}
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-auto minmax_element(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ComparatorType comp) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
- label, ex, first, last, std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto minmax_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLoc>(
- "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto minmax_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLoc>(label, ex, begin(v),
- end(v));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto minmax_element(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
- "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v),
- std::move(comp));
-}
-
-template <class ExecutionSpace, class DataType, class ComparatorType,
- class... Properties>
-auto minmax_element(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- ComparatorType comp) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
- label, ex, begin(v), end(v), std::move(comp));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
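// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime and a host-accessible view):
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*, Kokkos::HostSpace> v("v", 100);
//   // returns a Kokkos::pair: first points to the first smallest element,
//   // second to the last largest element
//   auto res = KE::minmax_element(Kokkos::DefaultHostExecutionSpace(), v);
//   const int smallest = *res.first;
//   const int largest  = *res.second;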
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
-#define KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
-
-#include "impl/Kokkos_Mismatch.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// FIXME: add mismatch overloads accepting 3 iterators.
-// An overload consistent with the other algorithms, i.e.:
-//
-//   auto mismatch(const ExecSpace& ex, It1 first1, It1 last1, It2 first2) {...}
-//
-// would make the API ambiguous with the overloads accepting views.
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch(const ExecutionSpace& ex,
- IteratorType1 first1,
- IteratorType1 last1,
- IteratorType2 first2,
- IteratorType2 last2) {
- return Impl::mismatch_impl("Kokkos::mismatch_iterator_api_default", ex,
- first1, last1, first2, last2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
- const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2,
- BinaryPredicateType&& predicate) {
- return Impl::mismatch_impl("Kokkos::mismatch_iterator_api_default", ex,
- first1, last1, first2, last2,
- std::forward<BinaryPredicateType>(predicate));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
- const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
- return Impl::mismatch_impl(label, ex, first1, last1, first2, last2);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
- const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
- BinaryPredicateType&& predicate) {
- return Impl::mismatch_impl(label, ex, first1, last1, first2, last2,
- std::forward<BinaryPredicateType>(predicate));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto mismatch(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- const ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::mismatch_impl("Kokkos::mismatch_view_api_default", ex,
- KE::begin(view1), KE::end(view1), KE::begin(view2),
- KE::end(view2));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto mismatch(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- const ::Kokkos::View<DataType2, Properties2...>& view2,
- BinaryPredicateType&& predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::mismatch_impl("Kokkos::mismatch_view_api_default", ex,
- KE::begin(view1), KE::end(view1), KE::begin(view2),
- KE::end(view2),
- std::forward<BinaryPredicateType>(predicate));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto mismatch(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- const ::Kokkos::View<DataType2, Properties2...>& view2) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::mismatch_impl(label, ex, KE::begin(view1), KE::end(view1),
- KE::begin(view2), KE::end(view2));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto mismatch(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view1,
- const ::Kokkos::View<DataType2, Properties2...>& view2,
- BinaryPredicateType&& predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::mismatch_impl(label, ex, KE::begin(view1), KE::end(view1),
- KE::begin(view2), KE::end(view2),
- std::forward<BinaryPredicateType>(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
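// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime):
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*> a("a", 100);
//   Kokkos::View<int*> b("b", 100);
//   // Kokkos::pair of iterators to the first position where a and b differ;
//   // both point past-the-end if no mismatch is found
//   auto pos = KE::mismatch(Kokkos::DefaultExecutionSpace(), a, b);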
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MOVE_HPP
-#define KOKKOS_STD_ALGORITHMS_MOVE_HPP
-
-#include "impl/Kokkos_Move.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator move(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first) {
- return Impl::move_impl("Kokkos::move_iterator_api_default", ex, first, last,
- d_first);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator move(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first) {
- return Impl::move_impl(label, ex, first, last, d_first);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto move(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::move_impl("Kokkos::move_view_api_default", ex, begin(source),
- end(source), begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto move(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::move_impl(label, ex, begin(source), end(source), begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
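// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime; dest must be a named, non-const view because the view overload
// takes it by non-const reference):
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<double*> src("src", 100);
//   Kokkos::View<double*> dst("dst", 100);
//   // element-wise: dst(i) = std::move(src(i))
//   KE::move(Kokkos::DefaultExecutionSpace(), src, dst);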
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
-#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
-
-#include "impl/Kokkos_MoveBackward.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 move_backward(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 d_last) {
- return Impl::move_backward_impl("Kokkos::move_backward_iterator_api_default",
- ex, first, last, d_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto move_backward(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::move_backward_impl("Kokkos::move_backward_view_api_default", ex,
- begin(source), end(source), end(dest));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 move_backward(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 d_last) {
- return Impl::move_backward_impl(label, ex, first, last, d_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto move_backward(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::move_backward_impl(label, ex, begin(source), end(source),
- end(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
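// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime). Mirroring std::move_backward, the view overload passes end(dest)
// as d_last and fills the destination from the back towards the front:
//
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<double*> src("src", 100);
//   Kokkos::View<double*> dst("dst", 100);
//   KE::move_backward(Kokkos::DefaultExecutionSpace(), src, dst);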
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
-#define KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
-
-#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-bool none_of(const ExecutionSpace& ex, IteratorType first, IteratorType last,
- Predicate predicate) {
- return Impl::none_of_impl("Kokkos::none_of_iterator_api_default", ex, first,
- last, predicate);
-}
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-bool none_of(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, Predicate predicate) {
- return Impl::none_of_impl(label, ex, first, last, predicate);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool none_of(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::none_of_impl("Kokkos::none_of_view_api_default", ex,
- KE::cbegin(v), KE::cend(v), std::move(predicate));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class Predicate>
-bool none_of(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- Predicate predicate) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::none_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
- std::move(predicate));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
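// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime; the predicate must be device callable, hence KOKKOS_FUNCTION):
//
//   struct IsNegative {
//     KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
//   };
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*> v("v", 100);
//   const bool all_non_negative =
//       KE::none_of(Kokkos::DefaultExecutionSpace(), v, IsNegative{});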
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
-
-#include "impl/Kokkos_PartitionCopy.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorTrueType, class OutputIteratorFalseType,
- class PredicateType>
-::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
- const ExecutionSpace& ex, InputIteratorType from_first,
- InputIteratorType from_last, OutputIteratorTrueType to_first_true,
- OutputIteratorFalseType to_first_false, PredicateType p) {
- return Impl::partition_copy_impl(
- "Kokkos::partition_copy_iterator_api_default", ex, from_first, from_last,
- to_first_true, to_first_false, std::move(p));
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorTrueType, class OutputIteratorFalseType,
- class PredicateType>
-::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
- const std::string& label, const ExecutionSpace& ex,
- InputIteratorType from_first, InputIteratorType from_last,
- OutputIteratorTrueType to_first_true,
- OutputIteratorFalseType to_first_false, PredicateType p) {
- return Impl::partition_copy_impl(label, ex, from_first, from_last,
- to_first_true, to_first_false, std::move(p));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class DataType3,
- class... Properties3, class PredicateType>
-auto partition_copy(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
- const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
- PredicateType p) {
- return Impl::partition_copy_impl("Kokkos::partition_copy_view_api_default",
- ex, cbegin(view_from), cend(view_from),
- begin(view_dest_true),
- begin(view_dest_false), std::move(p));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class DataType3,
- class... Properties3, class PredicateType>
-auto partition_copy(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
- const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
- PredicateType p) {
- return Impl::partition_copy_impl(label, ex, cbegin(view_from),
- cend(view_from), begin(view_dest_true),
- begin(view_dest_false), std::move(p));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
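// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime and destination views large enough for their share of the input):
//
//   struct IsEven {
//     KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 == 0; }
//   };
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*> from("from", 100);
//   Kokkos::View<int*> evens("evens", 100);
//   Kokkos::View<int*> odds("odds", 100);
//   // returns a Kokkos::pair of iterators, each one past the last element
//   // written to the corresponding destination
//   auto ends = KE::partition_copy(Kokkos::DefaultExecutionSpace(), from,
//                                  evens, odds, IsEven{});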
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
-#define KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
-
-#include "impl/Kokkos_PartitionPoint.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType, class UnaryPredicate>
-IteratorType partition_point(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, UnaryPredicate p) {
- return Impl::partition_point_impl(
- "Kokkos::partitioned_point_iterator_api_default", ex, first, last,
- std::move(p));
-}
-
-template <class ExecutionSpace, class IteratorType, class UnaryPredicate>
-IteratorType partition_point(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- UnaryPredicate p) {
- return Impl::partition_point_impl(label, ex, first, last, std::move(p));
-}
-
-template <class ExecutionSpace, class UnaryPredicate, class DataType,
- class... Properties>
-auto partition_point(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- UnaryPredicate p) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- return Impl::partition_point_impl(label, ex, begin(v), end(v), std::move(p));
-}
-
-template <class ExecutionSpace, class UnaryPredicate, class DataType,
- class... Properties>
-auto partition_point(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& v,
- UnaryPredicate p) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
- return Impl::partition_point_impl("Kokkos::partition_point_view_api_default",
- ex, begin(v), end(v), std::move(p));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
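// Usage sketch (not part of the deleted file; assumes an initialized Kokkos
// runtime and, as for std::partition_point, an input that is already
// partitioned with respect to the predicate):
//
//   struct IsNegative {
//     KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
//   };
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<int*> v("v", 100);  // negatives first, then non-negatives
//   // iterator to the first element for which the predicate is false
//   auto it = KE::partition_point(Kokkos::DefaultExecutionSpace(), v,
//                                 IsNegative{});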
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_REDUCE_HPP
-#define KOKKOS_STD_ALGORITHMS_REDUCE_HPP
-
-#include "impl/Kokkos_Reduce.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-//
-// overload set 1
-//
-template <class ExecutionSpace, class IteratorType>
-typename IteratorType::value_type reduce(const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last) {
- return Impl::reduce_default_functors_impl(
- "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
- typename IteratorType::value_type());
-}
-
-template <class ExecutionSpace, class IteratorType>
-typename IteratorType::value_type reduce(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last) {
- return Impl::reduce_default_functors_impl(
- label, ex, first, last, typename IteratorType::value_type());
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto reduce(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- using view_type = ::Kokkos::View<DataType, Properties...>;
- using value_type = typename view_type::value_type;
-
- return Impl::reduce_default_functors_impl(
- "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
- KE::cend(view), value_type());
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto reduce(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- using view_type = ::Kokkos::View<DataType, Properties...>;
- using value_type = typename view_type::value_type;
-
- return Impl::reduce_default_functors_impl(label, ex, KE::cbegin(view),
- KE::cend(view), value_type());
-}
-
-//
-// overload set 2
-//
-template <class ExecutionSpace, class IteratorType, class ValueType>
-ValueType reduce(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ValueType init_reduction_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::reduce_default_functors_impl(
- "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
- init_reduction_value);
-}
-
-template <class ExecutionSpace, class IteratorType, class ValueType>
-ValueType reduce(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ValueType init_reduction_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::reduce_default_functors_impl(label, ex, first, last,
- init_reduction_value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType>
-ValueType reduce(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::reduce_default_functors_impl(
- "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
- KE::cend(view), init_reduction_value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType>
-ValueType reduce(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::reduce_default_functors_impl(
- label, ex, KE::cbegin(view), KE::cend(view), init_reduction_value);
-}
-
-//
-// overload set 3
-//
-template <class ExecutionSpace, class IteratorType, class ValueType,
- class BinaryOp>
-ValueType reduce(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ValueType init_reduction_value,
- BinaryOp joiner) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::reduce_custom_functors_impl(
- "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
- init_reduction_value, joiner);
-}
-
-template <class ExecutionSpace, class IteratorType, class ValueType,
- class BinaryOp>
-ValueType reduce(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ValueType init_reduction_value, BinaryOp joiner) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::reduce_custom_functors_impl(label, ex, first, last,
- init_reduction_value, joiner);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType, class BinaryOp>
-ValueType reduce(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value, BinaryOp joiner) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::reduce_custom_functors_impl(
- "Kokkos::reduce_custom_functors_view_api", ex, KE::cbegin(view),
- KE::cend(view), init_reduction_value, joiner);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType, class BinaryOp>
-ValueType reduce(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value, BinaryOp joiner) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::reduce_custom_functors_impl(label, ex, KE::cbegin(view),
- KE::cend(view), init_reduction_value,
- joiner);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
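// Usage sketch covering the three overload sets (not part of the deleted
// file; assumes an initialized Kokkos runtime; a custom joiner must be
// device callable, hence KOKKOS_FUNCTION):
//
//   struct Times {
//     KOKKOS_FUNCTION double operator()(double a, double b) const { return a * b; }
//   };
//   namespace KE = Kokkos::Experimental;
//   Kokkos::View<double*> v("v", 100);
//   auto ex   = Kokkos::DefaultExecutionSpace();
//   auto sum0 = KE::reduce(ex, v);                // set 1: default init, sums
//   auto sum1 = KE::reduce(ex, v, 10.0);          // set 2: explicit init value
//   auto prod = KE::reduce(ex, v, 1.0, Times{});  // set 3: custom joiner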
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header, identical to the one above */
-
-#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_HPP
-#define KOKKOS_STD_ALGORITHMS_REMOVE_HPP
-
-#include "impl/Kokkos_RemoveAllVariants.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class Iterator, class ValueType>
-Iterator remove(const ExecutionSpace& ex, Iterator first, Iterator last,
- const ValueType& value) {
- return Impl::remove_impl("Kokkos::remove_iterator_api_default", ex, first,
- last, value);
-}
-
-template <class ExecutionSpace, class Iterator, class ValueType>
-Iterator remove(const std::string& label, const ExecutionSpace& ex,
- Iterator first, Iterator last, const ValueType& value) {
- return Impl::remove_impl(label, ex, first, last, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType>
-auto remove(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::remove_impl("Kokkos::remove_iterator_api_default", ex,
- ::Kokkos::Experimental::begin(view),
- ::Kokkos::Experimental::end(view), value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType>
-auto remove(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::remove_impl(label, ex, ::Kokkos::Experimental::begin(view),
- ::Kokkos::Experimental::end(view), value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
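// A minimal sketch of the view-based remove overload above, assuming Kokkos
// is initialized via ScopeGuard; the view name and contents are illustrative.
// As with std::remove, matching elements are compacted away and an iterator
// to the new logical end is returned; the physical extent is unchanged.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 6);
  Kokkos::parallel_for(v.extent(0), KOKKOS_LAMBDA(int i) { v(i) = i % 2; });
  auto new_end = KE::remove(Kokkos::DefaultExecutionSpace(), v, 0);
  // Three non-zero elements remain in [begin(v), new_end).
  return KE::distance(KE::begin(v), new_end) == 3 ? 0 : 1;
}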
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
-
-#include "impl/Kokkos_RemoveAllVariants.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class ValueType>
-OutputIterator remove_copy(const ExecutionSpace& ex, InputIterator first_from,
- InputIterator last_from, OutputIterator first_dest,
- const ValueType& value) {
- return Impl::remove_copy_impl("Kokkos::remove_copy_iterator_api_default", ex,
- first_from, last_from, first_dest, value);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class ValueType>
-OutputIterator remove_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first_from, InputIterator last_from,
- OutputIterator first_dest, const ValueType& value) {
- return Impl::remove_copy_impl(label, ex, first_from, last_from, first_dest,
- value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto remove_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- return Impl::remove_copy_impl("Kokkos::remove_copy_iterator_api_default", ex,
- ::Kokkos::Experimental::cbegin(view_from),
- ::Kokkos::Experimental::cend(view_from),
- ::Kokkos::Experimental::begin(view_dest),
- value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto remove_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- return Impl::remove_copy_impl(
- label, ex, ::Kokkos::Experimental::cbegin(view_from),
- ::Kokkos::Experimental::cend(view_from),
- ::Kokkos::Experimental::begin(view_dest), value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
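// A minimal sketch of the two-view remove_copy overload above (illustrative
// names and extents, assuming Kokkos is initialized via ScopeGuard): every
// element of view_from except those equal to the given value is copied into
// view_dest, and an iterator past the last written element is returned.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> from("from", 6), dest("dest", 6);
  Kokkos::parallel_for(from.extent(0), KOKKOS_LAMBDA(int i) { from(i) = i % 2; });
  auto d_end = KE::remove_copy(Kokkos::DefaultExecutionSpace(), from, dest, 0);
  // The three elements equal to 1 were copied.
  return KE::distance(KE::begin(dest), d_end) == 3 ? 0 : 1;
}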
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
-
-#include "impl/Kokkos_RemoveAllVariants.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class UnaryPredicate>
-OutputIterator remove_copy_if(const ExecutionSpace& ex,
- InputIterator first_from, InputIterator last_from,
- OutputIterator first_dest,
- const UnaryPredicate& pred) {
- return Impl::remove_copy_if_impl(
- "Kokkos::remove_copy_if_iterator_api_default", ex, first_from, last_from,
- first_dest, pred);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class UnaryPredicate>
-OutputIterator remove_copy_if(const std::string& label,
- const ExecutionSpace& ex,
- InputIterator first_from, InputIterator last_from,
- OutputIterator first_dest,
- const UnaryPredicate& pred) {
- return Impl::remove_copy_if_impl(label, ex, first_from, last_from, first_dest,
- pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class UnaryPredicate>
-auto remove_copy_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const UnaryPredicate& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- return Impl::remove_copy_if_impl(
- "Kokkos::remove_copy_if_iterator_api_default", ex,
- ::Kokkos::Experimental::cbegin(view_from),
- ::Kokkos::Experimental::cend(view_from),
- ::Kokkos::Experimental::begin(view_dest), pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class UnaryPredicate>
-auto remove_copy_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const UnaryPredicate& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
-
- return Impl::remove_copy_if_impl(
- label, ex, ::Kokkos::Experimental::cbegin(view_from),
- ::Kokkos::Experimental::cend(view_from),
- ::Kokkos::Experimental::begin(view_dest), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
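// A minimal sketch of remove_copy_if with a device-callable predicate
// (illustrative names, assuming Kokkos is initialized via ScopeGuard):
// elements satisfying the predicate are skipped, the rest are copied.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsOdd {
  KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 != 0; }
};

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> from("from", 6), dest("dest", 6);
  Kokkos::parallel_for(from.extent(0), KOKKOS_LAMBDA(int i) { from(i) = i; });
  auto d_end =
      KE::remove_copy_if(Kokkos::DefaultExecutionSpace(), from, dest, IsOdd{});
  return KE::distance(KE::begin(dest), d_end) == 3 ? 0 : 1;  // 0, 2, 4 kept
}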
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
-
-#include "impl/Kokkos_RemoveAllVariants.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class Iterator, class UnaryPredicate>
-Iterator remove_if(const ExecutionSpace& ex, Iterator first, Iterator last,
- UnaryPredicate pred) {
- return Impl::remove_if_impl("Kokkos::remove_if_iterator_api_default", ex,
- first, last, pred);
-}
-
-template <class ExecutionSpace, class Iterator, class UnaryPredicate>
-Iterator remove_if(const std::string& label, const ExecutionSpace& ex,
- Iterator first, Iterator last, UnaryPredicate pred) {
- return Impl::remove_if_impl(label, ex, first, last, pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class UnaryPredicate>
-auto remove_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- UnaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::remove_if_impl("Kokkos::remove_if_iterator_api_default", ex,
- ::Kokkos::Experimental::begin(view),
- ::Kokkos::Experimental::end(view), pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class UnaryPredicate>
-auto remove_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- UnaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::remove_if_impl(label, ex, ::Kokkos::Experimental::begin(view),
- ::Kokkos::Experimental::end(view), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
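// A minimal sketch of the in-place remove_if overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard); the predicate must
// be device-callable.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsOdd {
  KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 != 0; }
};

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 6);
  Kokkos::parallel_for(v.extent(0), KOKKOS_LAMBDA(int i) { v(i) = i; });
  auto new_end = KE::remove_if(Kokkos::DefaultExecutionSpace(), v, IsOdd{});
  return KE::distance(KE::begin(v), new_end) == 3 ? 0 : 1;  // 0, 2, 4 remain
}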
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_HPP
-
-#include "impl/Kokkos_Replace.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class Iterator, class ValueType>
-void replace(const ExecutionSpace& ex, Iterator first, Iterator last,
- const ValueType& old_value, const ValueType& new_value) {
- return Impl::replace_impl("Kokkos::replace_iterator_api", ex, first, last,
- old_value, new_value);
-}
-
-template <class ExecutionSpace, class Iterator, class ValueType>
-void replace(const std::string& label, const ExecutionSpace& ex, Iterator first,
- Iterator last, const ValueType& old_value,
- const ValueType& new_value) {
- return Impl::replace_impl(label, ex, first, last, old_value, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class ValueType>
-void replace(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ValueType& old_value, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_impl("Kokkos::replace_view_api", ex, KE::begin(view),
- KE::end(view), old_value, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class ValueType>
-void replace(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ValueType& old_value, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_impl(label, ex, KE::begin(view), KE::end(view),
- old_value, new_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
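// A minimal sketch of the view-based replace overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard): all occurrences of
// old_value become new_value in place.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 4);
  Kokkos::deep_copy(v, 0);
  KE::replace(Kokkos::DefaultExecutionSpace(), v, /*old*/ 0, /*new*/ 7);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), v);
  return h(0) == 7 ? 0 : 1;
}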
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
-
-#include "impl/Kokkos_ReplaceCopy.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class ValueType>
-OutputIterator replace_copy(const ExecutionSpace& ex, InputIterator first_from,
- InputIterator last_from, OutputIterator first_dest,
- const ValueType& old_value,
- const ValueType& new_value) {
- return Impl::replace_copy_impl("Kokkos::replace_copy_iterator_api", ex,
- first_from, last_from, first_dest, old_value,
- new_value);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class ValueType>
-OutputIterator replace_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first_from, InputIterator last_from,
- OutputIterator first_dest,
- const ValueType& old_value,
- const ValueType& new_value) {
- return Impl::replace_copy_impl(label, ex, first_from, last_from, first_dest,
- old_value, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto replace_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const ValueType& old_value, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_copy_impl("Kokkos::replace_copy_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), old_value, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-auto replace_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- const ValueType& old_value, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_copy_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from), KE::begin(view_dest),
- old_value, new_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
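// A minimal sketch of the two-view replace_copy overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard): view_from is copied
// into view_dest with old_value mapped to new_value; the source is unchanged.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> from("from", 4), dest("dest", 4);
  Kokkos::deep_copy(from, 0);
  KE::replace_copy(Kokkos::DefaultExecutionSpace(), from, dest, 0, 9);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), dest);
  return h(0) == 9 ? 0 : 1;
}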
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
-
-#include "impl/Kokkos_ReplaceCopyIf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class PredicateType, class ValueType>
-OutputIterator replace_copy_if(const ExecutionSpace& ex,
- InputIterator first_from,
- InputIterator last_from,
- OutputIterator first_dest, PredicateType pred,
- const ValueType& new_value) {
- return Impl::replace_copy_if_impl("Kokkos::replace_copy_if_iterator_api", ex,
- first_from, last_from, first_dest, pred,
- new_value);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class PredicateType, class ValueType>
-OutputIterator replace_copy_if(const std::string& label,
- const ExecutionSpace& ex,
- InputIterator first_from,
- InputIterator last_from,
- OutputIterator first_dest, PredicateType pred,
- const ValueType& new_value) {
- return Impl::replace_copy_if_impl(label, ex, first_from, last_from,
- first_dest, pred, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class PredicateType,
- class ValueType>
-auto replace_copy_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- PredicateType pred, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_copy_if_impl("Kokkos::replace_copy_if_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), pred, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class PredicateType,
- class ValueType>
-auto replace_copy_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- PredicateType pred, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_copy_if_impl(label, ex, KE::cbegin(view_from),
- KE::cend(view_from), KE::begin(view_dest),
- pred, new_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
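// A minimal sketch of replace_copy_if (illustrative names, assuming Kokkos
// is initialized via ScopeGuard): elements of view_from satisfying the
// device-callable predicate are written to view_dest as new_value, the
// others are copied unchanged.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsNegative {
  KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
};

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> from("from", 4), dest("dest", 4);
  Kokkos::deep_copy(from, -1);
  KE::replace_copy_if(Kokkos::DefaultExecutionSpace(), from, dest,
                      IsNegative{}, 0);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), dest);
  return h(0) == 0 ? 0 : 1;
}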
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
-
-#include "impl/Kokkos_ReplaceIf.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class Predicate,
- class ValueType>
-void replace_if(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, Predicate pred,
- const ValueType& new_value) {
- return Impl::replace_if_impl("Kokkos::replace_if_iterator_api", ex, first,
- last, pred, new_value);
-}
-
-template <class ExecutionSpace, class InputIterator, class Predicate,
- class ValueType>
-void replace_if(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, Predicate pred,
- const ValueType& new_value) {
- return Impl::replace_if_impl(label, ex, first, last, pred, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class Predicate, class ValueType>
-void replace_if(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- Predicate pred, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_if_impl("Kokkos::replace_if_view_api", ex,
- KE::begin(view), KE::end(view), pred, new_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class Predicate, class ValueType>
-void replace_if(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- Predicate pred, const ValueType& new_value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::replace_if_impl(label, ex, KE::begin(view), KE::end(view), pred,
- new_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
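// A minimal sketch of the in-place replace_if overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard).
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsNegative {
  KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
};

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 4);
  Kokkos::deep_copy(v, -1);
  KE::replace_if(Kokkos::DefaultExecutionSpace(), v, IsNegative{}, 0);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), v);
  return h(0) == 0 ? 0 : 1;
}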
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_HPP
-#define KOKKOS_STD_ALGORITHMS_REVERSE_HPP
-
-#include "impl/Kokkos_Reverse.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator>
-void reverse(const ExecutionSpace& ex, InputIterator first,
- InputIterator last) {
- return Impl::reverse_impl("Kokkos::reverse_iterator_api_default", ex, first,
- last);
-}
-
-template <class ExecutionSpace, class InputIterator>
-void reverse(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last) {
- return Impl::reverse_impl(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-void reverse(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::reverse_impl("Kokkos::reverse_view_api_default", ex,
- KE::begin(view), KE::end(view));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-void reverse(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- namespace KE = ::Kokkos::Experimental;
- return Impl::reverse_impl(label, ex, KE::begin(view), KE::end(view));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
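// A minimal sketch of the view-based reverse overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard): the view is
// reversed in place.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 6);
  Kokkos::parallel_for(v.extent(0), KOKKOS_LAMBDA(int i) { v(i) = i; });
  KE::reverse(Kokkos::DefaultExecutionSpace(), v);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), v);
  return h(0) == 5 ? 0 : 1;  // was 0 1 2 3 4 5, now 5 4 3 2 1 0
}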
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
-
-#include "impl/Kokkos_ReverseCopy.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator reverse_copy(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first) {
- return Impl::reverse_copy_impl("Kokkos::reverse_copy_iterator_api_default",
- ex, first, last, d_first);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator reverse_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first) {
- return Impl::reverse_copy_impl(label, ex, first, last, d_first);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto reverse_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::reverse_copy_impl("Kokkos::reverse_copy_view_api_default", ex,
- cbegin(source), cend(source), begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto reverse_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::reverse_copy_impl(label, ex, cbegin(source), cend(source),
- begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
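// A minimal sketch of the two-view reverse_copy overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard). Note that, unlike
// most view overloads in this set, dest is taken by non-const reference, so
// an lvalue view is passed.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> src("src", 6), dest("dest", 6);
  Kokkos::parallel_for(src.extent(0), KOKKOS_LAMBDA(int i) { src(i) = i; });
  KE::reverse_copy(Kokkos::DefaultExecutionSpace(), src, dest);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), dest);
  return h(0) == 5 ? 0 : 1;
}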
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_HPP
-#define KOKKOS_STD_ALGORITHMS_ROTATE_HPP
-
-#include "impl/Kokkos_Rotate.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType rotate(const ExecutionSpace& ex, IteratorType first,
- IteratorType n_first, IteratorType last) {
- return Impl::rotate_impl("Kokkos::rotate_iterator_api_default", ex, first,
- n_first, last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType rotate(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType n_first,
- IteratorType last) {
- return Impl::rotate_impl(label, ex, first, n_first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto rotate(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- std::size_t n_location) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::rotate_impl("Kokkos::rotate_view_api_default", ex, begin(view),
- begin(view) + n_location, end(view));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto rotate(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- std::size_t n_location) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::rotate_impl(label, ex, begin(view), begin(view) + n_location,
- end(view));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
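// A minimal sketch of the view-based rotate overload above (illustrative
// names, assuming Kokkos is initialized via ScopeGuard): as in std::rotate,
// the element at index n_location becomes the first element.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 6);
  Kokkos::parallel_for(v.extent(0), KOKKOS_LAMBDA(int i) { v(i) = i; });
  KE::rotate(Kokkos::DefaultExecutionSpace(), v, 2);  // 0..5 -> 2 3 4 5 0 1
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), v);
  return h(0) == 2 ? 0 : 1;
}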
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
-
-#include "impl/Kokkos_RotateCopy.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator rotate_copy(const ExecutionSpace& ex, InputIterator first,
- InputIterator n_first, InputIterator last,
- OutputIterator d_first) {
- return Impl::rotate_copy_impl("Kokkos::rotate_copy_iterator_api_default", ex,
- first, n_first, last, d_first);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator rotate_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator n_first,
- InputIterator last, OutputIterator d_first) {
- return Impl::rotate_copy_impl(label, ex, first, n_first, last, d_first);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto rotate_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- std::size_t n_location,
- const ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::rotate_copy_impl("Kokkos::rotate_copy_view_api_default", ex,
- cbegin(source), cbegin(source) + n_location,
- cend(source), begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto rotate_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- std::size_t n_location,
- const ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::rotate_copy_impl(label, ex, cbegin(source),
- cbegin(source) + n_location, cend(source),
- begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
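// A minimal sketch of rotate_copy on views (illustrative names, assuming
// Kokkos is initialized via ScopeGuard): the rotated sequence is written to
// dest and source is left untouched.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> src("src", 6), dest("dest", 6);
  Kokkos::parallel_for(src.extent(0), KOKKOS_LAMBDA(int i) { src(i) = i; });
  KE::rotate_copy(Kokkos::DefaultExecutionSpace(), src, 2, dest);
  auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), dest);
  return h(0) == 2 ? 0 : 1;  // dest holds 2 3 4 5 0 1
}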
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_HPP
-#define KOKKOS_STD_ALGORITHMS_SEARCH_HPP
-
-#include "impl/Kokkos_Search.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1: no binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last) {
- return Impl::search_impl("Kokkos::search_iterator_api_default", ex, first,
- last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last) {
- return Impl::search_impl(label, ex, first, last, s_first, s_last);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto search(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_impl("Kokkos::search_view_api_default", ex,
- KE::begin(view), KE::end(view), KE::begin(s_view),
- KE::end(s_view));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto search(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view));
-}
-
-// overload set 2: binary predicate passed
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last, const BinaryPredicateType& pred) {
- return Impl::search_impl("Kokkos::search_iterator_api_default", ex, first,
- last, s_first, s_last, pred);
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last,
- const BinaryPredicateType& pred) {
- return Impl::search_impl(label, ex, first, last, s_first, s_last, pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto search(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_impl("Kokkos::search_view_api_default", ex,
- KE::begin(view), KE::end(view), KE::begin(s_view),
- KE::end(s_view), pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicateType>
-auto search(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view,
- const ::Kokkos::View<DataType2, Properties2...>& s_view,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_impl(label, ex, KE::begin(view), KE::end(view),
- KE::begin(s_view), KE::end(s_view), pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
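// A minimal sketch of the predicate-free search overload on views
// (illustrative names, assuming Kokkos is initialized via ScopeGuard): it
// returns an iterator to the first occurrence of s_view as a contiguous
// subsequence of view, or end(view) if absent.
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 8), s("s", 2);
  Kokkos::parallel_for(v.extent(0), KOKKOS_LAMBDA(int i) { v(i) = i; });
  Kokkos::parallel_for(s.extent(0), KOKKOS_LAMBDA(int i) { s(i) = 3 + i; });
  auto it = KE::search(Kokkos::DefaultExecutionSpace(), v, s);
  return KE::distance(KE::begin(v), it) == 3 ? 0 : 1;  // {3,4} starts at 3
}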
+++ /dev/null
-#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
-#define KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
-
-#include "impl/Kokkos_SearchN.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1: no binary predicate passed
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class ValueType>
-IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, SizeType count,
- const ValueType& value) {
- return Impl::search_n_impl("Kokkos::search_n_iterator_api_default", ex, first,
- last, count, value);
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class ValueType>
-IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, SizeType count,
- const ValueType& value) {
- return Impl::search_n_impl(label, ex, first, last, count, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class ValueType>
-auto search_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- SizeType count, const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_n_impl("Kokkos::search_n_view_api_default", ex,
- KE::begin(view), KE::end(view), count, value);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class ValueType>
-auto search_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- SizeType count, const ValueType& value) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_n_impl(label, ex, KE::begin(view), KE::end(view), count,
- value);
-}
-
-// overload set 2: binary predicate passed
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class ValueType, class BinaryPredicateType>
-IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, SizeType count, const ValueType& value,
- const BinaryPredicateType& pred) {
- return Impl::search_n_impl("Kokkos::search_n_iterator_api_default", ex, first,
- last, count, value, pred);
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class ValueType, class BinaryPredicateType>
-IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, SizeType count,
- const ValueType& value, const BinaryPredicateType& pred) {
- return Impl::search_n_impl(label, ex, first, last, count, value, pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class ValueType, class BinaryPredicateType>
-auto search_n(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- SizeType count, const ValueType& value,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_n_impl("Kokkos::search_n_view_api_default", ex,
- KE::begin(view), KE::end(view), count, value,
- pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class SizeType, class ValueType, class BinaryPredicateType>
-auto search_n(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- SizeType count, const ValueType& value,
- const BinaryPredicateType& pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- namespace KE = ::Kokkos::Experimental;
- return Impl::search_n_impl(label, ex, KE::begin(view), KE::end(view), count,
- value, pred);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
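
A minimal usage sketch for the search_n overloads above, assuming Kokkos has been initialized (e.g. via Kokkos::ScopeGuard); the view contents and names are illustrative, not part of the header:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void search_n_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<int*> v("v", 8);
  Kokkos::deep_copy(v, 7);  // fill the whole view with 7

  // Overload set 1 (view API, default label): find the first run of
  // 3 consecutive elements equal to 7.
  auto it = KE::search_n(exec, v, 3, 7);

  // Overload set 2: same search with an explicit binary predicate.
  auto it2 = KE::search_n(
      exec, v, 3, 7, KOKKOS_LAMBDA(int a, int b) { return a == b; });
  (void)it;
  (void)it2;
}
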
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
-#define KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
-
-#include "impl/Kokkos_ShiftLeft.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType shift_left(const ExecutionSpace& ex, IteratorType first,
- IteratorType last,
- typename IteratorType::difference_type n) {
- return Impl::shift_left_impl("Kokkos::shift_left_iterator_api_default", ex,
- first, last, n);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType shift_left(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- typename IteratorType::difference_type n) {
- return Impl::shift_left_impl(label, ex, first, last, n);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto shift_left(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- typename decltype(begin(view))::difference_type n) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::shift_left_impl("Kokkos::shift_left_view_api_default", ex,
- begin(view), end(view), n);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto shift_left(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- typename decltype(begin(view))::difference_type n) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::shift_left_impl(label, ex, begin(view), end(view), n);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
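
A minimal sketch of the shift_left view API above, assuming an initialized Kokkos runtime; names are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void shift_left_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<double*> v("v", 10);
  // ... fill v ...

  // Shift the elements of v two positions toward the front; returns an
  // iterator to the new logical end, i.e. end(v) - 2. The trailing two
  // elements are left in an unspecified state, as with std::shift_left.
  auto new_end = KE::shift_left(exec, v, 2);
  (void)new_end;
}
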
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
-#define KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
-
-#include "impl/Kokkos_ShiftRight.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType shift_right(const ExecutionSpace& ex, IteratorType first,
- IteratorType last,
- typename IteratorType::difference_type n) {
- return Impl::shift_right_impl("Kokkos::shift_right_iterator_api_default", ex,
- first, last, n);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType shift_right(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- typename IteratorType::difference_type n) {
- return Impl::shift_right_impl(label, ex, first, last, n);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto shift_right(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- typename decltype(begin(view))::difference_type n) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::shift_right_impl("Kokkos::shift_right_view_api_default", ex,
- begin(view), end(view), n);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto shift_right(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- typename decltype(begin(view))::difference_type n) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::shift_right_impl(label, ex, begin(view), end(view), n);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
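
The mirror-image sketch for shift_right, under the same assumptions as the shift_left example above:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void shift_right_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<double*> v("v", 10);
  // ... fill v ...

  // Shift the elements of v two positions toward the back; returns an
  // iterator to the new logical beginning, i.e. begin(v) + 2.
  auto new_begin = KE::shift_right(exec, v, 2);
  (void)new_begin;
}
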
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_SWAP_HPP
-#define KOKKOS_STD_ALGORITHMS_SWAP_HPP
-
-#include <Kokkos_Core.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-
-// swap
-template <class T>
-KOKKOS_INLINE_FUNCTION void swap(T& a, T& b) noexcept {
- static_assert(
- std::is_move_assignable<T>::value && std::is_move_constructible<T>::value,
- "Kokkos::Experimental::swap arguments must be move assignable "
- "and move constructible");
-
- T tmp = std::move(a);
- a = std::move(b);
- b = std::move(tmp);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
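
Because swap is marked KOKKOS_INLINE_FUNCTION it is callable from device code. A minimal sketch (assuming an initialized Kokkos runtime) that reverses a view in place by swapping mirrored pairs:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void reverse_via_swap_example() {
  const int n = 10;
  Kokkos::View<int*> v("v", n);
  // ... fill v ...

  // Each thread swaps one mirrored pair, reversing v in place.
  Kokkos::parallel_for(
      "reverse_via_swap", n / 2, KOKKOS_LAMBDA(int i) {
        Kokkos::Experimental::swap(v(i), v(n - 1 - i));
      });
}
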
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
-#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
-
-#include "impl/Kokkos_SwapRanges.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 swap_ranges(const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2) {
- return Impl::swap_ranges_impl("Kokkos::swap_ranges_iterator_api_default", ex,
- first1, last1, first2);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto swap_ranges(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- assert(source.extent(0) == dest.extent(0));
- return Impl::swap_ranges_impl("Kokkos::swap_ranges_view_api_default", ex,
- begin(source), end(source), begin(dest));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 swap_ranges(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2) {
- return Impl::swap_ranges_impl(label, ex, first1, last1, first2);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto swap_ranges(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- assert(source.extent(0) == dest.extent(0));
- return Impl::swap_ranges_impl(label, ex, begin(source), end(source),
- begin(dest));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
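
A minimal sketch of the swap_ranges view API, assuming an initialized Kokkos runtime; note the extents must match, which the overloads above check with assert:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void swap_ranges_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<int*> a("a", 100);
  Kokkos::View<int*> b("b", 100);  // same extent as a

  // Exchange the contents of a and b element-wise.
  KE::swap_ranges(exec, a, b);
}
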
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
-
-#include "impl/Kokkos_Transform.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class UnaryOperation>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- InputIterator, OutputIterator>::value,
- OutputIterator>
-transform(const ExecutionSpace& ex, InputIterator first1, InputIterator last1,
- OutputIterator d_first, UnaryOperation unary_op) {
- return Impl::transform_impl("Kokkos::transform_iterator_api_default", ex,
- first1, last1, d_first, std::move(unary_op));
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class UnaryOperation>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- InputIterator, OutputIterator>::value,
- OutputIterator>
-transform(const std::string& label, const ExecutionSpace& ex,
- InputIterator first1, InputIterator last1, OutputIterator d_first,
- UnaryOperation unary_op) {
- return Impl::transform_impl(label, ex, first1, last1, d_first,
- std::move(unary_op));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class UnaryOperation>
-auto transform(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest,
- UnaryOperation unary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::transform_impl("Kokkos::transform_view_api_default", ex,
- begin(source), end(source), begin(dest),
- std::move(unary_op));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class UnaryOperation>
-auto transform(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- ::Kokkos::View<DataType2, Properties2...>& dest,
- UnaryOperation unary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::transform_impl(label, ex, begin(source), end(source),
- begin(dest), std::move(unary_op));
-}
-
-template <class ExecutionSpace, class InputIterator1, class InputIterator2,
- class OutputIterator, class BinaryOperation>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- InputIterator1, InputIterator2, OutputIterator>::value,
- OutputIterator>
-transform(const ExecutionSpace& ex, InputIterator1 first1, InputIterator1 last1,
- InputIterator2 first2, OutputIterator d_first,
- BinaryOperation binary_op) {
- return Impl::transform_impl("Kokkos::transform_iterator_api_default", ex,
- first1, last1, first2, d_first,
- std::move(binary_op));
-}
-
-template <class ExecutionSpace, class InputIterator1, class InputIterator2,
- class OutputIterator, class BinaryOperation>
-std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
- InputIterator1, InputIterator2, OutputIterator>::value,
- OutputIterator>
-transform(const std::string& label, const ExecutionSpace& ex,
- InputIterator1 first1, InputIterator1 last1, InputIterator2 first2,
- OutputIterator d_first, BinaryOperation binary_op) {
- return Impl::transform_impl(label, ex, first1, last1, first2, d_first,
- std::move(binary_op));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class DataType3,
- class... Properties3, class BinaryOperation>
-auto transform(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source1,
- const ::Kokkos::View<DataType2, Properties2...>& source2,
- ::Kokkos::View<DataType3, Properties3...>& dest,
- BinaryOperation binary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::transform_impl("Kokkos::transform_view_api_default", ex,
- begin(source1), end(source1), begin(source2),
- begin(dest), std::move(binary_op));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class DataType3,
- class... Properties3, class BinaryOperation>
-auto transform(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source1,
- const ::Kokkos::View<DataType2, Properties2...>& source2,
- ::Kokkos::View<DataType3, Properties3...>& dest,
- BinaryOperation binary_op) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::transform_impl(label, ex, begin(source1), end(source1),
- begin(source2), begin(dest),
- std::move(binary_op));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
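
A minimal sketch covering both the unary and the binary transform view overloads above, assuming an initialized Kokkos runtime; view names and operations are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void transform_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<double*> x("x", 50);
  Kokkos::View<double*> y("y", 50);
  Kokkos::View<double*> z("z", 50);

  // Unary form: z(i) = 2 * x(i)
  KE::transform(exec, x, z, KOKKOS_LAMBDA(double v) { return 2.0 * v; });

  // Binary form: z(i) = x(i) + y(i)
  KE::transform(exec, x, y, z,
                KOKKOS_LAMBDA(double a, double b) { return a + b; });
}
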
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
-
-#include "impl/Kokkos_TransformExclusiveScan.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType,
- class UnaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- ValueType init_value, BinaryOpType binary_op,
- UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::transform_exclusive_scan_impl(
- "Kokkos::transform_exclusive_scan_custom_functors_iterator_api", ex,
- first, last, first_dest, init_value, binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType,
- class UnaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_exclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, ValueType init_value,
- BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- return Impl::transform_exclusive_scan_impl(label, ex, first, last, first_dest,
- init_value, binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryOpType, class UnaryOpType>
-auto transform_exclusive_scan(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_exclusive_scan_impl(
- "Kokkos::transform_exclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- init_value, binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryOpType, class UnaryOpType>
-auto transform_exclusive_scan(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_exclusive_scan_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), init_value, binary_op, unary_op);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
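
A minimal sketch of the transform_exclusive_scan view API, assuming an initialized Kokkos runtime; the operations are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void transform_exclusive_scan_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<int*> in("in", 16);
  Kokkos::View<int*> out("out", 16);
  Kokkos::deep_copy(in, 1);

  // Exclusive prefix sum of the transformed (absolute) values:
  // out(i) = init + |in(0)| + ... + |in(i-1)|, with out(0) = init.
  KE::transform_exclusive_scan(
      exec, in, out, /*init_value=*/0,
      KOKKOS_LAMBDA(int a, int b) { return a + b; },    // binary "join"
      KOKKOS_LAMBDA(int a) { return a < 0 ? -a : a; }); // unary transform
}
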
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
-
-#include "impl/Kokkos_TransformInclusiveScan.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set 1 (no init value)
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::transform_inclusive_scan_impl(
- "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
- first, last, first_dest, binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, BinaryOpType binary_op,
- UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
-
- return Impl::transform_inclusive_scan_impl(label, ex, first, last, first_dest,
- binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOpType,
- class UnaryOpType>
-auto transform_inclusive_scan(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_inclusive_scan_impl(
- "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- binary_op, unary_op);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOpType,
- class UnaryOpType>
-auto transform_inclusive_scan(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOpType binary_op, UnaryOpType unary_op) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_inclusive_scan_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), binary_op, unary_op);
-}
-
-// overload set 2 (init value)
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType,
- class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
- InputIteratorType last, OutputIteratorType first_dest,
- BinaryOpType binary_op, UnaryOpType unary_op,
- ValueType init_value) {
- Impl::static_assert_is_not_openmptarget(ex);
- return Impl::transform_inclusive_scan_impl(
- "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
- first, last, first_dest, binary_op, unary_op, init_value);
-}
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType,
- class ValueType>
-std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
- InputIteratorType, OutputIteratorType>::value,
- OutputIteratorType>
-transform_inclusive_scan(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first, InputIteratorType last,
- OutputIteratorType first_dest, BinaryOpType binary_op,
- UnaryOpType unary_op, ValueType init_value) {
- Impl::static_assert_is_not_openmptarget(ex);
- return Impl::transform_inclusive_scan_impl(label, ex, first, last, first_dest,
- binary_op, unary_op, init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOpType,
- class UnaryOpType, class ValueType>
-auto transform_inclusive_scan(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_inclusive_scan_impl(
- "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
- KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
- binary_op, unary_op, init_value);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryOpType,
- class UnaryOpType, class ValueType>
-auto transform_inclusive_scan(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& view_from,
- const ::Kokkos::View<DataType2, Properties2...>& view_dest,
- BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
- Impl::static_assert_is_not_openmptarget(ex);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
- namespace KE = ::Kokkos::Experimental;
- return Impl::transform_inclusive_scan_impl(
- label, ex, KE::cbegin(view_from), KE::cend(view_from),
- KE::begin(view_dest), binary_op, unary_op, init_value);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
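
A minimal sketch of both transform_inclusive_scan overload sets, assuming an initialized Kokkos runtime; unlike the exclusive variant, out(i) here includes the transformed in(i):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void transform_inclusive_scan_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<int*> in("in", 16);
  Kokkos::View<int*> out("out", 16);
  Kokkos::deep_copy(in, 1);

  // Overload set 1 (no init value): out(i) = 2*in(0) + ... + 2*in(i).
  KE::transform_inclusive_scan(
      exec, in, out, KOKKOS_LAMBDA(int a, int b) { return a + b; },
      KOKKOS_LAMBDA(int a) { return 2 * a; });

  // Overload set 2 takes a trailing init value added to every prefix.
  KE::transform_inclusive_scan(
      exec, in, out, KOKKOS_LAMBDA(int a, int b) { return a + b; },
      KOKKOS_LAMBDA(int a) { return 2 * a; }, /*init_value=*/5);
}
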
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
-
-#include "impl/Kokkos_TransformReduce.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// ----------------------------
-// overload set1:
-// no custom functors passed, so equivalent to
-// transform_reduce(first1, last1, first2, init, plus<>(), multiplies<>());
-// ----------------------------
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ValueType>
-ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2,
- ValueType init_reduction_value) {
- return Impl::transform_reduce_default_functors_impl(
- "Kokkos::transform_reduce_default_functors_iterator_api", ex, first1,
- last1, first2, std::move(init_reduction_value));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ValueType>
-ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2,
- ValueType init_reduction_value) {
- return Impl::transform_reduce_default_functors_impl(
- label, ex, first1, last1, first2, std::move(init_reduction_value));
-}
-
-// overload1 accepting views
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-ValueType transform_reduce(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& first_view,
- const ::Kokkos::View<DataType2, Properties2...>& second_view,
- ValueType init_reduction_value) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
-
- return Impl::transform_reduce_default_functors_impl(
- "Kokkos::transform_reduce_default_functors_iterator_api", ex,
- KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
- std::move(init_reduction_value));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType>
-ValueType transform_reduce(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& first_view,
- const ::Kokkos::View<DataType2, Properties2...>& second_view,
- ValueType init_reduction_value) {
- namespace KE = ::Kokkos::Experimental;
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
-
- return Impl::transform_reduce_default_functors_impl(
- label, ex, KE::cbegin(first_view), KE::cend(first_view),
- KE::cbegin(second_view), std::move(init_reduction_value));
-}
-
-//
-// overload set2:
-// accepts a custom transform and joiner functor
-//
-
-// Note: the C++ standard names this argument BinaryReductionOp, but in
-// the Kokkos naming convention it corresponds to a "joiner" that knows
-// how to join two values.
-// NOTE: the "joiner" and "transformer" must be commutative.
-
-// https://en.cppreference.com/w/cpp/algorithm/transform_reduce
-
-// api accepting iterators
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ValueType, class BinaryJoinerType, class BinaryTransform>
-ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2,
- ValueType init_reduction_value,
- BinaryJoinerType joiner,
- BinaryTransform transformer) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::transform_reduce_custom_functors_impl(
- "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
- last1, first2, std::move(init_reduction_value), std::move(joiner),
- std::move(transformer));
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class ValueType, class BinaryJoinerType, class BinaryTransform>
-ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, ValueType init_reduction_value,
- BinaryJoinerType joiner,
- BinaryTransform transformer) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::transform_reduce_custom_functors_impl(
- label, ex, first1, last1, first2, std::move(init_reduction_value),
- std::move(joiner), std::move(transformer));
-}
-
-// accepting views
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryJoinerType, class BinaryTransform>
-ValueType transform_reduce(
- const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& first_view,
- const ::Kokkos::View<DataType2, Properties2...>& second_view,
- ValueType init_reduction_value, BinaryJoinerType joiner,
- BinaryTransform transformer) {
- namespace KE = ::Kokkos::Experimental;
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
-
- return Impl::transform_reduce_custom_functors_impl(
- "Kokkos::transform_reduce_custom_functors_view_api", ex,
- KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
- std::move(init_reduction_value), std::move(joiner),
- std::move(transformer));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class ValueType,
- class BinaryJoinerType, class BinaryTransform>
-ValueType transform_reduce(
- const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& first_view,
- const ::Kokkos::View<DataType2, Properties2...>& second_view,
- ValueType init_reduction_value, BinaryJoinerType joiner,
- BinaryTransform transformer) {
- namespace KE = ::Kokkos::Experimental;
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
-
- return Impl::transform_reduce_custom_functors_impl(
- label, ex, KE::cbegin(first_view), KE::cend(first_view),
- KE::cbegin(second_view), std::move(init_reduction_value),
- std::move(joiner), std::move(transformer));
-}
-
-//
-// overload set3:
-//
-// accepting iterators
-template <class ExecutionSpace, class IteratorType, class ValueType,
- class BinaryJoinerType, class UnaryTransform>
-// need this to avoid ambiguous call
-std::enable_if_t<
- ::Kokkos::Experimental::Impl::are_iterators<IteratorType>::value, ValueType>
-transform_reduce(const ExecutionSpace& ex, IteratorType first1,
- IteratorType last1, ValueType init_reduction_value,
- BinaryJoinerType joiner, UnaryTransform transformer) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::transform_reduce_custom_functors_impl(
- "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
- last1, std::move(init_reduction_value), std::move(joiner),
- std::move(transformer));
-}
-
-template <class ExecutionSpace, class IteratorType, class ValueType,
- class BinaryJoinerType, class UnaryTransform>
-// need this to avoid ambiguous call
-std::enable_if_t<
- ::Kokkos::Experimental::Impl::are_iterators<IteratorType>::value, ValueType>
-transform_reduce(const std::string& label, const ExecutionSpace& ex,
- IteratorType first1, IteratorType last1,
- ValueType init_reduction_value, BinaryJoinerType joiner,
- UnaryTransform transformer) {
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- return Impl::transform_reduce_custom_functors_impl(
- label, ex, first1, last1, std::move(init_reduction_value),
- std::move(joiner), std::move(transformer));
-}
-
-// accepting views
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType, class BinaryJoinerType, class UnaryTransform>
-ValueType transform_reduce(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value,
- BinaryJoinerType joiner,
- UnaryTransform transformer) {
- namespace KE = ::Kokkos::Experimental;
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::transform_reduce_custom_functors_impl(
- "Kokkos::transform_reduce_custom_functors_view_api", ex, KE::cbegin(view),
- KE::cend(view), std::move(init_reduction_value), std::move(joiner),
- std::move(transformer));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class ValueType, class BinaryJoinerType, class UnaryTransform>
-ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- ValueType init_reduction_value,
- BinaryJoinerType joiner,
- UnaryTransform transformer) {
- namespace KE = ::Kokkos::Experimental;
- static_assert(std::is_move_constructible<ValueType>::value,
- "ValueType must be move constructible.");
-
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
-
- return Impl::transform_reduce_custom_functors_impl(
- label, ex, KE::cbegin(view), KE::cend(view),
- std::move(init_reduction_value), std::move(joiner),
- std::move(transformer));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
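
A minimal sketch of overload sets 1 and 3 above, assuming an initialized Kokkos runtime; with default functors, the two-range form reduces to a dot product:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void transform_reduce_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<double*> x("x", 1000);
  Kokkos::View<double*> y("y", 1000);
  Kokkos::deep_copy(x, 2.0);
  Kokkos::deep_copy(y, 3.0);

  // Overload set 1 (default functors): init + sum_i x(i) * y(i).
  const double dot = KE::transform_reduce(exec, x, y, 0.0);

  // Overload set 3 (single range, custom joiner and unary transform):
  // the sum of squares of x.
  const double sumsq = KE::transform_reduce(
      exec, x, 0.0, KOKKOS_LAMBDA(double a, double b) { return a + b; },
      KOKKOS_LAMBDA(double a) { return a * a; });
  (void)dot;
  (void)sumsq;
}
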
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
-#define KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
-
-#include "impl/Kokkos_Unique.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// note: the enable_if below avoids "call to ... is ambiguous" errors,
-// e.g. in the unit tests where these overloads are invoked through a
-// variadic helper function
-
-// overload set1
-template <class ExecutionSpace, class IteratorType>
-std::enable_if_t<!::Kokkos::is_view<IteratorType>::value, IteratorType> unique(
- const ExecutionSpace& ex, IteratorType first, IteratorType last) {
- return Impl::unique_impl("Kokkos::unique_iterator_api_default", ex, first,
- last);
-}
-
-template <class ExecutionSpace, class IteratorType>
-std::enable_if_t<!::Kokkos::is_view<IteratorType>::value, IteratorType> unique(
- const std::string& label, const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- return Impl::unique_impl(label, ex, first, last);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto unique(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return ::Kokkos::Experimental::unique("Kokkos::unique_view_api_default", ex,
- begin(view), end(view));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties>
-auto unique(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return ::Kokkos::Experimental::unique(label, ex, begin(view), end(view));
-}
-
-// overload set2
-template <class ExecutionSpace, class IteratorType, class BinaryPredicate>
-IteratorType unique(const ExecutionSpace& ex, IteratorType first,
- IteratorType last, BinaryPredicate pred) {
- return Impl::unique_impl("Kokkos::unique_iterator_api_default", ex, first,
- last, pred);
-}
-
-template <class ExecutionSpace, class IteratorType, class BinaryPredicate>
-IteratorType unique(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- BinaryPredicate pred) {
- return Impl::unique_impl(label, ex, first, last, pred);
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class BinaryPredicate>
-auto unique(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- BinaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::unique_impl("Kokkos::unique_view_api_default", ex, begin(view),
- end(view), std::move(pred));
-}
-
-template <class ExecutionSpace, class DataType, class... Properties,
- class BinaryPredicate>
-auto unique(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType, Properties...>& view,
- BinaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
- return Impl::unique_impl(label, ex, begin(view), end(view), std::move(pred));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
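
A minimal sketch of the unique view overloads, assuming an initialized Kokkos runtime and a view holding sorted data with duplicates; as with std::unique, only consecutive duplicates are collapsed:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void unique_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;

  Kokkos::View<int*> v("v", 8);
  // ... fill v, e.g. with {1,1,2,2,3,3,3,4} ...

  // Overload set 1: collapse consecutive duplicates in place; elements
  // past the returned iterator are left in an unspecified state.
  auto new_end = KE::unique(exec, v);

  // Overload set 2: same, with an explicit equality predicate.
  auto new_end2 =
      KE::unique(exec, v, KOKKOS_LAMBDA(int a, int b) { return a == b; });
  (void)new_end;
  (void)new_end2;
}
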
+++ /dev/null
-/*
-//@HEADER
-// (Kokkos v. 3.0 BSD 3-clause license header, verbatim copy of the one above)
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
-#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
-
-#include "impl/Kokkos_UniqueCopy.hpp"
-#include "Kokkos_BeginEnd.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-
-// overload set1
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-std::enable_if_t<!::Kokkos::is_view<InputIterator>::value, OutputIterator>
-unique_copy(const ExecutionSpace& ex, InputIterator first, InputIterator last,
- OutputIterator d_first) {
- return Impl::unique_copy_impl("Kokkos::unique_copy_iterator_api_default", ex,
- first, last, d_first);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-std::enable_if_t<!::Kokkos::is_view<InputIterator>::value, OutputIterator>
-unique_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, OutputIterator d_first) {
- return Impl::unique_copy_impl(label, ex, first, last, d_first);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto unique_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- const ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return ::Kokkos::Experimental::unique_copy(
- "Kokkos::unique_copy_view_api_default", ex, cbegin(source), cend(source),
- begin(dest));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2>
-auto unique_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- const ::Kokkos::View<DataType2, Properties2...>& dest) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return ::Kokkos::Experimental::unique_copy(label, ex, cbegin(source),
- cend(source), begin(dest));
-}
-
-// overload set2
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class BinaryPredicate>
-OutputIterator unique_copy(const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first,
- BinaryPredicate pred) {
- return Impl::unique_copy_impl("Kokkos::unique_copy_iterator_api_default", ex,
- first, last, d_first, pred);
-}
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class BinaryPredicate>
-OutputIterator unique_copy(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first, BinaryPredicate pred) {
- return Impl::unique_copy_impl(label, ex, first, last, d_first, pred);
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicate>
-auto unique_copy(const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- const ::Kokkos::View<DataType2, Properties2...>& dest,
- BinaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::unique_copy_impl("Kokkos::unique_copy_view_api_default", ex,
- cbegin(source), cend(source), begin(dest),
- std::move(pred));
-}
-
-template <class ExecutionSpace, class DataType1, class... Properties1,
- class DataType2, class... Properties2, class BinaryPredicate>
-auto unique_copy(const std::string& label, const ExecutionSpace& ex,
- const ::Kokkos::View<DataType1, Properties1...>& source,
- const ::Kokkos::View<DataType2, Properties2...>& dest,
- BinaryPredicate pred) {
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
- Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
-
- return Impl::unique_copy_impl(label, ex, cbegin(source), cend(source),
- begin(dest), std::move(pred));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ValueType1, class ValueType2, class RetType = ValueType2>
-struct StdAdjacentDifferenceDefaultBinaryOpFunctor {
- KOKKOS_FUNCTION
- constexpr RetType operator()(const ValueType1& a, const ValueType2& b) const {
- return a - b;
- }
-};
-
-template <class InputIteratorType, class OutputIteratorType,
- class BinaryOperator>
-struct StdAdjacentDiffFunctor {
- using index_type = typename InputIteratorType::difference_type;
-
- const InputIteratorType m_first_from;
- const OutputIteratorType m_first_dest;
- BinaryOperator m_op;
-
- KOKKOS_FUNCTION
- void operator()(const index_type i) const {
- const auto& my_value = m_first_from[i];
- if (i == 0) {
- m_first_dest[i] = my_value;
- } else {
- const auto& left_value = m_first_from[i - 1];
- m_first_dest[i] = m_op(my_value, left_value);
- }
- }
-
- KOKKOS_FUNCTION
- StdAdjacentDiffFunctor(InputIteratorType first_from,
- OutputIteratorType first_dest, BinaryOperator op)
- : m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_op(std::move(op)) {}
-};
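-
-// e.g. (illustrative): applying this functor over {1, 4, 9, 16} with the
-// default binary op writes {1, 3, 5, 7}, matching std::adjacent_difference:
-// the first element is copied through, every later one is a[i] - a[i-1].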
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOp>
-OutputIteratorType adjacent_difference_impl(const std::string& label,
- const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- BinaryOp bin_op) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- if (first_from == last_from) {
- return first_dest;
- }
-
- // aliases
- using functor_t =
- StdAdjacentDiffFunctor<InputIteratorType, OutputIteratorType, BinaryOp>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- functor_t(first_from, first_dest, bin_op));
- ex.fence("Kokkos::adjacent_difference: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType, class ReducerType,
- class PredicateType>
-struct StdAdjacentFindFunctor {
- using red_value_type = typename ReducerType::value_type;
-
- IteratorType m_first;
- ReducerType m_reducer;
- PredicateType m_p;
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, red_value_type& red_value) const {
- const auto& my_value = m_first[i];
- const auto& next_value = m_first[i + 1];
- const bool are_equal = m_p(my_value, next_value);
-
- auto rv =
- are_equal
- ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
-
- m_reducer.join(red_value, rv);
- }
-
- KOKKOS_FUNCTION
- StdAdjacentFindFunctor(IteratorType first, ReducerType reducer,
- PredicateType p)
- : m_first(std::move(first)),
- m_reducer(std::move(reducer)),
- m_p(std::move(p)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class PredicateType>
-IteratorType adjacent_find_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last, PredicateType pred) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- const auto num_elements = Kokkos::Experimental::distance(first, last);
-
- if (num_elements <= 1) {
- return last;
- }
-
- using index_type = typename IteratorType::difference_type;
- using reducer_type = FirstLoc<index_type>;
- using reduction_value_type = typename reducer_type::value_type;
- using func_t = StdAdjacentFindFunctor<index_type, IteratorType, reducer_type,
- PredicateType>;
-
- reduction_value_type red_result;
- reducer_type reducer(red_result);
-
- // note that we use num_elements-1 below because
- // each index i in the reduction checks both i and (i+1).
- ::Kokkos::parallel_reduce(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements - 1),
- func_t(first, reducer, pred), reducer);
-
- // fence not needed because reducing into scalar
- if (red_result.min_loc_true ==
- ::Kokkos::reduction_identity<index_type>::min()) {
- return last;
- } else {
- return first + red_result.min_loc_true;
- }
-}
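-
-// e.g. (illustrative): for {1, 2, 2, 3} with the default equality predicate,
-// the reduction finds the smallest index i with m_first[i] == m_first[i + 1],
-// here i = 1, and the algorithm returns first + 1.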
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType adjacent_find_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- using value_type = typename IteratorType::value_type;
- using default_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
- return adjacent_find_impl(label, ex, first, last, default_pred_t());
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
-
-#include "Kokkos_FindIfOrNot.hpp"
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool all_of_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, Predicate predicate) {
- return (find_if_or_not_impl<false>(label, ex, first, last, predicate) ==
- last);
-}
-
-template <class ExecutionSpace, class InputIterator, class Predicate>
-bool any_of_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last, Predicate predicate) {
- return (find_if_or_not_impl<true>(label, ex, first, last, predicate) != last);
-}
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-bool none_of_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, Predicate predicate) {
- return (find_if_or_not_impl<true>(label, ex, first, last, predicate) == last);
-}
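-
-// e.g. (illustrative): over {1, 2, 3} with predicate(x) = (x > 0), all_of is
-// true (no element fails the predicate, so find_if_not returns last), any_of
-// is true (find_if returns an element), and none_of is false.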
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType1, class IteratorType2>
-struct StdCopyBackwardFunctor {
- static_assert(std::is_signed<IndexType>::value,
- "Kokkos: StdCopyBackwardFunctor requires signed index type");
-
- IteratorType1 m_last;
- IteratorType2 m_dest_last;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const { m_dest_last[-i - 1] = m_last[-i - 1]; }
-
- KOKKOS_FUNCTION
- StdCopyBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
- : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
-};
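-
-// e.g. (illustrative): index i = 0 copies *(last - 1) to *(d_last - 1),
-// i = 1 copies *(last - 2) to *(d_last - 2), and so on, filling the
-// destination range [d_last - n, d_last) back to front.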
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 copy_backward_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 d_last) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_last);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using index_type = typename IteratorType1::difference_type;
- using func_t =
- StdCopyBackwardFunctor<index_type, IteratorType1, IteratorType2>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(last, d_last));
- ex.fence("Kokkos::copy_backward: fence after operation");
-
- // return
- return d_last - num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIterator, class OutputIterator>
-struct StdCopyFunctor {
- InputIterator m_first;
- OutputIterator m_dest_first;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const { m_dest_first[i] = m_first[i]; }
-
- KOKKOS_FUNCTION
- StdCopyFunctor(InputIterator _first, OutputIterator _dest_first)
- : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
-};
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator copy_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_t = StdCopyFunctor<index_type, InputIterator, OutputIterator>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, d_first));
- ex.fence("Kokkos::copy: fence after operation");
-
- // return
- return d_first + num_elements;
-}
-
-template <class ExecutionSpace, class InputIterator, class Size,
- class OutputIterator>
-OutputIterator copy_n_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first_from, Size count,
- OutputIterator first_dest) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
-
- if (count > 0) {
- return copy_impl(label, ex, first_from, first_from + count, first_dest);
- } else {
- return first_dest;
- }
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IteratorType, class Predicate>
-struct StdCountIfFunctor {
- using index_type = typename IteratorType::difference_type;
- IteratorType m_first;
- Predicate m_predicate;
-
- KOKKOS_FUNCTION
- void operator()(index_type i, index_type& lsum) const {
- if (m_predicate(m_first[i])) {
- lsum++;
- }
- }
-
- KOKKOS_FUNCTION
- StdCountIfFunctor(IteratorType _first, Predicate _predicate)
- : m_first(std::move(_first)), m_predicate(std::move(_predicate)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class Predicate>
-typename IteratorType::difference_type count_if_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first,
- IteratorType last,
- Predicate predicate) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using func_t = StdCountIfFunctor<IteratorType, Predicate>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- typename IteratorType::difference_type count = 0;
- ::Kokkos::parallel_reduce(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, predicate), count);
- ex.fence("Kokkos::count_if: fence after operation");
-
- return count;
-}
-
-template <class ExecutionSpace, class IteratorType, class T>
-auto count_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, const T& value) {
- return count_if_impl(
- label, ex, first, last,
- ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
-}
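-
-// e.g. (illustrative): count_impl over {1, 2, 2, 3} with value = 2 returns 2,
-// since the equals-value predicate fires at two indices of the reduction.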
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-struct StdEqualFunctor {
- IteratorType1 m_first1;
- IteratorType2 m_first2;
- BinaryPredicateType m_predicate;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i, std::size_t& lsum) const {
- if (!m_predicate(m_first1[i], m_first2[i])) {
- lsum = 1;
- }
- }
-
- KOKKOS_FUNCTION
- StdEqualFunctor(IteratorType1 _first1, IteratorType2 _first2,
- BinaryPredicateType _predicate)
- : m_first1(std::move(_first1)),
- m_first2(std::move(_first2)),
- m_predicate(std::move(_predicate)) {}
-};
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-bool equal_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
- BinaryPredicateType predicate) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first1, first2);
- Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
- Impl::expect_valid_range(first1, last1);
-
- // aliases
- using index_type = typename IteratorType1::difference_type;
- using func_t = StdEqualFunctor<index_type, IteratorType1, IteratorType2,
- BinaryPredicateType>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first1, last1);
- std::size_t different = 0;
- ::Kokkos::parallel_reduce(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first1, first2, predicate), different);
- ex.fence("Kokkos::equal: fence after operation");
-
- return !different;
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-bool equal_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2) {
- using value_type1 = typename IteratorType1::value_type;
- using value_type2 = typename IteratorType2::value_type;
- using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return equal_impl(label, ex, first1, last1, first2, pred_t());
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-bool equal_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
- IteratorType2 last2, BinaryPredicateType predicate) {
- const auto d1 = ::Kokkos::Experimental::distance(first1, last1);
- const auto d2 = ::Kokkos::Experimental::distance(first2, last2);
- if (d1 != d2) {
- return false;
- }
-
- return equal_impl(label, ex, first1, last1, first2, predicate);
-}
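-
-// e.g. (illustrative): {1, 2, 3} vs {1, 2, 3, 4} returns false here without
-// launching a kernel, because the distances differ; ranges of equal length
-// fall through to the element-wise comparison above.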
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-bool equal_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
- IteratorType2 last2) {
- Impl::expect_valid_range(first1, last1);
- Impl::expect_valid_range(first2, last2);
-
- using value_type1 = typename IteratorType1::value_type;
- using value_type2 = typename IteratorType2::value_type;
- using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return equal_impl(label, ex, first1, last1, first2, last2, pred_t());
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
-#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
-#include <std_algorithms/Kokkos_TransformExclusiveScan.hpp>
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
- class FirstDest>
-struct ExclusiveScanDefaultFunctorForKnownNeutralElement {
- using execution_space = ExeSpace;
-
- ValueType m_init_value;
- FirstFrom m_first_from;
- FirstDest m_first_dest;
-
- KOKKOS_FUNCTION
- ExclusiveScanDefaultFunctorForKnownNeutralElement(ValueType init,
- FirstFrom first_from,
- FirstDest first_dest)
- : m_init_value(std::move(init)),
- m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, ValueType& update,
- const bool final_pass) const {
- if (final_pass) m_first_dest[i] = update + m_init_value;
- update += m_first_from[i];
- }
-};
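-
-// e.g. (illustrative): for input {1, 2, 3, 4} and init = 10, an exclusive
-// scan writes {10, 11, 13, 16}: each output element is init plus the sum of
-// all inputs *before* that position.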
-
-template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
- class FirstDest>
-struct ExclusiveScanDefaultFunctor {
- using execution_space = ExeSpace;
- using value_type =
- ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
-
- ValueType m_init_value;
- FirstFrom m_first_from;
- FirstDest m_first_dest;
-
- KOKKOS_FUNCTION
- ExclusiveScanDefaultFunctor(ValueType init, FirstFrom first_from,
- FirstDest first_dest)
- : m_init_value(std::move(init)),
- m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, value_type& update,
- const bool final_pass) const {
- if (final_pass) {
- if (i == 0) {
- m_first_dest[i] = m_init_value;
- } else {
- m_first_dest[i] = update.val + m_init_value;
- }
- }
-
- const auto tmp = value_type{m_first_from[i], false};
- this->join(update, tmp);
- }
-
- KOKKOS_FUNCTION
- void init(value_type& update) const {
- update.val = {};
- update.is_initial = true;
- }
-
- KOKKOS_FUNCTION
- void join(value_type& update, const value_type& input) const {
- if (update.is_initial) {
- update.val = input.val;
- update.is_initial = false;
- } else {
- update.val = update.val + input.val;
- }
- }
-};
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType>
-OutputIteratorType exclusive_scan_custom_op_impl(
- const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
- using func_type =
- TransformExclusiveScanFunctor<ExecutionSpace, index_type, ValueType,
- InputIteratorType, OutputIteratorType,
- BinaryOpType, unary_op_type>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_scan(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(init_value, first_from, first_dest, bop, unary_op_type()));
- ex.fence("Kokkos::exclusive_scan_custom_op: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-template <typename ValueType>
-using ex_scan_has_reduction_identity_sum_t =
- decltype(Kokkos::reduction_identity<ValueType>::sum());
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType>
-OutputIteratorType exclusive_scan_default_op_impl(const std::string& label,
- const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- ValueType init_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // does it make sense to do this static_assert too?
- // using input_iterator_value_type = typename InputIteratorType::value_type;
- // static_assert
- // (std::is_convertible<std::remove_cv_t<input_iterator_value_type>,
- // ValueType>::value,
- // "exclusive_scan: InputIteratorType::value_type not convertible to
- // ValueType");
-
- // we are unnecessarily duplicating code, but this is on purpose
- // so that we can use the default_op for OpenMPTarget.
- // Originally, I had this implemented as:
- // '''
- // using bop_type = StdExclusiveScanDefaultJoinFunctor<ValueType>;
- // call exclusive_scan_custom_op_impl(..., bop_type());
- // '''
- // which avoids duplicating the functors, but for OpenMPTarget
- // I cannot use a custom binary op.
- // This is the same problem that occurs for reductions.
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using func_type = std::conditional_t<
- ::Kokkos::is_detected<ex_scan_has_reduction_identity_sum_t,
- ValueType>::value,
- ExclusiveScanDefaultFunctorForKnownNeutralElement<
- ExecutionSpace, index_type, ValueType, InputIteratorType,
- OutputIteratorType>,
- ExclusiveScanDefaultFunctor<ExecutionSpace, index_type, ValueType,
- InputIteratorType, OutputIteratorType>>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_scan(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(init_value, first_from, first_dest));
-
- ex.fence("Kokkos::exclusive_scan_default_op: fence after operation");
-
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class InputIterator, class T>
-struct StdFillFunctor {
- using index_type = typename InputIterator::difference_type;
- InputIterator m_first;
- T m_value;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const { m_first[i] = m_value; }
-
- KOKKOS_FUNCTION
- StdFillFunctor(InputIterator _first, T _value)
- : m_first(std::move(_first)), m_value(std::move(_value)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class T>
-void fill_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, const T& value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- StdFillFunctor<IteratorType, T>(first, value));
- ex.fence("Kokkos::fill: fence after operation");
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType, class T>
-IteratorType fill_n_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, SizeType n, const T& value) {
- if (n <= 0) {
- return first;
- }
-
- auto last = first + n;
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- fill_impl(label, ex, first, last, value);
- return last;
-}
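-
-// e.g. (illustrative): fill_n_impl(label, ex, first, 3, 7) writes the value
-// 7 to the first three positions and returns first + 3.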
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IteratorType, class UnaryFunctorType>
-struct StdForEachFunctor {
- using index_type = typename IteratorType::difference_type;
- IteratorType m_first;
- UnaryFunctorType m_functor;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const { m_functor(m_first[i]); }
-
- KOKKOS_FUNCTION
- StdForEachFunctor(IteratorType _first, UnaryFunctorType _functor)
- : m_first(std::move(_first)), m_functor(std::move(_functor)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
-UnaryFunctorType for_each_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last, UnaryFunctorType functor) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- StdForEachFunctor<IteratorType, UnaryFunctorType>(first, functor));
- ex.fence("Kokkos::for_each: fence after operation");
-
- return functor;
-}
-
-template <class ExecutionSpace, class IteratorType, class SizeType,
- class UnaryFunctorType>
-IteratorType for_each_n_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, SizeType n,
- UnaryFunctorType functor) {
- if (n <= 0) {
- return first;
- }
-
- auto last = first + n;
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- for_each_impl(label, ex, first, last, std::move(functor));
- // no need to fence since for_each_impl fences already
-
- return last;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IteratorType, class Generator>
-struct StdGenerateFunctor {
- using index_type = typename IteratorType::difference_type;
- IteratorType m_first;
- Generator m_generator;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const { m_first[i] = m_generator(); }
-
- KOKKOS_FUNCTION
- StdGenerateFunctor(IteratorType _first, Generator _g)
- : m_first(std::move(_first)), m_generator(std::move(_g)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class Generator>
-void generate_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, Generator g) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using func_t = StdGenerateFunctor<IteratorType, Generator>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, g));
- ex.fence("Kokkos::generate: fence after operation");
-}
-
-template <class ExecutionSpace, class IteratorType, class Size, class Generator>
-IteratorType generate_n_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, Size count, Generator g) {
- if (count <= 0) {
- return first;
- }
-
- generate_impl(label, ex, first, first + count, g);
- return first + count;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
-#define KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
-
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ValueType>
-struct StdNumericScanIdentityReferenceUnaryFunctor {
- KOKKOS_FUNCTION
- constexpr const ValueType& operator()(const ValueType& a) const { return a; }
-};
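-
-// e.g. (illustrative): exclusive_scan passes this functor as the unary op of
-// TransformExclusiveScanFunctor, so each element is forwarded unchanged and
-// the "transform" step becomes a no-op.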
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <std_algorithms/Kokkos_Find.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IteratorType, class IndicatorViewType, class ComparatorType>
-struct StdIsSortedUntilFunctor {
- using index_type = typename IteratorType::difference_type;
- IteratorType m_first;
- IndicatorViewType m_indicator;
- ComparatorType m_comparator;
-
- KOKKOS_FUNCTION
- void operator()(const index_type i, int& update, const bool final) const {
- const auto& val_i = m_first[i];
- const auto& val_ip1 = m_first[i + 1];
-
- if (m_comparator(val_ip1, val_i)) {
- ++update;
- }
-
- if (final) {
- m_indicator(i) = update;
- }
- }
-
- KOKKOS_FUNCTION
- StdIsSortedUntilFunctor(IteratorType _first1, IndicatorViewType indicator,
- ComparatorType comparator)
- : m_first(std::move(_first1)),
- m_indicator(std::move(indicator)),
- m_comparator(std::move(comparator)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class ComparatorType>
-IteratorType is_sorted_until_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last, ComparatorType comp) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- const auto num_elements = Kokkos::Experimental::distance(first, last);
-
- // trivial case
- if (num_elements <= 1) {
- return last;
- }
-
-  /*
-    Use a parallel scan with a helper "indicator" view: the scan fills
-    the indicator with a partial sum that stays 0 while consecutive
-    elements are in order. At the first pair that breaks the ordering
-    the partial sum becomes 1 (and stays >= 1 afterwards), so the first
-    entry equal to 1 marks exactly the position where sorting breaks.
-  */
-
- // aliases
- using indicator_value_type = std::size_t;
- using indicator_view_type =
- ::Kokkos::View<indicator_value_type*, ExecutionSpace>;
- using functor_type =
- StdIsSortedUntilFunctor<IteratorType, indicator_view_type,
- ComparatorType>;
-
- // do scan
- // use num_elements-1 because each index handles i and i+1
- const auto num_elements_minus_one = num_elements - 1;
- indicator_view_type indicator("is_sorted_until_indicator_helper",
- num_elements_minus_one);
- ::Kokkos::parallel_scan(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_minus_one),
- functor_type(first, indicator, std::move(comp)));
-
- // try to find the first sentinel value, which indicates
- // where the sorting condition breaks
- namespace KE = ::Kokkos::Experimental;
- constexpr indicator_value_type sentinel_value = 1;
- auto r =
- KE::find(ex, KE::cbegin(indicator), KE::cend(indicator), sentinel_value);
- const auto shift = r - ::Kokkos::Experimental::cbegin(indicator);
-
- return first + (shift + 1);
-}
-
-template <class ExecutionSpace, class IteratorType>
-IteratorType is_sorted_until_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last) {
- using value_type = typename IteratorType::value_type;
- using pred_t = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
- return is_sorted_until_impl(label, ex, first, last, pred_t());
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
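A minimal usage sketch of the scan-based is_sorted_until_impl above, reached
through the public Kokkos::Experimental::is_sorted_until entry point that
forwards to it (declared in a header not shown in this diff); the view label
and values are illustrative only:

#include <cstdio>
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    // 0,1,2,3,9,5,6,7: the ascending order first breaks at value 5
    Kokkos::View<int*> v("v", 8);
    Kokkos::parallel_for(
        "fill", Kokkos::RangePolicy<>(exec, 0, v.extent(0)),
        KOKKOS_LAMBDA(int i) { v(i) = (i == 4) ? 9 : i; });

    // iterator to the first element that is out of order
    auto it = KE::is_sorted_until(exec, KE::begin(v), KE::end(v));
    std::printf("sorted prefix length = %ld\n",
                static_cast<long>(it - KE::begin(v)));  // prints 5
  }
  Kokkos::finalize();
  return 0;
}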
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType1, class IteratorType2,
- class ReducerType, class BinaryPredicateType>
-struct StdMismatchRedFunctor {
- using red_value_type = typename ReducerType::value_type;
-
- IteratorType1 m_first1;
- IteratorType2 m_first2;
- ReducerType m_reducer;
- BinaryPredicateType m_predicate;
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, red_value_type& red_value) const {
- const auto& my_value1 = m_first1[i];
- const auto& my_value2 = m_first2[i];
-
- auto rv =
- !m_predicate(my_value1, my_value2)
- ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
-
- m_reducer.join(red_value, rv);
- }
-
- KOKKOS_FUNCTION
- StdMismatchRedFunctor(IteratorType1 first1, IteratorType2 first2,
- ReducerType reducer, BinaryPredicateType predicate)
- : m_first1(std::move(first1)),
- m_first2(std::move(first2)),
- m_reducer(std::move(reducer)),
- m_predicate(std::move(predicate)) {}
-};
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2,
- class BinaryPredicateType>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch_impl(
- const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
- BinaryPredicateType predicate) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first1, first2);
- Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
- Impl::expect_valid_range(first1, last1);
- Impl::expect_valid_range(first2, last2);
-
- // aliases
- using return_type = ::Kokkos::pair<IteratorType1, IteratorType2>;
- using index_type = typename IteratorType1::difference_type;
- using reducer_type = FirstLoc<index_type>;
- using reduction_value_type = typename reducer_type::value_type;
- using functor_type =
- StdMismatchRedFunctor<index_type, IteratorType1, IteratorType2,
- reducer_type, BinaryPredicateType>;
-
-  // trivial case: this early return matters. For the OpenMPTarget
-  // backend, running the reduction below on an empty range produced
-  // erroneous results, so empty ranges are handled explicitly here.
- const auto num_e1 = last1 - first1;
- const auto num_e2 = last2 - first2;
- if (num_e1 == 0 || num_e2 == 0) {
- return return_type(first1, first2);
- }
-
- // run
-  const auto num_elements_par_reduce = (num_e1 <= num_e2) ? num_e1 : num_e2;
- reduction_value_type red_result;
- reducer_type reducer(red_result);
- ::Kokkos::parallel_reduce(
-      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_par_reduce),
- functor_type(first1, first2, reducer, std::move(predicate)), reducer);
-
-  // no fence needed: a parallel_reduce into a scalar result is
-  // blocking, so red_result is ready to read at this point
-
- // decide and return
- constexpr auto red_min = ::Kokkos::reduction_identity<index_type>::min();
- if (red_result.min_loc_true == red_min) {
- // in here means mismatch has not been found
- if (num_e1 == num_e2) {
- return return_type(last1, last2);
- } else if (num_e1 < num_e2) {
- return return_type(last1, first2 + num_e1);
- } else {
- return return_type(first1 + num_e2, last2);
- }
- } else {
- // in here means mismatch has been found
- return return_type(first1 + red_result.min_loc_true,
- first2 + red_result.min_loc_true);
- }
-}
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-::Kokkos::pair<IteratorType1, IteratorType2> mismatch_impl(
- const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
- using value_type1 = typename IteratorType1::value_type;
- using value_type2 = typename IteratorType2::value_type;
- using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return mismatch_impl(label, ex, first1, last1, first2, last2, pred_t());
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
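A short sketch of how mismatch_impl above is driven through the public
Kokkos::Experimental::mismatch API; the returned Kokkos::pair holds iterators
to the first differing elements. Names and data are illustrative:

#include <cstdio>
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> a("a", 6), b("b", 6);
    Kokkos::parallel_for(
        "fill", Kokkos::RangePolicy<>(exec, 0, 6), KOKKOS_LAMBDA(int i) {
          a(i) = i;
          b(i) = (i < 3) ? i : -i;  // first difference at index 3
        });

    auto p = KE::mismatch(exec, KE::begin(a), KE::end(a),
                          KE::begin(b), KE::end(b));
    std::printf("mismatch at offset %ld\n",
                static_cast<long>(p.first - KE::begin(a)));  // prints 3
  }
  Kokkos::finalize();
  return 0;
}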
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIterator, class OutputIterator>
-struct StdMoveFunctor {
- InputIterator m_first;
- OutputIterator m_dest_first;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const {
- m_dest_first[i] = std::move(m_first[i]);
- }
-
- StdMoveFunctor(InputIterator _first, OutputIterator _dest_first)
- : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
-};
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator move_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_t = StdMoveFunctor<index_type, InputIterator, OutputIterator>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, d_first));
- ex.fence("Kokkos::move: fence after operation");
-
- // return
- return d_first + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
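A small sketch of the element-wise parallel move implemented above, called
through the public Kokkos::Experimental::move API; as with std::move, the
source elements are left in a valid but unspecified state. Labels and values
are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<double*> src("src", 4), dst("dst", 4);
    Kokkos::deep_copy(src, 1.5);

    // element-wise parallel move of src into dst
    auto it = KE::move(exec, KE::begin(src), KE::end(src), KE::begin(dst));
    // it == KE::begin(dst) + 4, mirroring "return d_first + num_elements"
    (void)it;
  }
  Kokkos::finalize();
  return 0;
}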
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType1, class IteratorType2>
-struct StdMoveBackwardFunctor {
- static_assert(std::is_signed<IndexType>::value,
- "Kokkos: StdMoveBackwardFunctor requires signed index type");
-
- IteratorType1 m_last;
- IteratorType2 m_dest_last;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const {
- m_dest_last[-i - 1] = std::move(m_last[-i - 1]);
- }
-
- StdMoveBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
- : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
-};
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 move_backward_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 d_last) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_last);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using index_type = typename IteratorType1::difference_type;
- using func_t =
- StdMoveBackwardFunctor<index_type, IteratorType1, IteratorType2>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(last, d_last));
- ex.fence("Kokkos::move_backward: fence after operation");
-
- // return
- return d_last - num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
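The negative offsets in StdMoveBackwardFunctor index both ranges from their
ends: iteration i moves last[-i-1] into d_last[-i-1]. A sketch of the
corresponding public call, using distinct source and destination views since
the iterations run in parallel rather than sequentially back to front; names
and sizes are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> src("src", 6), dst("dst", 8);
    Kokkos::parallel_for(
        "iota", Kokkos::RangePolicy<>(exec, 0, src.extent(0)),
        KOKKOS_LAMBDA(int i) { src(i) = i; });

    // iteration 0 moves src(5) into dst(7), iteration 1 moves src(4)
    // into dst(6), and so on, filling the destination from the back
    auto d_first =
        KE::move_backward(exec, KE::begin(src), KE::end(src), KE::end(dst));
    // d_first == KE::end(dst) - 6 == KE::begin(dst) + 2
    (void)d_first;
  }
  Kokkos::finalize();
  return 0;
}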
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class InputIterator, class ValueType>
-struct StdReplaceFunctor {
- using index_type = typename InputIterator::difference_type;
- InputIterator m_first;
- ValueType m_old_value;
- ValueType m_new_value;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const {
- if (m_first[i] == m_old_value) {
- m_first[i] = m_new_value;
- }
- }
-
- KOKKOS_FUNCTION
- StdReplaceFunctor(InputIterator first, ValueType old_value,
- ValueType new_value)
- : m_first(std::move(first)),
- m_old_value(std::move(old_value)),
- m_new_value(std::move(new_value)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class ValueType>
-void replace_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- const ValueType& old_value, const ValueType& new_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using func_t = StdReplaceFunctor<IteratorType, ValueType>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, old_value, new_value));
- ex.fence("Kokkos::replace: fence after operation");
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
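A minimal sketch of the public Kokkos::Experimental::replace call that lowers
onto the parallel_for in replace_impl above; the label and values are
illustrative only:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> v("v", 10);
    Kokkos::deep_copy(v, -1);

    // overwrite every element equal to -1 with 0, in parallel
    KE::replace(exec, KE::begin(v), KE::end(v), -1, 0);
  }
  Kokkos::finalize();
  return 0;
}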
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class InputIterator, class OutputIterator, class ValueType>
-struct StdReplaceCopyFunctor {
- using index_type = typename InputIterator::difference_type;
-
- InputIterator m_first_from;
- OutputIterator m_first_dest;
- ValueType m_old_value;
- ValueType m_new_value;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const {
- const auto& myvalue_from = m_first_from[i];
-
- if (myvalue_from == m_old_value) {
- m_first_dest[i] = m_new_value;
- } else {
- m_first_dest[i] = myvalue_from;
- }
- }
-
- KOKKOS_FUNCTION
- StdReplaceCopyFunctor(InputIterator first_from, OutputIterator first_dest,
- ValueType old_value, ValueType new_value)
- : m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_old_value(std::move(old_value)),
- m_new_value(std::move(new_value)) {}
-};
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType>
-OutputIteratorType replace_copy_impl(const std::string& label,
- const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- const ValueType& old_value,
- const ValueType& new_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using func_t =
- StdReplaceCopyFunctor<InputIteratorType, OutputIteratorType, ValueType>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first_from, first_dest, old_value, new_value));
- ex.fence("Kokkos::replace_copy: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
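A sketch of replace_copy through its public entry point: the source range is
only read, the substituted sequence lands in the destination, and the returned
iterator equals first_dest + num_elements as computed above. Data are
illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> src("src", 5), dst("dst", 5);
    Kokkos::parallel_for(
        "fill", Kokkos::RangePolicy<>(exec, 0, src.extent(0)),
        KOKKOS_LAMBDA(int i) { src(i) = (i % 2 == 0) ? 7 : i; });

    // src stays untouched; dst receives the sequence with 7 -> 0
    auto ret = KE::replace_copy(exec, KE::cbegin(src), KE::cend(src),
                                KE::begin(dst), 7, 0);
    // ret == KE::begin(dst) + 5
    (void)ret;
  }
  Kokkos::finalize();
  return 0;
}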
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIterator, class OutputIterator,
- class PredicateType, class ValueType>
-struct StdReplaceIfCopyFunctor {
- InputIterator m_first_from;
- OutputIterator m_first_dest;
- PredicateType m_pred;
- ValueType m_new_value;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const {
- const auto& myvalue_from = m_first_from[i];
-
- if (m_pred(myvalue_from)) {
- m_first_dest[i] = m_new_value;
- } else {
- m_first_dest[i] = myvalue_from;
- }
- }
-
- KOKKOS_FUNCTION
- StdReplaceIfCopyFunctor(InputIterator first_from, OutputIterator first_dest,
- PredicateType pred, ValueType new_value)
- : m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_pred(std::move(pred)),
- m_new_value(std::move(new_value)) {}
-};
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class PredicateType, class ValueType>
-OutputIteratorType replace_copy_if_impl(const std::string& label,
- const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- PredicateType pred,
- const ValueType& new_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using func_t =
- StdReplaceIfCopyFunctor<index_type, InputIteratorType, OutputIteratorType,
- PredicateType, ValueType>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_for(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first_from, first_dest, std::move(pred), new_value));
- ex.fence("Kokkos::replace_copy_if: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
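The predicate handed to replace_copy_if_impl is copied into the functor and
invoked inside a kernel, so it has to be device-callable. A sketch under that
assumption, with a hypothetical IsNegative functor and illustrative data:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

// operator() is marked KOKKOS_FUNCTION so it can run on the device
struct IsNegative {
  KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
};

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> src("src", 6), dst("dst", 6);
    Kokkos::parallel_for(
        "fill", Kokkos::RangePolicy<>(exec, 0, 6),
        KOKKOS_LAMBDA(int i) { src(i) = (i % 2 == 0) ? -i : i; });

    // negative entries are replaced by 0 on the fly while copying
    KE::replace_copy_if(exec, KE::cbegin(src), KE::cend(src),
                        KE::begin(dst), IsNegative{}, 0);
  }
  Kokkos::finalize();
  return 0;
}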
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class InputIterator, class PredicateType, class NewValueType>
-struct StdReplaceIfFunctor {
- using index_type = typename InputIterator::difference_type;
-
- InputIterator m_first;
- PredicateType m_predicate;
- NewValueType m_new_value;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const {
- if (m_predicate(m_first[i])) {
- m_first[i] = m_new_value;
- }
- }
-
- KOKKOS_FUNCTION
- StdReplaceIfFunctor(InputIterator first, PredicateType pred,
- NewValueType new_value)
- : m_first(std::move(first)),
- m_predicate(std::move(pred)),
- m_new_value(std::move(new_value)) {}
-};
-
-template <class ExecutionSpace, class IteratorType, class PredicateType,
- class ValueType>
-void replace_if_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last, PredicateType pred,
- const ValueType& new_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using func_t = StdReplaceIfFunctor<IteratorType, PredicateType, ValueType>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, std::move(pred), new_value));
- ex.fence("Kokkos::replace_if: fence after operation");
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
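A sketch of replace_if with a device-callable lambda as the predicate,
matching how replace_if_impl above moves the predicate into
StdReplaceIfFunctor; sizes and values are illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<double*> v("v", 100);
    Kokkos::parallel_for(
        "fill", Kokkos::RangePolicy<>(exec, 0, v.extent(0)),
        KOKKOS_LAMBDA(int i) { v(i) = i - 50.0; });

    // clamp negative entries to zero; the lambda is device-callable
    KE::replace_if(
        exec, KE::begin(v), KE::end(v),
        KOKKOS_LAMBDA(double x) { return x < 0.0; }, 0.0);
  }
  Kokkos::finalize();
  return 0;
}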
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <std_algorithms/Kokkos_Swap.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class InputIterator>
-struct StdReverseFunctor {
- using index_type = typename InputIterator::difference_type;
- static_assert(std::is_signed<index_type>::value,
- "Kokkos: StdReverseFunctor requires signed index type");
-
- InputIterator m_first;
- InputIterator m_last;
-
- KOKKOS_FUNCTION
- void operator()(index_type i) const {
-    // the Kokkos::Experimental::swap below performs the same exchange,
-    // but it does not work with Intel 18.0.5; writing the swap
-    // implementation inline here works around that compiler issue
-#ifdef KOKKOS_COMPILER_INTEL
- typename InputIterator::value_type tmp = std::move(m_first[i]);
- m_first[i] = std::move(m_last[-i - 1]);
- m_last[-i - 1] = std::move(tmp);
-#else
- ::Kokkos::Experimental::swap(m_first[i], m_last[-i - 1]);
-#endif
- }
-
- StdReverseFunctor(InputIterator first, InputIterator last)
- : m_first(std::move(first)), m_last(std::move(last)) {}
-};
-
-template <class ExecutionSpace, class InputIterator>
-void reverse_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using func_t = StdReverseFunctor<InputIterator>;
-
- // run
- if (last >= first + 2) {
-    // each index i swaps element i with its mirror (n-1-i) from the
-    // back, so the kernel only needs to cover half of the range
- const auto num_elements = Kokkos::Experimental::distance(first, last) / 2;
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first, last));
- ex.fence("Kokkos::reverse: fence after operation");
- }
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
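Index i of StdReverseFunctor exchanges element i with its mirror from the
back, which is why reverse_impl above launches only distance/2 iterations. A
minimal public-API sketch with illustrative contents:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> v("v", 7);
    Kokkos::parallel_for(
        "iota", Kokkos::RangePolicy<>(exec, 0, v.extent(0)),
        KOKKOS_LAMBDA(int i) { v(i) = i; });

    // 0..6 becomes 6..0; only three swap iterations are launched (7 / 2)
    KE::reverse(exec, KE::begin(v), KE::end(v));
  }
  Kokkos::finalize();
  return 0;
}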
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIterator, class OutputIterator>
-struct StdReverseCopyFunctor {
- static_assert(std::is_signed<IndexType>::value,
- "Kokkos: StdReverseCopyFunctor requires signed index type");
-
- InputIterator m_last;
- OutputIterator m_dest_first;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const { m_dest_first[i] = m_last[-1 - i]; }
-
- StdReverseCopyFunctor(InputIterator _last, OutputIterator _dest_first)
- : m_last(std::move(_last)), m_dest_first(std::move(_dest_first)) {}
-};
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator reverse_copy_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_t =
- StdReverseCopyFunctor<index_type, InputIterator, OutputIterator>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(last, d_first));
- ex.fence("Kokkos::reverse_copy: fence after operation");
-
- // return
- return d_first + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
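StdReverseCopyFunctor writes m_dest_first[i] = m_last[-1 - i], so iteration 0
reads the final source element. A sketch of the corresponding public call,
with illustrative data:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> src("src", 5), dst("dst", 5);
    Kokkos::parallel_for(
        "iota", Kokkos::RangePolicy<>(exec, 0, src.extent(0)),
        KOKKOS_LAMBDA(int i) { src(i) = i; });

    // dst(i) = src(4 - i): dst ends up as 4,3,2,1,0; src is unchanged
    auto ret = KE::reverse_copy(exec, KE::cbegin(src), KE::cend(src),
                                KE::begin(dst));
    // ret == KE::begin(dst) + 5
    (void)ret;
  }
  Kokkos::finalize();
  return 0;
}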
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <std_algorithms/Kokkos_Swap.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class IteratorType1, class IteratorType2>
-struct StdSwapRangesFunctor {
- IteratorType1 m_first1;
- IteratorType2 m_first2;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const {
-    // the Kokkos::Experimental::swap below performs the same exchange,
-    // but it does not work with Intel 18.0.5; writing the swap
-    // implementation inline here works around that compiler issue
-#ifdef KOKKOS_COMPILER_INTEL
- typename IteratorType1::value_type tmp = std::move(m_first1[i]);
- m_first1[i] = std::move(m_first2[i]);
- m_first2[i] = std::move(tmp);
-#else
- ::Kokkos::Experimental::swap(m_first1[i], m_first2[i]);
-#endif
- }
-
- KOKKOS_FUNCTION
- StdSwapRangesFunctor(IteratorType1 _first1, IteratorType2 _first2)
- : m_first1(std::move(_first1)), m_first2(std::move(_first2)) {}
-};
-
-template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType2 swap_ranges_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType1 first1,
- IteratorType1 last1, IteratorType2 first2) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first1, first2);
- Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
- Impl::expect_valid_range(first1, last1);
-
- // aliases
- using index_type = typename IteratorType1::difference_type;
- using func_t = StdSwapRangesFunctor<index_type, IteratorType1, IteratorType2>;
-
- // run
- const auto num_elements_to_swap =
- Kokkos::Experimental::distance(first1, last1);
- ::Kokkos::parallel_for(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_to_swap),
- func_t(first1, first2));
- ex.fence("Kokkos::swap_ranges: fence after operation");
-
- // return
- return first2 + num_elements_to_swap;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
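A sketch of the element-wise parallel swap implemented above, via the public
Kokkos::Experimental::swap_ranges API; the returned iterator is
first2 + num_elements_to_swap as in swap_ranges_impl. Labels and values are
illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> a("a", 4), b("b", 4);
    Kokkos::deep_copy(a, 1);
    Kokkos::deep_copy(b, 2);

    // after the call a holds all 2s and b holds all 1s
    auto it = KE::swap_ranges(exec, KE::begin(a), KE::end(a), KE::begin(b));
    // it == KE::begin(b) + 4
    (void)it;
  }
  Kokkos::finalize();
  return 0;
}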
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIterator, class OutputIterator,
- class UnaryFunctorType>
-struct StdTransformFunctor {
- InputIterator m_first;
- OutputIterator m_d_first;
- UnaryFunctorType m_unary_op;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const { m_d_first[i] = m_unary_op(m_first[i]); }
-
- KOKKOS_FUNCTION
- StdTransformFunctor(InputIterator _first, OutputIterator _m_d_first,
- UnaryFunctorType _functor)
- : m_first(std::move(_first)),
- m_d_first(std::move(_m_d_first)),
- m_unary_op(std::move(_functor)) {}
-};
-
-template <class IndexType, class InputIterator1, class InputIterator2,
- class OutputIterator, class BinaryFunctorType>
-struct StdTransformBinaryFunctor {
- InputIterator1 m_first1;
- InputIterator2 m_first2;
- OutputIterator m_d_first;
- BinaryFunctorType m_binary_op;
-
- KOKKOS_FUNCTION
- void operator()(IndexType i) const {
- m_d_first[i] = m_binary_op(m_first1[i], m_first2[i]);
- }
-
- KOKKOS_FUNCTION
- StdTransformBinaryFunctor(InputIterator1 _first1, InputIterator2 _first2,
- OutputIterator _m_d_first,
- BinaryFunctorType _functor)
- : m_first1(std::move(_first1)),
- m_first2(std::move(_first2)),
- m_d_first(std::move(_m_d_first)),
- m_binary_op(std::move(_functor)) {}
-};
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class UnaryOperation>
-OutputIterator transform_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator first1,
- InputIterator last1, OutputIterator d_first,
- UnaryOperation unary_op) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first1, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first1, d_first);
- Impl::expect_valid_range(first1, last1);
-
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_t = StdTransformFunctor<index_type, InputIterator, OutputIterator,
- UnaryOperation>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first1, last1);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first1, d_first, unary_op));
- ex.fence("Kokkos::transform: fence after operation");
-
- // return
- return d_first + num_elements;
-}
-
-template <class ExecutionSpace, class InputIterator1, class InputIterator2,
- class OutputIterator, class BinaryOperation>
-OutputIterator transform_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator1 first1,
- InputIterator1 last1, InputIterator2 first2,
- OutputIterator d_first,
- BinaryOperation binary_op) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first1, first2, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first1, first2,
- d_first);
- Impl::expect_valid_range(first1, last1);
-
- // aliases
- using index_type = typename InputIterator1::difference_type;
- using func_t =
- StdTransformBinaryFunctor<index_type, InputIterator1, InputIterator2,
- OutputIterator, BinaryOperation>;
-
- // run
- const auto num_elements = Kokkos::Experimental::distance(first1, last1);
- ::Kokkos::parallel_for(label,
- RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_t(first1, first2, d_first, binary_op));
- ex.fence("Kokkos::transform: fence after operation");
- return d_first + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
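A sketch exercising both transform_impl overloads above through the public
API: the unary form writes unary_op(x(i)) and the binary form writes
binary_op(x(i), y(i)); the operators must be device-callable. Data are
illustrative:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<double*> x("x", 8), y("y", 8), z("z", 8);
    Kokkos::deep_copy(x, 2.0);
    Kokkos::deep_copy(y, 3.0);

    // unary overload: z(i) = x(i) * x(i)
    KE::transform(exec, KE::begin(x), KE::end(x), KE::begin(z),
                  KOKKOS_LAMBDA(double v) { return v * v; });

    // binary overload: z(i) = x(i) + y(i)
    KE::transform(exec, KE::begin(x), KE::end(x), KE::begin(y), KE::begin(z),
                  KOKKOS_LAMBDA(double a, double b) { return a + b; });
  }
  Kokkos::finalize();
  return 0;
}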
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
- class FirstDest, class BinaryOpType, class UnaryOpType>
-struct TransformExclusiveScanFunctor {
- using execution_space = ExeSpace;
- using value_type =
- ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
-
- ValueType m_init_value;
- FirstFrom m_first_from;
- FirstDest m_first_dest;
- BinaryOpType m_binary_op;
- UnaryOpType m_unary_op;
-
- KOKKOS_FUNCTION
- TransformExclusiveScanFunctor(ValueType init, FirstFrom first_from,
- FirstDest first_dest, BinaryOpType bop,
- UnaryOpType uop)
- : m_init_value(std::move(init)),
- m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_binary_op(std::move(bop)),
- m_unary_op(std::move(uop)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, value_type& update,
- const bool final_pass) const {
- if (final_pass) {
- if (i == 0) {
-        // for both ExclusiveScan and TransformExclusiveScan the first
-        // output element is the init value itself, unmodified
- m_first_dest[i] = m_init_value;
- } else {
- m_first_dest[i] = m_binary_op(update.val, m_init_value);
- }
- }
-
- const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
- this->join(update, tmp);
- }
-
- KOKKOS_FUNCTION
- void init(value_type& update) const {
- update.val = {};
- update.is_initial = true;
- }
-
- KOKKOS_FUNCTION
- void join(value_type& update, const value_type& input) const {
- if (update.is_initial) {
- update.val = input.val;
- } else {
- update.val = m_binary_op(update.val, input.val);
- }
- update.is_initial = false;
- }
-};
-
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class ValueType, class BinaryOpType,
- class UnaryOpType>
-OutputIteratorType transform_exclusive_scan_impl(
- const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop,
- UnaryOpType uop) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using func_type =
- TransformExclusiveScanFunctor<ExecutionSpace, index_type, ValueType,
- InputIteratorType, OutputIteratorType,
- BinaryOpType, UnaryOpType>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_scan(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(init_value, first_from, first_dest, bop, uop));
- ex.fence("Kokkos::transform_exclusive_scan: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
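TransformExclusiveScanFunctor's wrapper value type tracks is_initial so that
join works for arbitrary binary operations without requiring a neutral
element. Semantically, out(0) == init, and out(i) combines init (via bop) with
the uop-transformed prefix in(0..i-1). A sketch with illustrative numbers:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;

    Kokkos::View<int*> in("in", 5), out("out", 5);
    Kokkos::deep_copy(in, 2);

    // uop squares each input (giving 4), bop adds; with init = 10 the
    // result is out = {10, 14, 18, 22, 26}
    KE::transform_exclusive_scan(
        exec, KE::cbegin(in), KE::cend(in), KE::begin(out), 10,
        KOKKOS_LAMBDA(int a, int b) { return a + b; },
        KOKKOS_LAMBDA(int x) { return x * x; });
  }
  Kokkos::finalize();
  return 0;
}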
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0, Copyright (2020) NTESS: standard Kokkos BSD-3-Clause
-// license header, identical to the copy reproduced in full above.
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
-#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
- class FirstDest, class BinaryOpType, class UnaryOpType>
-struct TransformInclusiveScanNoInitValueFunctor {
- using execution_space = ExeSpace;
- using value_type = ValueWrapperForNoNeutralElement<ValueType>;
-
- FirstFrom m_first_from;
- FirstDest m_first_dest;
- BinaryOpType m_binary_op;
- UnaryOpType m_unary_op;
-
- KOKKOS_FUNCTION
- TransformInclusiveScanNoInitValueFunctor(FirstFrom first_from,
- FirstDest first_dest,
- BinaryOpType bop, UnaryOpType uop)
- : m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_binary_op(std::move(bop)),
- m_unary_op(std::move(uop)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, value_type& update,
- const bool final_pass) const {
- const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
- this->join(update, tmp);
- if (final_pass) {
- m_first_dest[i] = update.val;
- }
- }
-
- KOKKOS_FUNCTION
- void init(value_type& update) const {
- update.val = {};
- update.is_initial = true;
- }
-
- KOKKOS_FUNCTION
- void join(value_type& update, const value_type& input) const {
- if (update.is_initial) {
- update.val = input.val;
- } else {
- update.val = m_binary_op(update.val, input.val);
- }
- update.is_initial = false;
- }
-};
-
-template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
- class FirstDest, class BinaryOpType, class UnaryOpType>
-struct TransformInclusiveScanWithInitValueFunctor {
- using execution_space = ExeSpace;
- using value_type = ValueWrapperForNoNeutralElement<ValueType>;
-
- FirstFrom m_first_from;
- FirstDest m_first_dest;
- BinaryOpType m_binary_op;
- UnaryOpType m_unary_op;
- ValueType m_init;
-
- KOKKOS_FUNCTION
- TransformInclusiveScanWithInitValueFunctor(FirstFrom first_from,
- FirstDest first_dest,
- BinaryOpType bop, UnaryOpType uop,
- ValueType init)
- : m_first_from(std::move(first_from)),
- m_first_dest(std::move(first_dest)),
- m_binary_op(std::move(bop)),
- m_unary_op(std::move(uop)),
- m_init(std::move(init)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, value_type& update,
- const bool final_pass) const {
- const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
- this->join(update, tmp);
-
- if (final_pass) {
- m_first_dest[i] = m_binary_op(update.val, m_init);
- }
- }
-
- KOKKOS_FUNCTION
- void init(value_type& update) const {
- update.val = {};
- update.is_initial = true;
- }
-
- KOKKOS_FUNCTION
- void join(value_type& update, const value_type& input) const {
- if (update.is_initial) {
- update.val = input.val;
- } else {
- update.val = m_binary_op(update.val, input.val);
- }
- update.is_initial = false;
- }
-};
-
-// -------------------------------------------------------------
-// transform_inclusive_scan_impl without init_value
-// -------------------------------------------------------------
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType>
-OutputIteratorType transform_inclusive_scan_impl(const std::string& label,
- const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- BinaryOpType binary_op,
- UnaryOpType unary_op) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using value_type =
- std::remove_const_t<typename InputIteratorType::value_type>;
- using func_type = TransformInclusiveScanNoInitValueFunctor<
- ExecutionSpace, index_type, value_type, InputIteratorType,
- OutputIteratorType, BinaryOpType, UnaryOpType>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_scan(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(first_from, first_dest, binary_op, unary_op));
- ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
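-
-// For comparison (illustrative only): with input {1, 2, 3, 4}, unary op
-// v -> 2*v, and binary op +, the no-init-value path above produces
-// {2, 6, 12, 20}; unlike the exclusive variant, element i includes the
-// transformed value of element i itself.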
-
-// -------------------------------------------------------------
-// transform_inclusive_scan_impl with init_value
-// -------------------------------------------------------------
-template <class ExecutionSpace, class InputIteratorType,
- class OutputIteratorType, class BinaryOpType, class UnaryOpType,
- class ValueType>
-OutputIteratorType transform_inclusive_scan_impl(
- const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest, BinaryOpType binary_op, UnaryOpType unary_op,
- ValueType init_value) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
- Impl::static_assert_iterators_have_matching_difference_type(first_from,
- first_dest);
- Impl::expect_valid_range(first_from, last_from);
-
- // aliases
- using index_type = typename InputIteratorType::difference_type;
- using func_type = TransformInclusiveScanWithInitValueFunctor<
- ExecutionSpace, index_type, ValueType, InputIteratorType,
- OutputIteratorType, BinaryOpType, UnaryOpType>;
-
- // run
- const auto num_elements =
- Kokkos::Experimental::distance(first_from, last_from);
- ::Kokkos::parallel_scan(
- label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(first_from, first_dest, binary_op, unary_op, init_value));
- ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
-
- // return
- return first_dest + num_elements;
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
-#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include "Kokkos_Constraints.hpp"
-#include "Kokkos_HelperPredicates.hpp"
-#include "Kokkos_CopyCopyN.hpp"
-#include <std_algorithms/Kokkos_Distance.hpp>
-#include <string>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-template <class IndexType, class InputIt, class OutputIt,
- class BinaryPredicateType>
-struct StdUniqueCopyFunctor {
- InputIt m_first_from;
- InputIt m_last_from;
- OutputIt m_first_dest;
- BinaryPredicateType m_pred;
-
- KOKKOS_FUNCTION
- StdUniqueCopyFunctor(InputIt first_from, InputIt last_from,
- OutputIt first_dest, BinaryPredicateType pred)
- : m_first_from(std::move(first_from)),
- m_last_from(std::move(last_from)),
- m_first_dest(std::move(first_dest)),
- m_pred(std::move(pred)) {}
-
- KOKKOS_FUNCTION
- void operator()(const IndexType i, IndexType& update,
- const bool final_pass) const {
- const auto& val_i = m_first_from[i];
- const auto& val_ip1 = m_first_from[i + 1];
-
- if (final_pass) {
- if (!m_pred(val_i, val_ip1)) {
- m_first_dest[update] = val_i;
- }
- }
-
- if (!m_pred(val_i, val_ip1)) {
- update += 1;
- }
- }
-};
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator,
- class PredicateType>
-OutputIterator unique_copy_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first,
- PredicateType pred) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
- Impl::expect_valid_range(first, last);
-
-  // branch for trivial vs. non-trivial cases
- const auto num_elements = Kokkos::Experimental::distance(first, last);
- if (num_elements == 0) {
- return d_first;
- } else if (num_elements == 1) {
- return Impl::copy_impl("Kokkos::copy_from_unique_copy", ex, first, last,
- d_first);
- } else {
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_type = StdUniqueCopyFunctor<index_type, InputIterator,
- OutputIterator, PredicateType>;
-
-    // Note that we run the scan over num_elements - 1 entries: the functor
-    // reads element i+1, so it must not run on the last element. Rather than
-    // bounds-checking inside the functor, we run a "safe" scan over the first
-    // num_elements - 1 entries and copy the final element afterwards.
- const auto scan_size = num_elements - 1;
- index_type count = 0;
- ::Kokkos::parallel_scan(label,
- RangePolicy<ExecutionSpace>(ex, 0, scan_size),
- func_type(first, last, d_first, pred), count);
-
- return Impl::copy_impl("Kokkos::copy_from_unique_copy", ex,
- first + scan_size, last, d_first + count);
- }
-}
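-
-// Worked example (illustrative only): for input {1, 1, 2, 3, 3} with the
-// default equality predicate, the scan covers indices 0..3. A write occurs
-// whenever an element differs from its successor: i == 1 writes 1 and
-// i == 2 writes 2, so count == 2. The trailing copy then appends the last
-// element, 3, giving the output {1, 2, 3} and returning d_first + 3.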
-
-template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator unique_copy_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator first,
- InputIterator last, OutputIterator d_first) {
- // checks
- Impl::static_assert_random_access_and_accessible(ex, first, d_first);
- Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
- Impl::expect_valid_range(first, last);
-
- // aliases
- using value_type1 = typename InputIterator::value_type;
- using value_type2 = typename OutputIterator::value_type;
-
- // default binary predicate uses ==
- using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
-
- // run
- return unique_copy_impl(label, ex, first, last, d_first, binary_pred_t());
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
-#define KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-//
-// scalar wrapper used for reductions and scans
-// when no neutral element is available
-//
-template <class Scalar>
-struct ValueWrapperForNoNeutralElement {
- Scalar val;
- bool is_initial = true;
-
- KOKKOS_FUNCTION
- void operator=(const ValueWrapperForNoNeutralElement& rhs) {
- val = rhs.val;
- is_initial = rhs.is_initial;
- }
-};
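-
-// Sketch of the intended semantics (illustrative only): init() cannot set a
-// neutral element for an arbitrary user-provided binary op, so the
-// is_initial flag lets join() recognize a freshly initialized partial
-// result and overwrite it instead of combining:
-//
-//   ValueWrapperForNoNeutralElement<int> a;        // is_initial == true
-//   ValueWrapperForNoNeutralElement<int> b{5, false};
-//   // join(a, b) then copies b.val into a.val rather than applying the op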
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/// \file Kokkos_DynRankView.hpp
-/// \brief Declaration and definition of Kokkos::DynRankView.
-///
-/// This header file declares and defines Kokkos::DynRankView and its
-/// related nonmember functions.
-
-#ifndef KOKKOS_DYNRANKVIEW_HPP
-#define KOKKOS_DYNRANKVIEW_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
-#endif
-
-#include <Kokkos_Core.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <type_traits>
-
-namespace Kokkos {
-
-template <typename DataType, class... Properties>
-class DynRankView; // forward declare
-
-namespace Impl {
-
-template <typename Specialize>
-struct DynRankDimTraits {
- enum : size_t { unspecified = KOKKOS_INVALID_INDEX };
-
-  // Compute the rank of the view from the specified dimension arguments.
- KOKKOS_INLINE_FUNCTION
- static size_t computeRank(const size_t N0, const size_t N1, const size_t N2,
- const size_t N3, const size_t N4, const size_t N5,
- const size_t N6, const size_t /* N7 */) {
- return (
- (N6 == unspecified && N5 == unspecified && N4 == unspecified &&
- N3 == unspecified && N2 == unspecified && N1 == unspecified &&
- N0 == unspecified)
- ? 0
- : ((N6 == unspecified && N5 == unspecified && N4 == unspecified &&
- N3 == unspecified && N2 == unspecified && N1 == unspecified)
- ? 1
- : ((N6 == unspecified && N5 == unspecified &&
- N4 == unspecified && N3 == unspecified &&
- N2 == unspecified)
- ? 2
- : ((N6 == unspecified && N5 == unspecified &&
- N4 == unspecified && N3 == unspecified)
- ? 3
- : ((N6 == unspecified && N5 == unspecified &&
- N4 == unspecified)
- ? 4
- : ((N6 == unspecified &&
- N5 == unspecified)
- ? 5
- : ((N6 == unspecified)
- ? 6
- : 7)))))));
- }
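-
-  // Example (illustrative): computeRank(10, 20, unspecified, unspecified,
-  // unspecified, unspecified, unspecified, unspecified) returns 2, as N0 and
-  // N1 are the only specified extents. N7 is ignored because the maximum
-  // rank of a DynRankView is 7.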
-
-  // Compute the rank of the view from the specified layout dimensions.
- template <typename Layout>
- KOKKOS_INLINE_FUNCTION static size_t computeRank(const Layout& layout) {
- return computeRank(layout.dimension[0], layout.dimension[1],
- layout.dimension[2], layout.dimension[3],
- layout.dimension[4], layout.dimension[5],
- layout.dimension[6], layout.dimension[7]);
- }
-
- // Extra overload to match that for specialize types v2
- template <typename Layout, typename... P>
- KOKKOS_INLINE_FUNCTION static size_t computeRank(
- const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
- const Layout& layout) {
- return computeRank(layout);
- }
-
- // Create the layout for the rank-7 view.
- // Non-strided Layout
- template <typename Layout>
- KOKKOS_INLINE_FUNCTION static std::enable_if_t<
- (std::is_same<Layout, Kokkos::LayoutRight>::value ||
- std::is_same<Layout, Kokkos::LayoutLeft>::value),
- Layout>
- createLayout(const Layout& layout) {
- return Layout(layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
- layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
- layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
- layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
- layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
- layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
- layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
- layout.dimension[7] != unspecified ? layout.dimension[7] : 1);
- }
-
- // LayoutStride
- template <typename Layout>
- KOKKOS_INLINE_FUNCTION static std::enable_if_t<
- (std::is_same<Layout, Kokkos::LayoutStride>::value), Layout>
- createLayout(const Layout& layout) {
- return Layout(layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
- layout.stride[0],
- layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
- layout.stride[1],
- layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
- layout.stride[2],
- layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
- layout.stride[3],
- layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
- layout.stride[4],
- layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
- layout.stride[5],
- layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
- layout.stride[6],
- layout.dimension[7] != unspecified ? layout.dimension[7] : 1,
- layout.stride[7]);
- }
-
- // Extra overload to match that for specialize types
- template <typename Traits, typename... P>
- KOKKOS_INLINE_FUNCTION static std::enable_if_t<
- (std::is_same<typename Traits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename Traits::array_layout, Kokkos::LayoutLeft>::value ||
- std::is_same<typename Traits::array_layout,
- Kokkos::LayoutStride>::value),
- typename Traits::array_layout>
- createLayout(const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
- const typename Traits::array_layout& layout) {
- return createLayout(layout);
- }
-
- // Create a view from the given dimension arguments.
-  // This is only necessary because the shmem constructor doesn't take a
-  // layout. NDE: shmem Views are not compatible with the added view_alloc
-  // value_type / fad_dim deduction functionality.
- template <typename ViewType, typename ViewArg>
- static ViewType createView(const ViewArg& arg, const size_t N0,
- const size_t N1, const size_t N2, const size_t N3,
- const size_t N4, const size_t N5, const size_t N6,
- const size_t N7) {
- return ViewType(arg, N0 != unspecified ? N0 : 1, N1 != unspecified ? N1 : 1,
- N2 != unspecified ? N2 : 1, N3 != unspecified ? N3 : 1,
- N4 != unspecified ? N4 : 1, N5 != unspecified ? N5 : 1,
- N6 != unspecified ? N6 : 1, N7 != unspecified ? N7 : 1);
- }
-};
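-
-// Example (illustrative): createLayout on a LayoutRight with dimensions
-// {10, 20, unspecified, ..., unspecified} yields
-// LayoutRight(10, 20, 1, 1, 1, 1, 1, 1); unspecified extents collapse to 1
-// so the rank-7 backing view remains valid.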
-
-// Non-strided Layout
-template <typename Layout, typename iType>
-KOKKOS_INLINE_FUNCTION static std::enable_if_t<
- (std::is_same<Layout, Kokkos::LayoutRight>::value ||
- std::is_same<Layout, Kokkos::LayoutLeft>::value) &&
- std::is_integral<iType>::value,
- Layout>
-reconstructLayout(const Layout& layout, iType dynrank) {
- return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
- dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
- dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
- dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
- dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
- dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
- dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
- dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX);
-}
-
-// LayoutStride
-template <typename Layout, typename iType>
-KOKKOS_INLINE_FUNCTION static std::enable_if_t<
- (std::is_same<Layout, Kokkos::LayoutStride>::value) &&
- std::is_integral<iType>::value,
- Layout>
-reconstructLayout(const Layout& layout, iType dynrank) {
- return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
- dynrank > 0 ? layout.stride[0] : (0),
- dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
- dynrank > 1 ? layout.stride[1] : (0),
- dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
- dynrank > 2 ? layout.stride[2] : (0),
- dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
- dynrank > 3 ? layout.stride[3] : (0),
- dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
- dynrank > 4 ? layout.stride[4] : (0),
- dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
- dynrank > 5 ? layout.stride[5] : (0),
- dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
- dynrank > 6 ? layout.stride[6] : (0),
- dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX,
- dynrank > 7 ? layout.stride[7] : (0));
-}
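-
-// Example (illustrative): reconstructLayout(layout, 2) on a LayoutStride
-// keeps dimension/stride pairs 0 and 1 and resets entries 2..7 to
-// KOKKOS_INVALID_INDEX and 0, producing a layout from which a rank-2 view
-// can be constructed out of this rank-7 representation.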
-
-/** \brief Debug bounds-checking routines */
-// Enhanced debug checking - most infrastructure matches that of the functions
-// in Kokkos_ViewMapping; additionally checks that arguments beyond the
-// runtime rank are zero
-template <unsigned, typename iType0, class MapType>
-KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
- const iType0&, const MapType&) {
- return true;
-}
-
-template <unsigned R, typename iType0, class MapType, typename iType1,
- class... Args>
-KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
- const iType0& rank, const MapType& map, const iType1& i, Args... args) {
- if (static_cast<iType0>(R) < rank) {
- return (size_t(i) < map.extent(R)) &&
- dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
- } else if (i != 0) {
- KOKKOS_IMPL_DO_NOT_USE_PRINTF(
- "DynRankView Debug Bounds Checking Error: at rank %u\n Extra "
- "arguments beyond the rank must be zero \n",
- R);
- return (false) &&
- dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
- } else {
- return (true) &&
- dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
- }
-}
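-
-// Example (illustrative): for a rank-2 map with extents {10, 20}, the call
-// dyn_rank_view_verify_operator_bounds<0>(2, map, 3, 7, 0, 0, 0, 0, 0)
-// checks 3 < 10 and 7 < 20 for ranks 0 and 1, and for the remaining
-// arguments only requires that they are zero.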
-
-template <unsigned, class MapType>
-inline void dyn_rank_view_error_operator_bounds(char*, int, const MapType&) {}
-
-template <unsigned R, class MapType, class iType, class... Args>
-inline void dyn_rank_view_error_operator_bounds(char* buf, int len,
- const MapType& map,
- const iType& i, Args... args) {
-  const int n = snprintf(
-      buf, len, " %lu < %lu %c", static_cast<unsigned long>(i),
-      static_cast<unsigned long>(map.extent(R)), (sizeof...(Args) ? ',' : ')'));
- dyn_rank_view_error_operator_bounds<R + 1>(buf + n, len - n, map, args...);
-}
-
-// op_rank = rank of the operator version that was called
-template <typename MemorySpace, typename iType0, typename iType1, class MapType,
- class... Args>
-KOKKOS_INLINE_FUNCTION void dyn_rank_view_verify_operator_bounds(
- const iType0& op_rank, const iType1& rank,
- const Kokkos::Impl::SharedAllocationTracker& tracker, const MapType& map,
- Args... args) {
- if (static_cast<iType0>(rank) > op_rank) {
- Kokkos::abort(
- "DynRankView Bounds Checking Error: Need at least rank arguments to "
- "the operator()");
- }
-
- if (!dyn_rank_view_verify_operator_bounds<0>(rank, map, args...)) {
- KOKKOS_IF_ON_HOST(
- (enum {LEN = 1024}; char buffer[LEN];
- const std::string label = tracker.template get_label<MemorySpace>();
- int n = snprintf(buffer, LEN, "DynRankView bounds error of view %s (",
- label.c_str());
- dyn_rank_view_error_operator_bounds<0>(buffer + n, LEN - n, map,
- args...);
- Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
-
- KOKKOS_IF_ON_DEVICE(
- ((void)tracker; Kokkos::abort("DynRankView bounds error");))
- }
-}
-
-/** \brief Assign compatible default mappings */
-struct ViewToDynRankViewTag {};
-
-} // namespace Impl
-
-namespace Impl {
-
-template <class DstTraits, class SrcTraits>
-class ViewMapping<
- DstTraits, SrcTraits,
- std::enable_if_t<(std::is_same<typename DstTraits::memory_space,
- typename SrcTraits::memory_space>::value &&
- std::is_void<typename DstTraits::specialize>::value &&
- std::is_void<typename SrcTraits::specialize>::value &&
- (std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value ||
- ((std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value) &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value)))),
- Kokkos::Impl::ViewToDynRankViewTag>> {
- private:
- enum {
- is_assignable_value_type =
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::value_type>::value ||
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::const_value_type>::value
- };
-
- enum {
- is_assignable_layout =
- std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value
- };
-
- public:
- enum { is_assignable = is_assignable_value_type && is_assignable_layout };
-
- using DstType = ViewMapping<DstTraits, typename DstTraits::specialize>;
- using SrcType = ViewMapping<SrcTraits, typename SrcTraits::specialize>;
-
- template <typename DT, typename... DP, typename ST, typename... SP>
- KOKKOS_INLINE_FUNCTION static void assign(
- Kokkos::DynRankView<DT, DP...>& dst, const Kokkos::View<ST, SP...>& src) {
- static_assert(
- is_assignable_value_type,
- "View assignment must have same value type or const = non-const");
-
- static_assert(
- is_assignable_layout,
- "View assignment must have compatible layout or have rank <= 1");
-
- // Removed dimension checks...
-
- using dst_offset_type = typename DstType::offset_type;
- dst.m_map.m_impl_offset = dst_offset_type(
- std::integral_constant<unsigned, 0>(),
-        src.layout());  // Check this for integer input for padding, etc.
- dst.m_map.m_impl_handle = Kokkos::Impl::ViewDataHandle<DstTraits>::assign(
- src.m_map.m_impl_handle, src.m_track.m_tracker);
- dst.m_track.assign(src.m_track.m_tracker, DstTraits::is_managed);
- dst.m_rank = src.Rank;
- }
-};
-
-} // namespace Impl
-
-/** \class DynRankView
- * \brief Container that creates a Kokkos view with rank determined at
- * runtime. Essentially this is a rank-7 view underneath.
- *
- * Changes from View
- * 1. The rank of the DynRankView is returned by the method rank()
- * 2. Max rank of a DynRankView is 7
- * 3. subview called with 'subview(...)' or 'subdynrankview(...)' (backward
- * compatibility)
- * 4. Every subview is returned with LayoutStride
- * 5. Copy and Copy-Assign View to DynRankView
- * 6. deep_copy between Views and DynRankViews
- * 7. rank( view ); returns the rank of View or DynRankView
- *
- */
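-
-// A minimal usage sketch (illustrative only, assuming the default memory
-// space is host accessible):
-//
-//   Kokkos::DynRankView<double> a("a", 10, 20);  // rank deduced as 2
-//   Kokkos::DynRankView<double> b("b", 4);       // rank deduced as 1
-//   a(3, 7) = 1.0;                               // rank-2 access
-//
-// The same type holds both a and b: the rank is a runtime property of the
-// object rather than part of the type, in contrast to Kokkos::View.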
-
-template <class>
-struct is_dyn_rank_view : public std::false_type {};
-
-template <class D, class... P>
-struct is_dyn_rank_view<Kokkos::DynRankView<D, P...>> : public std::true_type {
-};
-
-template <typename DataType, class... Properties>
-class DynRankView : public ViewTraits<DataType, Properties...> {
- static_assert(!std::is_array<DataType>::value &&
- !std::is_pointer<DataType>::value,
- "Cannot template DynRankView with array or pointer datatype - "
- "must be pod");
-
- private:
- template <class, class...>
- friend class DynRankView;
- template <class, class...>
- friend class Kokkos::Impl::ViewMapping;
-
- public:
- using drvtraits = ViewTraits<DataType, Properties...>;
-
- using view_type = View<DataType*******, Properties...>;
-
- using traits = ViewTraits<DataType*******, Properties...>;
-
- private:
- using map_type =
- Kokkos::Impl::ViewMapping<traits, typename traits::specialize>;
- using track_type = Kokkos::Impl::SharedAllocationTracker;
-
- track_type m_track;
- map_type m_map;
- unsigned m_rank;
-
- public:
- KOKKOS_INLINE_FUNCTION
- view_type& DownCast() const { return (view_type&)(*this); }
- KOKKOS_INLINE_FUNCTION
- const view_type& ConstDownCast() const { return (const view_type&)(*this); }
-
-  // Types below - at least the HostMirror requires the value_type, NOT the
-  // rank-7 data_type of the traits
-
- /** \brief Compatible view of array of scalar types */
- using array_type = DynRankView<
- typename drvtraits::scalar_array_type, typename drvtraits::array_layout,
- typename drvtraits::device_type, typename drvtraits::memory_traits>;
-
- /** \brief Compatible view of const data type */
- using const_type = DynRankView<
- typename drvtraits::const_data_type, typename drvtraits::array_layout,
- typename drvtraits::device_type, typename drvtraits::memory_traits>;
-
- /** \brief Compatible view of non-const data type */
- using non_const_type = DynRankView<
- typename drvtraits::non_const_data_type, typename drvtraits::array_layout,
- typename drvtraits::device_type, typename drvtraits::memory_traits>;
-
- /** \brief Compatible HostMirror view */
- using HostMirror = DynRankView<typename drvtraits::non_const_data_type,
- typename drvtraits::array_layout,
- typename drvtraits::host_mirror_space>;
-
- //----------------------------------------
- // Domain rank and extents
-
- // enum { Rank = map_type::Rank }; //Will be dyn rank of 7 always, keep the
- // enum?
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
- extent(const iType& r) const {
- return m_map.extent(r);
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, int>
- extent_int(const iType& r) const {
- return static_cast<int>(m_map.extent(r));
- }
-
- KOKKOS_INLINE_FUNCTION constexpr typename traits::array_layout layout() const;
-
- //----------------------------------------
- /* Deprecate all 'dimension' functions in favor of
- * ISO/C++ vocabulary 'extent'.
- */
-
- KOKKOS_INLINE_FUNCTION constexpr size_t size() const {
- return m_map.extent(0) * m_map.extent(1) * m_map.extent(2) *
- m_map.extent(3) * m_map.extent(4) * m_map.extent(5) *
- m_map.extent(6) * m_map.extent(7);
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
- return m_map.stride_0();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
- return m_map.stride_1();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
- return m_map.stride_2();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
- return m_map.stride_3();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
- return m_map.stride_4();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
- return m_map.stride_5();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
- return m_map.stride_6();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
- return m_map.stride_7();
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- m_map.stride(s);
- }
-
- //----------------------------------------
- // Range span is the span which contains all members.
-
- using reference_type = typename map_type::reference_type;
- using pointer_type = typename map_type::pointer_type;
-
- enum {
- reference_type_is_lvalue_reference =
- std::is_lvalue_reference<reference_type>::value
- };
-
- KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
- KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
- return m_map.span_is_contiguous();
- }
- KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
- return m_map.data();
- }
- KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
- return (m_map.data() != nullptr);
- }
-
- //----------------------------------------
- // Allow specializations to query their specialized map
- KOKKOS_INLINE_FUNCTION
- const Kokkos::Impl::ViewMapping<traits, typename traits::specialize>&
- impl_map() const {
- return m_map;
- }
-
- //----------------------------------------
-
- private:
- enum {
- is_layout_left =
- std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value,
-
- is_layout_right =
- std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value,
-
- is_layout_stride = std::is_same<typename traits::array_layout,
- Kokkos::LayoutStride>::value,
-
- is_default_map = std::is_void<typename traits::specialize>::value &&
- (is_layout_left || is_layout_right || is_layout_stride)
- };
-
-// Bounds checking macros
-#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
-
-// rank of the calling operator - included as first argument in ARG
-#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG) \
- Kokkos::Impl::runtime_check_memory_access_violation< \
- typename traits::memory_space>( \
- "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
- "space"); \
- Kokkos::Impl::dyn_rank_view_verify_operator_bounds< \
- typename traits::memory_space> \
- ARG;
-
-#else
-
-#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG) \
- Kokkos::Impl::runtime_check_memory_access_violation< \
- typename traits::memory_space>( \
- "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
- "space");
-
-#endif
-
- public:
- KOKKOS_INLINE_FUNCTION
- constexpr unsigned rank() const { return m_rank; }
-
- // operators ()
- // Rank 0
- KOKKOS_INLINE_FUNCTION
- reference_type operator()() const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((0, this->rank(), m_track, m_map))
- return impl_map().reference();
- // return m_map.reference(0,0,0,0,0,0,0);
- }
-
- // Rank 1
- // This assumes a contiguous underlying memory (i.e. no padding, no
- // striding...)
- template <typename iType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- std::is_same<typename drvtraits::value_type,
- typename drvtraits::scalar_array_type>::value &&
- std::is_integral<iType>::value,
- reference_type>
- operator[](const iType& i0) const {
-    // Phalanx violates this, since it uses the operator to access ALL
-    // elements in the allocation; hence the bounds check stays disabled:
-    // KOKKOS_IMPL_VIEW_OPERATOR_VERIFY( (1 , this->rank(), m_track, m_map) )
- return data()[i0];
- }
-
- // This assumes a contiguous underlying memory (i.e. no padding, no
- // striding... AND a Trilinos/Sacado scalar type )
- template <typename iType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- !std::is_same<typename drvtraits::value_type,
- typename drvtraits::scalar_array_type>::value &&
- std::is_integral<iType>::value,
- reference_type>
- operator[](const iType& i0) const {
- // auto map = impl_map();
- const size_t dim_scalar = m_map.dimension_scalar();
- const size_t bytes = this->span() / dim_scalar;
-
- using tmp_view_type = Kokkos::View<
- DataType*, typename traits::array_layout, typename traits::device_type,
- Kokkos::MemoryTraits<traits::memory_traits::is_unmanaged |
- traits::memory_traits::is_random_access |
- traits::memory_traits::is_atomic>>;
- tmp_view_type rankone_view(this->data(), bytes, dim_scalar);
- return rankone_view(i0);
- }
-
- // Rank 1 parenthesis
- template <typename iType>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<(std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType>::value),
- reference_type>
- operator()(const iType& i0) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
- return m_map.reference(i0);
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType>::value),
- reference_type>
- operator()(const iType& i0) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
- return m_map.reference(i0, 0, 0, 0, 0, 0, 0);
- }
-
- // Rank 2
- template <typename iType0, typename iType1>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
- return m_map.reference(i0, i1);
- }
-
- template <typename iType0, typename iType1>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
- return m_map.reference(i0, i1, 0, 0, 0, 0, 0);
- }
-
- // Rank 3
- template <typename iType0, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (3, this->rank(), m_track, m_map, i0, i1, i2))
- return m_map.reference(i0, i1, i2);
- }
-
- template <typename iType0, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (3, this->rank(), m_track, m_map, i0, i1, i2))
- return m_map.reference(i0, i1, i2, 0, 0, 0, 0);
- }
-
- // Rank 4
- template <typename iType0, typename iType1, typename iType2, typename iType3>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
- return m_map.reference(i0, i1, i2, i3);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
- return m_map.reference(i0, i1, i2, i3, 0, 0, 0);
- }
-
- // Rank 5
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
- return m_map.reference(i0, i1, i2, i3, i4);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
- return m_map.reference(i0, i1, i2, i3, i4, 0, 0);
- }
-
- // Rank 6
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value && std::is_integral<iType5>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4, const iType5& i5) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
- return m_map.reference(i0, i1, i2, i3, i4, i5);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4, const iType5& i5) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
- return m_map.reference(i0, i1, i2, i3, i4, i5, 0);
- }
-
- // Rank 7
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5, typename iType6>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value && std::is_integral<iType5>::value &&
- std::is_integral<iType6>::value),
- reference_type>
- operator()(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4, const iType5& i5,
- const iType6& i6) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (7, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5, i6))
- return m_map.reference(i0, i1, i2, i3, i4, i5, i6);
- }
-
- // Rank 0
- KOKKOS_INLINE_FUNCTION
- reference_type access() const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((0, this->rank(), m_track, m_map))
- return impl_map().reference();
- // return m_map.reference(0,0,0,0,0,0,0);
- }
-
-  // Rank 1
- template <typename iType>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<(std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType>::value),
- reference_type>
- access(const iType& i0) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
- return m_map.reference(i0);
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType>::value),
- reference_type>
- access(const iType& i0) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
- return m_map.reference(i0, 0, 0, 0, 0, 0, 0);
- }
-
- // Rank 2
- template <typename iType0, typename iType1>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
- return m_map.reference(i0, i1);
- }
-
- template <typename iType0, typename iType1>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
- return m_map.reference(i0, i1, 0, 0, 0, 0, 0);
- }
-
- // Rank 3
- template <typename iType0, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (3, this->rank(), m_track, m_map, i0, i1, i2))
- return m_map.reference(i0, i1, i2);
- }
-
- template <typename iType0, typename iType1, typename iType2>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (3, this->rank(), m_track, m_map, i0, i1, i2))
- return m_map.reference(i0, i1, i2, 0, 0, 0, 0);
- }
-
- // Rank 4
- template <typename iType0, typename iType1, typename iType2, typename iType3>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
- return m_map.reference(i0, i1, i2, i3);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
- return m_map.reference(i0, i1, i2, i3, 0, 0, 0);
- }
-
- // Rank 5
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
- const iType4& i4) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
- return m_map.reference(i0, i1, i2, i3, i4);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
- return m_map.reference(i0, i1, i2, i3, i4, 0, 0);
- }
-
- // Rank 6
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_void<typename traits::specialize>::value &&
- std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value && std::is_integral<iType5>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
- const iType4& i4, const iType5& i5) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
- return m_map.reference(i0, i1, i2, i3, i4, i5);
- }
-
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
- std::is_integral<iType0>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2,
- const iType3& i3, const iType4& i4, const iType5& i5) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
- return m_map.reference(i0, i1, i2, i3, i4, i5, 0);
- }
-
- // Rank 7
- template <typename iType0, typename iType1, typename iType2, typename iType3,
- typename iType4, typename iType5, typename iType6>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<
- (std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
- std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
- std::is_integral<iType4>::value && std::is_integral<iType5>::value &&
- std::is_integral<iType6>::value),
- reference_type>
- access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
- const iType4& i4, const iType5& i5, const iType6& i6) const {
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
- (7, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5, i6))
- return m_map.reference(i0, i1, i2, i3, i4, i5, i6);
- }
-
-#undef KOKKOS_IMPL_VIEW_OPERATOR_VERIFY
-
- //----------------------------------------
- // Standard constructor, destructor, and assignment operators...
-
- KOKKOS_DEFAULTED_FUNCTION
- ~DynRankView() = default;
-
- KOKKOS_INLINE_FUNCTION
- DynRankView() : m_track(), m_map(), m_rank() {} // Default ctor
-
- KOKKOS_INLINE_FUNCTION
- DynRankView(const DynRankView& rhs)
- : m_track(rhs.m_track), m_map(rhs.m_map), m_rank(rhs.m_rank) {}
-
- KOKKOS_INLINE_FUNCTION
- DynRankView(DynRankView&& rhs)
- : m_track(rhs.m_track), m_map(rhs.m_map), m_rank(rhs.m_rank) {}
-
- KOKKOS_INLINE_FUNCTION
- DynRankView& operator=(const DynRankView& rhs) {
- m_track = rhs.m_track;
- m_map = rhs.m_map;
- m_rank = rhs.m_rank;
- return *this;
- }
-
- KOKKOS_INLINE_FUNCTION
- DynRankView& operator=(DynRankView&& rhs) {
- m_track = rhs.m_track;
- m_map = rhs.m_map;
- m_rank = rhs.m_rank;
- return *this;
- }
-
- //----------------------------------------
- // Compatible view copy constructor and assignment
- // may assign unmanaged from managed.
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION DynRankView(const DynRankView<RT, RP...>& rhs)
- : m_track(rhs.m_track, traits::is_managed), m_map(), m_rank(rhs.m_rank) {
- using SrcTraits = typename DynRankView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits,
- typename traits::specialize>;
- static_assert(Mapping::is_assignable,
- "Incompatible DynRankView copy construction");
- Mapping::assign(m_map, rhs.m_map, rhs.m_track);
- }
-
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION DynRankView& operator=(
- const DynRankView<RT, RP...>& rhs) {
- using SrcTraits = typename DynRankView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits,
- typename traits::specialize>;
- static_assert(Mapping::is_assignable,
- "Incompatible DynRankView copy construction");
- Mapping::assign(m_map, rhs.m_map, rhs.m_track);
- m_track.assign(rhs.m_track, traits::is_managed);
- m_rank = rhs.rank();
- return *this;
- }
-
- // Copy/Assign View to DynRankView
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION DynRankView(const View<RT, RP...>& rhs)
- : m_track(), m_map(), m_rank(rhs.Rank) {
- using SrcTraits = typename View<RT, RP...>::traits;
- using Mapping =
- Kokkos::Impl::ViewMapping<traits, SrcTraits,
- Kokkos::Impl::ViewToDynRankViewTag>;
- static_assert(Mapping::is_assignable,
- "Incompatible View to DynRankView copy construction");
- Mapping::assign(*this, rhs);
- }
-
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION DynRankView& operator=(const View<RT, RP...>& rhs) {
- using SrcTraits = typename View<RT, RP...>::traits;
- using Mapping =
- Kokkos::Impl::ViewMapping<traits, SrcTraits,
- Kokkos::Impl::ViewToDynRankViewTag>;
- static_assert(Mapping::is_assignable,
- "Incompatible View to DynRankView copy assignment");
- Mapping::assign(*this, rhs);
- return *this;
- }
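-
-  // Example (illustrative): a rank-2 View converts directly,
-  //
-  //   Kokkos::View<double**> v("v", 10, 20);
-  //   Kokkos::DynRankView<double> d(v);  // d.rank() == 2
-  //
-  // using the ViewToDynRankViewTag mapping defined earlier in this header.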
-
- //----------------------------------------
- // Allocation tracking properties
-
- KOKKOS_INLINE_FUNCTION
- int use_count() const { return m_track.use_count(); }
-
- inline const std::string label() const {
- return m_track.template get_label<typename traits::memory_space>();
- }
-
- //----------------------------------------
- // Allocation according to allocation properties and array layout
- // unused arg_layout dimensions must be set to KOKKOS_INVALID_INDEX so that
- // rank deduction can properly take place
- template <class... P>
- explicit inline DynRankView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout)
- : m_track(),
- m_map(),
- m_rank(Impl::DynRankDimTraits<typename traits::specialize>::
- template computeRank<typename traits::array_layout, P...>(
- arg_prop, arg_layout)) {
-    // Append layout and spaces if they were not given in the input properties
- using alloc_prop_input = Kokkos::Impl::ViewCtorProp<P...>;
-
- // use 'std::integral_constant<unsigned,I>' for non-types
- // to avoid duplicate class error.
- using alloc_prop = Kokkos::Impl::ViewCtorProp<
- P...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned, 0>, std::string>,
- std::conditional_t<alloc_prop_input::has_memory_space,
- std::integral_constant<unsigned, 1>,
- typename traits::device_type::memory_space>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned, 2>,
- typename traits::device_type::execution_space>>;
-
- static_assert(traits::is_managed,
- "View allocation constructor requires managed memory");
-
- if (alloc_prop::initialize &&
- !alloc_prop::execution_space::impl_is_initialized()) {
- // If initializing view data then
- // the execution space must be initialized.
- Kokkos::Impl::throw_runtime_exception(
- "Constructing DynRankView and initializing data with uninitialized "
- "execution space");
- }
-
- // Copy the input allocation properties with possibly defaulted properties
- alloc_prop prop_copy(arg_prop);
-
-//------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- // If allocating in CudaUVMSpace, we must fence before and after
- // the allocation to protect against possible concurrent access
- // on the CPU and the GPU.
- // Fence using the trait's execution space (which will be Kokkos::Cuda)
- // to avoid incomplete type errors from using Kokkos::Cuda directly.
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::DynRankView<>::DynRankView: fence before UVM allocation");
- }
-#endif
- //------------------------------------------------------------
-
- Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
- prop_copy,
- Impl::DynRankDimTraits<typename traits::specialize>::
- template createLayout<traits, P...>(arg_prop, arg_layout),
- Impl::ViewCtorProp<P...>::has_execution_space);
-
-//------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::DynRankView<>::DynRankView: fence after UVM allocation");
- }
-#endif
- //------------------------------------------------------------
-
- // Setup and initialization complete, start tracking
- m_track.assign_allocated_record_to_uninitialized(record);
- }
-
- // Wrappers
- template <class... P>
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout)
- : m_track(), // No memory tracking
- m_map(arg_prop,
- Impl::DynRankDimTraits<typename traits::specialize>::
- template createLayout<traits, P...>(arg_prop, arg_layout)),
- m_rank(Impl::DynRankDimTraits<typename traits::specialize>::
- template computeRank<typename traits::array_layout, P...>(
- arg_prop, arg_layout)) {
- static_assert(
- std::is_same<pointer_type,
- typename Impl::ViewCtorProp<P...>::pointer_type>::value,
- "Constructing DynRankView to wrap user memory must supply matching "
- "pointer type");
- }
-
- //----------------------------------------
- // Constructor(s)
-
- // Simple dimension-only layout
- template <class... P>
- explicit inline DynRankView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- size_t> const arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX)
- : DynRankView(arg_prop, typename traits::array_layout(
- arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
- arg_N5, arg_N6, arg_N7)) {}
-
- template <class... P>
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- size_t> const arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX)
- : DynRankView(arg_prop, typename traits::array_layout(
- arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
- arg_N5, arg_N6, arg_N7)) {}
-
- // Allocate with label and layout
- template <typename Label>
- explicit inline DynRankView(
- const Label& arg_label,
- std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
- typename traits::array_layout> const& arg_layout)
- : DynRankView(Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
- arg_layout) {}
-
- // Allocate with label and dimensions; must disambiguate from the subview
- // constructor
- template <typename Label>
- explicit inline DynRankView(
- const Label& arg_label,
- std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value, const size_t>
- arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX)
- : DynRankView(
- Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
- typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7)) {}
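-
- // Example (illustrative, not from the original source): trailing
- // dimensions left at KOKKOS_INVALID_INDEX are not counted, so the
- // runtime rank is deduced from the number of arguments given.
- //
- //   Kokkos::DynRankView<int> a("a", 10, 20);     // a.rank() == 2
- //   Kokkos::DynRankView<int> b("b", 10, 20, 30); // b.rank() == 3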
-
- //----------------------------------------
- // Memory span required to wrap these dimensions.
- static constexpr size_t required_allocation_size(
- const size_t arg_N0 = 0, const size_t arg_N1 = 0, const size_t arg_N2 = 0,
- const size_t arg_N3 = 0, const size_t arg_N4 = 0, const size_t arg_N5 = 0,
- const size_t arg_N6 = 0, const size_t arg_N7 = 0) {
- return map_type::memory_span(typename traits::array_layout(
- arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
- }
-
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- pointer_type arg_ptr, const size_t arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX)
- : DynRankView(Kokkos::Impl::ViewCtorProp<pointer_type>(arg_ptr), arg_N0,
- arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7) {}
-
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- pointer_type arg_ptr, typename traits::array_layout& arg_layout)
- : DynRankView(Kokkos::Impl::ViewCtorProp<pointer_type>(arg_ptr),
- arg_layout) {}
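-
- // Example (illustrative sketch): wrapping user-provided memory in an
- // unmanaged DynRankView; no allocation or reference counting occurs.
- //
- //   std::vector<double> buf(3 * 4);
- //   Kokkos::DynRankView<double, Kokkos::HostSpace> w(buf.data(), 3, 4);
- //   // w.rank() == 2 and w(i, j) indexes into buf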
-
- //----------------------------------------
- // Shared scratch memory constructor
-
- static inline size_t shmem_size(const size_t arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX) {
- const size_t num_passed_args =
- (arg_N0 != KOKKOS_INVALID_INDEX) + (arg_N1 != KOKKOS_INVALID_INDEX) +
- (arg_N2 != KOKKOS_INVALID_INDEX) + (arg_N3 != KOKKOS_INVALID_INDEX) +
- (arg_N4 != KOKKOS_INVALID_INDEX) + (arg_N5 != KOKKOS_INVALID_INDEX) +
- (arg_N6 != KOKKOS_INVALID_INDEX) + (arg_N7 != KOKKOS_INVALID_INDEX);
-
- if (std::is_void<typename traits::specialize>::value &&
- num_passed_args != traits::rank_dynamic) {
- Kokkos::abort(
- "Kokkos::DynRankView::shmem_size() rank_dynamic != number of "
- "arguments.\n");
- }
-
- return map_type::memory_span(typename traits::array_layout(
- arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
- }
-
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- const typename traits::execution_space::scratch_memory_space& arg_space,
- const typename traits::array_layout& arg_layout)
- : DynRankView(
- Kokkos::Impl::ViewCtorProp<pointer_type>(
- reinterpret_cast<pointer_type>(
- arg_space.get_shmem(map_type::memory_span(
- Impl::DynRankDimTraits<typename traits::specialize>::
- createLayout(arg_layout) // TODO: verify this layout computation
- )))),
- arg_layout) {}
-
- explicit KOKKOS_INLINE_FUNCTION DynRankView(
- const typename traits::execution_space::scratch_memory_space& arg_space,
- const size_t arg_N0 = KOKKOS_INVALID_INDEX,
- const size_t arg_N1 = KOKKOS_INVALID_INDEX,
- const size_t arg_N2 = KOKKOS_INVALID_INDEX,
- const size_t arg_N3 = KOKKOS_INVALID_INDEX,
- const size_t arg_N4 = KOKKOS_INVALID_INDEX,
- const size_t arg_N5 = KOKKOS_INVALID_INDEX,
- const size_t arg_N6 = KOKKOS_INVALID_INDEX,
- const size_t arg_N7 = KOKKOS_INVALID_INDEX)
-
- : DynRankView(
- Kokkos::Impl::ViewCtorProp<pointer_type>(
- reinterpret_cast<pointer_type>(
- arg_space.get_shmem(map_type::memory_span(
- Impl::DynRankDimTraits<typename traits::specialize>::
- createLayout(typename traits::array_layout(
- arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5,
- arg_N6, arg_N7)))))),
- typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7)) {}
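-
- // Example (illustrative sketch; 'team' is a hypothetical team handle
- // inside a Kokkos::TeamPolicy kernel): size the per-team scratch with
- // shmem_size() and construct the view from team scratch memory.
- //
- //   using scratch_drv = Kokkos::DynRankView<
- //       double, Kokkos::DefaultExecutionSpace::scratch_memory_space>;
- //   size_t bytes = scratch_drv::shmem_size(16, 16);
- //   // policy.set_scratch_size(0, Kokkos::PerTeam(bytes));
- //   scratch_drv s(team.team_scratch(0), 16, 16); // rank-2 scratch view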
-};
-
-template <typename D, class... P>
-KOKKOS_INLINE_FUNCTION constexpr unsigned rank(
- const DynRankView<D, P...>& DRV) {
- return DRV.rank();
-} // Needed for the transition to a common constexpr rank() method in
-// View and DynRankView.
-
-//----------------------------------------------------------------------------
-// Subview mapping.
-// Deduce destination view type from source view traits and subview arguments
-
-namespace Impl {
-
-struct DynRankSubviewTag {};
-
-template <class SrcTraits, class... Args>
-class ViewMapping<
- std::enable_if_t<(std::is_void<typename SrcTraits::specialize>::value &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value)),
- Kokkos::Impl::DynRankSubviewTag>,
- SrcTraits, Args...> {
- private:
- enum {
- R0 = bool(is_integral_extent<0, Args...>::value),
- R1 = bool(is_integral_extent<1, Args...>::value),
- R2 = bool(is_integral_extent<2, Args...>::value),
- R3 = bool(is_integral_extent<3, Args...>::value),
- R4 = bool(is_integral_extent<4, Args...>::value),
- R5 = bool(is_integral_extent<5, Args...>::value),
- R6 = bool(is_integral_extent<6, Args...>::value)
- };
-
- enum {
- rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) +
- unsigned(R4) + unsigned(R5) + unsigned(R6)
- };
-
- using array_layout = Kokkos::LayoutStride;
-
- using value_type = typename SrcTraits::value_type;
-
- using data_type = value_type*******;
-
- public:
- using traits_type = Kokkos::ViewTraits<data_type, array_layout,
- typename SrcTraits::device_type,
- typename SrcTraits::memory_traits>;
-
- using type =
- Kokkos::View<data_type, array_layout, typename SrcTraits::device_type,
- typename SrcTraits::memory_traits>;
-
- template <class MemoryTraits>
- struct apply {
- static_assert(Kokkos::is_memory_traits<MemoryTraits>::value, "");
-
- using traits_type =
- Kokkos::ViewTraits<data_type, array_layout,
- typename SrcTraits::device_type, MemoryTraits>;
-
- using type = Kokkos::View<data_type, array_layout,
- typename SrcTraits::device_type, MemoryTraits>;
- };
-
- using dimension = typename SrcTraits::dimension;
-
- template <class Arg0 = int, class Arg1 = int, class Arg2 = int,
- class Arg3 = int, class Arg4 = int, class Arg5 = int,
- class Arg6 = int>
- struct ExtentGenerator {
- KOKKOS_INLINE_FUNCTION
- static SubviewExtents<7, rank> generator(
- const dimension& dim, Arg0 arg0 = Arg0(), Arg1 arg1 = Arg1(),
- Arg2 arg2 = Arg2(), Arg3 arg3 = Arg3(), Arg4 arg4 = Arg4(),
- Arg5 arg5 = Arg5(), Arg6 arg6 = Arg6()) {
- return SubviewExtents<7, rank>(dim, arg0, arg1, arg2, arg3, arg4, arg5,
- arg6);
- }
- };
-
- using ret_type = Kokkos::DynRankView<value_type, array_layout,
- typename SrcTraits::device_type,
- typename SrcTraits::memory_traits>;
-
- template <typename T, class... P>
- KOKKOS_INLINE_FUNCTION static ret_type subview(
- const unsigned src_rank, Kokkos::DynRankView<T, P...> const& src,
- Args... args) {
- using DstType = ViewMapping<traits_type, typename traits_type::specialize>;
-
- using DstDimType = std::conditional_t<
- (rank == 0), ViewDimension<>,
- std::conditional_t<
- (rank == 1), ViewDimension<0>,
- std::conditional_t<
- (rank == 2), ViewDimension<0, 0>,
- std::conditional_t<
- (rank == 3), ViewDimension<0, 0, 0>,
- std::conditional_t<
- (rank == 4), ViewDimension<0, 0, 0, 0>,
- std::conditional_t<
- (rank == 5), ViewDimension<0, 0, 0, 0, 0>,
- std::conditional_t<
- (rank == 6), ViewDimension<0, 0, 0, 0, 0, 0>,
- ViewDimension<0, 0, 0, 0, 0, 0, 0>>>>>>>>;
-
- using dst_offset_type = ViewOffset<DstDimType, Kokkos::LayoutStride>;
- using dst_handle_type = typename DstType::handle_type;
-
- ret_type dst;
-
- const SubviewExtents<7, rank> extents = ExtentGenerator<Args...>::generator(
- src.m_map.m_impl_offset.m_dim, args...);
-
- dst_offset_type tempdst(src.m_map.m_impl_offset, extents);
-
- dst.m_track = src.m_track;
-
- dst.m_map.m_impl_offset.m_dim.N0 = tempdst.m_dim.N0;
- dst.m_map.m_impl_offset.m_dim.N1 = tempdst.m_dim.N1;
- dst.m_map.m_impl_offset.m_dim.N2 = tempdst.m_dim.N2;
- dst.m_map.m_impl_offset.m_dim.N3 = tempdst.m_dim.N3;
- dst.m_map.m_impl_offset.m_dim.N4 = tempdst.m_dim.N4;
- dst.m_map.m_impl_offset.m_dim.N5 = tempdst.m_dim.N5;
- dst.m_map.m_impl_offset.m_dim.N6 = tempdst.m_dim.N6;
-
- dst.m_map.m_impl_offset.m_stride.S0 = tempdst.m_stride.S0;
- dst.m_map.m_impl_offset.m_stride.S1 = tempdst.m_stride.S1;
- dst.m_map.m_impl_offset.m_stride.S2 = tempdst.m_stride.S2;
- dst.m_map.m_impl_offset.m_stride.S3 = tempdst.m_stride.S3;
- dst.m_map.m_impl_offset.m_stride.S4 = tempdst.m_stride.S4;
- dst.m_map.m_impl_offset.m_stride.S5 = tempdst.m_stride.S5;
- dst.m_map.m_impl_offset.m_stride.S6 = tempdst.m_stride.S6;
-
- dst.m_map.m_impl_handle =
- dst_handle_type(src.m_map.m_impl_handle +
- src.m_map.m_impl_offset(
- extents.domain_offset(0), extents.domain_offset(1),
- extents.domain_offset(2), extents.domain_offset(3),
- extents.domain_offset(4), extents.domain_offset(5),
- extents.domain_offset(6)));
-
- dst.m_rank =
- (src_rank > 0 ? unsigned(R0) : 0) + (src_rank > 1 ? unsigned(R1) : 0) +
- (src_rank > 2 ? unsigned(R2) : 0) + (src_rank > 3 ? unsigned(R3) : 0) +
- (src_rank > 4 ? unsigned(R4) : 0) + (src_rank > 5 ? unsigned(R5) : 0) +
- (src_rank > 6 ? unsigned(R6) : 0);
-
- return dst;
- }
-};
-
-} // namespace Impl
-
-template <class V, class... Args>
-using Subdynrankview =
- typename Kokkos::Impl::ViewMapping<Kokkos::Impl::DynRankSubviewTag, V,
- Args...>::ret_type;
-
-template <class D, class... P, class... Args>
-KOKKOS_INLINE_FUNCTION Subdynrankview<ViewTraits<D*******, P...>, Args...>
-subdynrankview(const Kokkos::DynRankView<D, P...>& src, Args... args) {
- // Allow sizeof...(Args) >= src.rank(); the extra args are ignored.
- if (src.rank() > sizeof...(Args)) {
- Kokkos::abort(
- "subdynrankview: number of args must be >= rank of the source "
- "DynRankView");
- }
-
- using metafcn =
- Kokkos::Impl::ViewMapping<Kokkos::Impl::DynRankSubviewTag,
- Kokkos::ViewTraits<D*******, P...>, Args...>;
-
- return metafcn::subview(src.rank(), src, args...);
-}
-
-// Wrapper to allow subview function name
-template <class D, class... P, class... Args>
-KOKKOS_INLINE_FUNCTION Subdynrankview<ViewTraits<D*******, P...>, Args...>
-subview(const Kokkos::DynRankView<D, P...>& src, Args... args) {
- return subdynrankview(src, args...);
-}
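-
-// Example (illustrative, not part of the original source): integral
-// subview arguments drop a dimension, ranges keep it.
-//
-//   Kokkos::DynRankView<double> a("a", 10, 20);              // rank 2
-//   auto row = Kokkos::subdynrankview(a, 3, Kokkos::ALL());  // rank 1
-//   auto blk = Kokkos::subview(a, Kokkos::ALL(),
-//                              Kokkos::make_pair(0, 5));     // rank 2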
-
-} // namespace Kokkos
-
-namespace Kokkos {
-
-// overload == and !=
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator==(const DynRankView<LT, LP...>& lhs,
- const DynRankView<RT, RP...>& rhs) {
- // Same data, layout, dimensions
- using lhs_traits = ViewTraits<LT, LP...>;
- using rhs_traits = ViewTraits<RT, RP...>;
-
- return std::is_same<typename lhs_traits::const_value_type,
- typename rhs_traits::const_value_type>::value &&
- std::is_same<typename lhs_traits::array_layout,
- typename rhs_traits::array_layout>::value &&
- std::is_same<typename lhs_traits::memory_space,
- typename rhs_traits::memory_space>::value &&
- lhs.rank() == rhs.rank() && lhs.data() == rhs.data() &&
- lhs.span() == rhs.span() && lhs.extent(0) == rhs.extent(0) &&
- lhs.extent(1) == rhs.extent(1) && lhs.extent(2) == rhs.extent(2) &&
- lhs.extent(3) == rhs.extent(3) && lhs.extent(4) == rhs.extent(4) &&
- lhs.extent(5) == rhs.extent(5) && lhs.extent(6) == rhs.extent(6) &&
- lhs.extent(7) == rhs.extent(7);
-}
-
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator!=(const DynRankView<LT, LP...>& lhs,
- const DynRankView<RT, RP...>& rhs) {
- return !(operator==(lhs, rhs));
-}
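-
-// Example (illustrative): operator== compares metadata (value type,
-// layout, memory space, rank, data pointer, extents), not element values.
-//
-//   Kokkos::DynRankView<int> x("x", 5), y("y", 5);
-//   bool same = (x == x); // true: same allocation and shape
-//   bool diff = (x == y); // false: different data pointers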
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-namespace Kokkos {
-namespace Impl {
-
-template <class OutputView, class Enable = void>
-struct DynRankViewFill {
- using const_value_type = typename OutputView::traits::const_value_type;
-
- const OutputView output;
- const_value_type input;
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const size_t i0) const {
- const size_t n1 = output.extent(1);
- const size_t n2 = output.extent(2);
- const size_t n3 = output.extent(3);
- const size_t n4 = output.extent(4);
- const size_t n5 = output.extent(5);
- const size_t n6 = output.extent(6);
-
- for (size_t i1 = 0; i1 < n1; ++i1) {
- for (size_t i2 = 0; i2 < n2; ++i2) {
- for (size_t i3 = 0; i3 < n3; ++i3) {
- for (size_t i4 = 0; i4 < n4; ++i4) {
- for (size_t i5 = 0; i5 < n5; ++i5) {
- for (size_t i6 = 0; i6 < n6; ++i6) {
- output.access(i0, i1, i2, i3, i4, i5, i6) = input;
- }
- }
- }
- }
- }
- }
- }
-
- DynRankViewFill(const OutputView& arg_out, const_value_type& arg_in)
- : output(arg_out), input(arg_in) {
- using execution_space = typename OutputView::execution_space;
- using Policy = Kokkos::RangePolicy<execution_space>;
-
- Kokkos::parallel_for("Kokkos::DynRankViewFill", Policy(0, output.extent(0)),
- *this);
- }
-};
-
-template <class OutputView>
-struct DynRankViewFill<OutputView, std::enable_if_t<OutputView::Rank == 0>> {
- DynRankViewFill(const OutputView& dst,
- const typename OutputView::const_value_type& src) {
- Kokkos::Impl::DeepCopy<typename OutputView::memory_space,
- Kokkos::HostSpace>(
- dst.data(), &src, sizeof(typename OutputView::const_value_type));
- }
-};
-
-template <class OutputView, class InputView,
- class ExecSpace = typename OutputView::execution_space>
-struct DynRankViewRemap {
- const OutputView output;
- const InputView input;
- const size_t n0;
- const size_t n1;
- const size_t n2;
- const size_t n3;
- const size_t n4;
- const size_t n5;
- const size_t n6;
- const size_t n7;
-
- DynRankViewRemap(const ExecSpace& exec_space, const OutputView& arg_out,
- const InputView& arg_in)
- : output(arg_out),
- input(arg_in),
- n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
- n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
- n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
- n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
- n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
- n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
- n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
- n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
- using Policy = Kokkos::RangePolicy<ExecSpace>;
-
- Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(exec_space, 0, n0),
- *this);
- }
-
- DynRankViewRemap(const OutputView& arg_out, const InputView& arg_in)
- : output(arg_out),
- input(arg_in),
- n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
- n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
- n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
- n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
- n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
- n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
- n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
- n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
- using Policy = Kokkos::RangePolicy<ExecSpace>;
-
- Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(0, n0), *this);
- }
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const size_t i0) const {
- for (size_t i1 = 0; i1 < n1; ++i1) {
- for (size_t i2 = 0; i2 < n2; ++i2) {
- for (size_t i3 = 0; i3 < n3; ++i3) {
- for (size_t i4 = 0; i4 < n4; ++i4) {
- for (size_t i5 = 0; i5 < n5; ++i5) {
- for (size_t i6 = 0; i6 < n6; ++i6) {
- output.access(i0, i1, i2, i3, i4, i5, i6) =
- input.access(i0, i1, i2, i3, i4, i5, i6);
- }
- }
- }
- }
- }
- }
- }
-};
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-namespace Kokkos {
-
-namespace Impl {
-
-/** \brief Returns a View of the requested rank, aliasing the
- *  underlying memory, to facilitate implementation of deep_copy() and
- *  other routines that are defined on View. */
-template <unsigned N, typename T, typename... Args>
-KOKKOS_FUNCTION auto as_view_of_rank_n(DynRankView<T, Args...> v) {
- if (v.rank() != N) {
- KOKKOS_IF_ON_HOST(
- const std::string message =
- "Converting DynRankView of rank " + std::to_string(v.rank()) +
- " to a View of mis-matched rank " + std::to_string(N) + "!";
- Kokkos::abort(message.c_str());)
- KOKKOS_IF_ON_DEVICE(
- Kokkos::abort("Converting DynRankView to a View of mis-matched rank!");)
- }
-
- return View<typename RankDataType<T, N>::type, Args...>(
- v.data(), v.impl_map().layout());
-}
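-
-// Example (illustrative; note this is an Impl-namespace utility, not
-// public API): recover a fixed-rank View when the runtime rank is known.
-//
-//   Kokkos::DynRankView<double> d("d", 8, 8);       // rank 2
-//   auto v = Kokkos::Impl::as_view_of_rank_n<2>(d); // View<double**>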
-
-template <typename Function, typename... Args>
-void apply_to_view_of_static_rank(Function&& f, DynRankView<Args...> a) {
- switch (rank(a)) {
- case 0: f(as_view_of_rank_n<0>(a)); break;
- case 1: f(as_view_of_rank_n<1>(a)); break;
- case 2: f(as_view_of_rank_n<2>(a)); break;
- case 3: f(as_view_of_rank_n<3>(a)); break;
- case 4: f(as_view_of_rank_n<4>(a)); break;
- case 5: f(as_view_of_rank_n<5>(a)); break;
- case 6: f(as_view_of_rank_n<6>(a)); break;
- case 7: f(as_view_of_rank_n<7>(a)); break;
- default:
- KOKKOS_IF_ON_HOST(
- Kokkos::abort(
- std::string(
- "Trying to apply a function to a view of unexpected rank " +
- std::to_string(rank(a)))
- .c_str());)
- KOKKOS_IF_ON_DEVICE(
- Kokkos::abort(
- "Trying to apply a function to a view of unexpected rank");)
- }
-}
-
-} // namespace Impl
-
-template <typename D, class... P>
-KOKKOS_INLINE_FUNCTION constexpr auto DynRankView<D, P...>::layout() const ->
- typename traits::array_layout {
- switch (rank()) {
- case 0: return Impl::as_view_of_rank_n<0>(*this).layout();
- case 1: return Impl::as_view_of_rank_n<1>(*this).layout();
- case 2: return Impl::as_view_of_rank_n<2>(*this).layout();
- case 3: return Impl::as_view_of_rank_n<3>(*this).layout();
- case 4: return Impl::as_view_of_rank_n<4>(*this).layout();
- case 5: return Impl::as_view_of_rank_n<5>(*this).layout();
- case 6: return Impl::as_view_of_rank_n<6>(*this).layout();
- case 7: return Impl::as_view_of_rank_n<7>(*this).layout();
- default:
- KOKKOS_IF_ON_HOST(
- Kokkos::abort(
- std::string(
- "Calling DynRankView::layout on DRV of unexpected rank " +
- std::to_string(rank()))
- .c_str());)
- KOKKOS_IF_ON_DEVICE(
- Kokkos::abort(
- "Calling DynRankView::layout on DRV of unexpected rank");)
- }
- // control flow should never reach here
- return m_map.layout();
-}
-
-/** \brief Deep copy a value from Host memory into a view. */
-template <class ExecSpace, class DT, class... DP>
-inline void deep_copy(
- const ExecSpace& e, const DynRankView<DT, DP...>& dst,
- typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- static_assert(
- std::is_same<typename ViewTraits<DT, DP...>::non_const_value_type,
- typename ViewTraits<DT, DP...>::value_type>::value,
- "deep_copy requires non-const type");
-
- Impl::apply_to_view_of_static_rank(
- [=](auto view) { deep_copy(e, view, value); }, dst);
-}
-
-template <class DT, class... DP>
-inline void deep_copy(
- const DynRankView<DT, DP...>& dst,
- typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- Impl::apply_to_view_of_static_rank([=](auto view) { deep_copy(view, value); },
- dst);
-}
-
-/** \brief Deep copy into a value in Host memory from a view. */
-template <class ExecSpace, class ST, class... SP>
-inline void deep_copy(
- const ExecSpace& e,
- typename ViewTraits<ST, SP...>::non_const_value_type& dst,
- const DynRankView<ST, SP...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
- void>::value>* = 0) {
- deep_copy(e, dst, Impl::as_view_of_rank_n<0>(src));
-}
-
-template <class ST, class... SP>
-inline void deep_copy(
- typename ViewTraits<ST, SP...>::non_const_value_type& dst,
- const DynRankView<ST, SP...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
- void>::value>* = 0) {
- deep_copy(dst, Impl::as_view_of_rank_n<0>(src));
-}
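-
-// Example (illustrative sketch): scalar fill and scalar extraction are
-// routed through fixed-rank View aliases of the DynRankView.
-//
-//   Kokkos::DynRankView<double> a("a", 4, 4);
-//   Kokkos::deep_copy(a, 3.0);          // fill every element with 3.0
-//   Kokkos::DynRankView<double> s("s"); // rank 0
-//   double host_val;
-//   Kokkos::deep_copy(host_val, s);     // copy the rank-0 value out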
-
-//----------------------------------------------------------------------------
-/** \brief A deep copy between views of the default specialization, compatible
- * type, same rank, same contiguous layout.
- *
- * A rank mismatch will error out in the attempt to convert to a View
- */
-template <class ExecSpace, class DstType, class SrcType>
-inline void deep_copy(
- const ExecSpace& exec_space, const DstType& dst, const SrcType& src,
- std::enable_if_t<
- (std::is_void<typename DstType::traits::specialize>::value &&
- std::is_void<typename SrcType::traits::specialize>::value &&
- (Kokkos::is_dyn_rank_view<DstType>::value ||
- Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
- static_assert(
- std::is_same<typename DstType::traits::value_type,
- typename DstType::traits::non_const_value_type>::value,
- "deep_copy requires non-const destination type");
-
- switch (rank(dst)) {
- case 0:
- deep_copy(exec_space, Impl::as_view_of_rank_n<0>(dst),
- Impl::as_view_of_rank_n<0>(src));
- break;
- case 1:
- deep_copy(exec_space, Impl::as_view_of_rank_n<1>(dst),
- Impl::as_view_of_rank_n<1>(src));
- break;
- case 2:
- deep_copy(exec_space, Impl::as_view_of_rank_n<2>(dst),
- Impl::as_view_of_rank_n<2>(src));
- break;
- case 3:
- deep_copy(exec_space, Impl::as_view_of_rank_n<3>(dst),
- Impl::as_view_of_rank_n<3>(src));
- break;
- case 4:
- deep_copy(exec_space, Impl::as_view_of_rank_n<4>(dst),
- Impl::as_view_of_rank_n<4>(src));
- break;
- case 5:
- deep_copy(exec_space, Impl::as_view_of_rank_n<5>(dst),
- Impl::as_view_of_rank_n<5>(src));
- break;
- case 6:
- deep_copy(exec_space, Impl::as_view_of_rank_n<6>(dst),
- Impl::as_view_of_rank_n<6>(src));
- break;
- case 7:
- deep_copy(exec_space, Impl::as_view_of_rank_n<7>(dst),
- Impl::as_view_of_rank_n<7>(src));
- break;
- default:
- Kokkos::Impl::throw_runtime_exception(
- "Calling DynRankView deep_copy with a view of unexpected rank " +
- std::to_string(rank(dst)));
- }
-}
-
-template <class DstType, class SrcType>
-inline void deep_copy(
- const DstType& dst, const SrcType& src,
- std::enable_if_t<
- (std::is_void<typename DstType::traits::specialize>::value &&
- std::is_void<typename SrcType::traits::specialize>::value &&
- (Kokkos::is_dyn_rank_view<DstType>::value ||
- Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
- static_assert(
- std::is_same<typename DstType::traits::value_type,
- typename DstType::traits::non_const_value_type>::value,
- "deep_copy requires non-const destination type");
-
- switch (rank(dst)) {
- case 0:
- deep_copy(Impl::as_view_of_rank_n<0>(dst),
- Impl::as_view_of_rank_n<0>(src));
- break;
- case 1:
- deep_copy(Impl::as_view_of_rank_n<1>(dst),
- Impl::as_view_of_rank_n<1>(src));
- break;
- case 2:
- deep_copy(Impl::as_view_of_rank_n<2>(dst),
- Impl::as_view_of_rank_n<2>(src));
- break;
- case 3:
- deep_copy(Impl::as_view_of_rank_n<3>(dst),
- Impl::as_view_of_rank_n<3>(src));
- break;
- case 4:
- deep_copy(Impl::as_view_of_rank_n<4>(dst),
- Impl::as_view_of_rank_n<4>(src));
- break;
- case 5:
- deep_copy(Impl::as_view_of_rank_n<5>(dst),
- Impl::as_view_of_rank_n<5>(src));
- break;
- case 6:
- deep_copy(Impl::as_view_of_rank_n<6>(dst),
- Impl::as_view_of_rank_n<6>(src));
- break;
- case 7:
- deep_copy(Impl::as_view_of_rank_n<7>(dst),
- Impl::as_view_of_rank_n<7>(src));
- break;
- default:
- Kokkos::Impl::throw_runtime_exception(
- "Calling DynRankView deep_copy with a view of unexpected rank " +
- std::to_string(rank(dst)));
- }
-}
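-
-// Example (illustrative): view-to-view deep_copy dispatches on the
-// runtime rank, so both sides must have the same rank and compatible
-// layouts and value types.
-//
-//   Kokkos::DynRankView<double> a("a", 4, 4), b("b", 4, 4);
-//   Kokkos::deep_copy(b, a); // rank checked at run time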
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-// Deduce Mirror Types
-template <class Space, class T, class... P>
-struct MirrorDRViewType {
- // The incoming view_type
- using src_view_type = typename Kokkos::DynRankView<T, P...>;
- // The memory space for the mirror view
- using memory_space = typename Space::memory_space;
- // Check whether it is the same memory space
- enum {
- is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
- };
- // The array_layout
- using array_layout = typename src_view_type::array_layout;
- // The data type (non-const, since otherwise we could not even
- // deep_copy to it)
- using data_type = typename src_view_type::non_const_data_type;
- // The destination view type if it is not the same memory space
- using dest_view_type = Kokkos::DynRankView<data_type, array_layout, Space>;
- // If it is the same memory_space, return the existing view_type.
- // This will also keep the unmanaged trait if necessary.
- using view_type =
- std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
-};
-
-template <class Space, class T, class... P>
-struct MirrorDRVType {
- // The incoming view_type
- using src_view_type = typename Kokkos::DynRankView<T, P...>;
- // The memory space for the mirror view
- using memory_space = typename Space::memory_space;
- // Check whether it is the same memory space
- enum {
- is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
- };
- // The array_layout
- using array_layout = typename src_view_type::array_layout;
- // The data type (non-const, since otherwise we could not even
- // deep_copy to it)
- using data_type = typename src_view_type::non_const_data_type;
- // The destination view type if it is not the same memory space
- using view_type = Kokkos::DynRankView<data_type, array_layout, Space>;
-};
-
-} // namespace Impl
-
-namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline typename DynRankView<T, P...>::HostMirror create_mirror(
- const DynRankView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- std::enable_if_t<!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
- nullptr) {
- using src_type = DynRankView<T, P...>;
- using dst_type = typename src_type::HostMirror;
-
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return dst_type(prop_copy, Impl::reconstructLayout(src.layout(), src.rank()));
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror(
- const DynRankView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- std::enable_if_t<Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
- nullptr) {
- using dst_type = typename Impl::MirrorDRVType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::view_type;
-
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return dst_type(prop_copy, Impl::reconstructLayout(src.layout(), src.rank()));
-}
-
-} // namespace Impl
-
-// Create a mirror in host space
-template <class T, class... P>
-inline typename DynRankView<T, P...>::HostMirror create_mirror(
- const DynRankView<T, P...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
- void>::value>* = nullptr) {
- return Impl::create_mirror(src, Kokkos::Impl::ViewCtorProp<>{});
-}
-
-template <class T, class... P>
-inline typename DynRankView<T, P...>::HostMirror create_mirror(
- Kokkos::Impl::WithoutInitializing_t wi, const DynRankView<T, P...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
- void>::value>* = nullptr) {
- return Impl::create_mirror(src, Kokkos::view_alloc(wi));
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline typename DynRankView<T, P...>::HostMirror create_mirror(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const DynRankView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* = nullptr) {
- return Impl::create_mirror(src, arg_prop);
-}
-
-// Create a mirror in a new space
-template <class Space, class T, class... P,
- typename Enable = std::enable_if_t<
- Kokkos::is_space<Space>::value &&
- std::is_void<typename ViewTraits<T, P...>::specialize>::value>>
-typename Impl::MirrorDRVType<Space, T, P...>::view_type create_mirror(
- const Space&, const Kokkos::DynRankView<T, P...>& src) {
- return Impl::create_mirror(
- src, Kokkos::view_alloc(typename Space::memory_space{}));
-}
-
-template <class Space, class T, class... P>
-typename Impl::MirrorDRVType<Space, T, P...>::view_type create_mirror(
- Kokkos::Impl::WithoutInitializing_t wi, const Space&,
- const Kokkos::DynRankView<T, P...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
- void>::value>* = nullptr) {
- return Impl::create_mirror(
- src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const DynRankView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* = nullptr) {
- using ReturnType = typename Impl::MirrorDRVType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::view_type;
- return ReturnType{Impl::create_mirror(src, arg_prop)};
-}
-
-namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- std::is_same<
- typename DynRankView<T, P...>::memory_space,
- typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename DynRankView<T, P...>::data_type,
- typename DynRankView<T, P...>::HostMirror::data_type>::value,
- typename DynRankView<T, P...>::HostMirror>
-create_mirror_view(const DynRankView<T, P...>& src,
- const typename Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- !(std::is_same<
- typename DynRankView<T, P...>::memory_space,
- typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename DynRankView<T, P...>::data_type,
- typename DynRankView<T, P...>::HostMirror::data_type>::value),
- typename DynRankView<T, P...>::HostMirror>
-create_mirror_view(
- const DynRankView<T, P...>& src,
- const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return Kokkos::Impl::create_mirror(src, arg_prop);
-}
-
-template <class Space, class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- Kokkos::is_space<Space>::value &&
- Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace,
- typename Impl::MirrorDRViewType<Space, T, P...>::view_type>
-create_mirror_view(const Space&, const Kokkos::DynRankView<T, P...>& src,
- const typename Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
-
-template <class Space, class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- Kokkos::is_space<Space>::value &&
- !Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace,
- typename Impl::MirrorDRViewType<Space, T, P...>::view_type>
-create_mirror_view(
- const Space&, const Kokkos::DynRankView<T, P...>& src,
- const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using MemorySpace = typename Space::memory_space;
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., MemorySpace>;
- alloc_prop prop_copy(arg_prop);
-
- return Kokkos::Impl::create_mirror(src, prop_copy);
-}
-} // namespace Impl
-
-// Create a mirror view in host space
-template <class T, class... P>
-inline std::enable_if_t<
- (std::is_same<
- typename DynRankView<T, P...>::memory_space,
- typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<typename DynRankView<T, P...>::data_type,
- typename DynRankView<T, P...>::HostMirror::data_type>::value),
- typename DynRankView<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::DynRankView<T, P...>& src) {
- return src;
-}
-
-template <class T, class... P>
-inline std::enable_if_t<
- !(std::is_same<
- typename DynRankView<T, P...>::memory_space,
- typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename DynRankView<T, P...>::data_type,
- typename DynRankView<T, P...>::HostMirror::data_type>::value),
- typename DynRankView<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::DynRankView<T, P...>& src) {
- return Kokkos::create_mirror(src);
-}
-
-template <class T, class... P>
-inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
- const DynRankView<T, P...>& src) {
- return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
-}
-
-// Create a mirror view in a new space
-// FIXME_C++17 Improve SFINAE here.
-template <class Space, class T, class... P,
- class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-inline typename Impl::MirrorDRViewType<Space, T, P...>::view_type
-create_mirror_view(
- const Space&, const Kokkos::DynRankView<T, P...>& src,
- std::enable_if_t<
- Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace>* = nullptr) {
- return src;
-}
-
-// FIXME_C++17 Improve SFINAE here.
-template <class Space, class T, class... P,
- class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-inline typename Impl::MirrorDRViewType<Space, T, P...>::view_type
-create_mirror_view(
- const Space& space, const Kokkos::DynRankView<T, P...>& src,
- std::enable_if_t<
- !Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace>* = nullptr) {
- return Kokkos::create_mirror(space, src);
-}
-
-template <class Space, class T, class... P>
-inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
- const Space& space,
- const Kokkos::DynRankView<T, P...>& src) {
- return Impl::create_mirror_view(space, src, Kokkos::view_alloc(wi));
-}
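-
-// Example (illustrative): a host mirror aliases the original view when
-// it is already host-accessible; otherwise it allocates host memory.
-//
-//   Kokkos::DynRankView<double> d("d", 10); // possibly device memory
-//   auto h = Kokkos::create_mirror_view(d); // host view, may alias d
-//   // fill h on the host, then: Kokkos::deep_copy(d, h);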
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror_view(
- const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::DynRankView<T, P...>& src) {
- return Impl::create_mirror_view(src, arg_prop);
-}
-
-template <class... ViewCtorArgs, class T, class... P>
-auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>&,
- const Kokkos::DynRankView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- Impl::MirrorDRViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
- static_assert(
- alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must include a memory space!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::allow_padding,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not explicitly allow padding!");
-
- // same behavior as deep_copy(src, src)
- if (!alloc_prop_input::has_execution_space)
- fence(
- "Kokkos::create_mirror_view_and_copy: fence before returning src view");
- return src;
-}
-
-template <class... ViewCtorArgs, class T, class... P>
-auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::DynRankView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- !Impl::MirrorDRViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
- static_assert(
- alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must include a memory space!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::allow_padding,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not explicitly allow padding!");
- using Space = typename alloc_prop_input::memory_space;
- using Mirror = typename Impl::MirrorDRViewType<Space, T, P...>::view_type;
-
- // Add defaulted properties if not provided, to avoid the need for
- // if constexpr
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned int, 12>, std::string>,
- std::conditional_t<!alloc_prop_input::initialize,
- std::integral_constant<unsigned int, 13>,
- Impl::WithoutInitializing_t>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 14>,
- typename Space::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
-
- std::string& label =
- static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
- if (label.empty()) label = src.label();
- auto mirror = typename Mirror::non_const_type{
- arg_prop_copy, Impl::reconstructLayout(src.layout(), src.rank())};
- if (alloc_prop_input::has_execution_space) {
- using ExecutionSpace = typename alloc_prop::execution_space;
- deep_copy(
- static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
- .value,
- mirror, src);
- } else
- deep_copy(mirror, src);
- return mirror;
-}
-
-template <class Space, class T, class... P>
-auto create_mirror_view_and_copy(const Space&,
- const Kokkos::DynRankView<T, P...>& src,
- std::string const& name = "") {
- return create_mirror_view_and_copy(
- Kokkos::view_alloc(typename Space::memory_space{}, name), src);
-}
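-
-// Example (illustrative): mirror and copy in one call; when the target
-// space already matches the source's memory space, src is returned
-// unchanged (after a fence, matching deep_copy(src, src) semantics).
-//
-//   Kokkos::DynRankView<double> d("d", 10);
-//   auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, d);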
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-/** \brief Resize a view, copying the old data into the new view at the
- * corresponding indices. */
-template <class... ViewCtorArgs, class T, class... P>
-inline void impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- DynRankView<T, P...>& v, const size_t n0,
- const size_t n1, const size_t n2, const size_t n3,
- const size_t n4, const size_t n5, const size_t n6,
- const size_t n7) {
- using drview_type = DynRankView<T, P...>;
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
- "Can only resize managed views");
- static_assert(!alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::resize "
- "must not include a label!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::resize must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to Kokkos::resize must "
- "not include a memory space instance!");
-
- // Add execution space here to avoid the need for if constexpr below
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs..., std::string,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 10>,
- typename drview_type::execution_space>>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- v.label();
-
- drview_type v_resized(prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
-
- if (alloc_prop_input::has_execution_space)
- Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(
- static_cast<const Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space>&>(prop_copy)
- .value,
- v_resized, v);
- else
- Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(v_resized, v);
-
- v = v_resized;
-}
-
-template <class T, class... P>
-inline void resize(DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_resize(Impl::ViewCtorProp<>{}, v, n0, n1, n2, n3, n4, n5, n6, n7);
-}
-
-template <class... ViewCtorArgs, class T, class... P>
-void resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_resize(arg_prop, v, n0, n1, n2, n3, n4, n5, n6, n7);
-}
-
-template <class I, class T, class... P>
-inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> resize(
- const I& arg_prop, DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_resize(Kokkos::view_alloc(arg_prop), v, n0, n1, n2, n3, n4, n5, n6, n7);
-}
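-
-// Example (illustrative): resize reallocates and copies the old entries
-// into the new view at their indices, up to the common extents.
-//
-//   Kokkos::DynRankView<int> v("v", 4, 4);
-//   Kokkos::resize(v, 8, 8); // old 4x4 block is preserved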
-
-/** \brief Reallocate a view; the old contents are discarded rather than
- * copied to the new allocation. */
-template <class... ViewCtorArgs, class T, class... P>
-inline void impl_realloc(DynRankView<T, P...>& v, const size_t n0,
- const size_t n1, const size_t n2, const size_t n3,
- const size_t n4, const size_t n5, const size_t n6,
- const size_t n7,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using drview_type = DynRankView<T, P...>;
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
- "Can only realloc managed views");
- static_assert(!alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::realloc must "
- "not include a label!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::realloc must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to Kokkos::realloc must "
- "not include a memory space instance!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop arg_prop_copy(arg_prop);
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
- .value = v.label();
-
- v = drview_type(); // Deallocate first, if the only view to allocation
- v = drview_type(arg_prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline void realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, arg_prop);
-}
-
-template <class T, class... P>
-inline void realloc(DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Impl::ViewCtorProp<>{});
-}
-
-template <class I, class T, class... P>
-inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
- const I& arg_prop, DynRankView<T, P...>& v,
- const size_t n0 = KOKKOS_INVALID_INDEX,
- const size_t n1 = KOKKOS_INVALID_INDEX,
- const size_t n2 = KOKKOS_INVALID_INDEX,
- const size_t n3 = KOKKOS_INVALID_INDEX,
- const size_t n4 = KOKKOS_INVALID_INDEX,
- const size_t n5 = KOKKOS_INVALID_INDEX,
- const size_t n6 = KOKKOS_INVALID_INDEX,
- const size_t n7 = KOKKOS_INVALID_INDEX) {
- impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Kokkos::view_alloc(arg_prop));
-}
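-
-// Example (illustrative): realloc discards the old contents instead of
-// copying them; use resize to keep existing data.
-//
-//   Kokkos::DynRankView<int> v("v", 4, 4);
-//   Kokkos::realloc(v, 8, 8); // fresh 8x8 allocation, data not copied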
-
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
-#endif
-#endif
+++ /dev/null
-
-#ifndef KOKKOS_FUNCTIONAL_HPP
-#define KOKKOS_FUNCTIONAL_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
-#endif
-
-#include <Kokkos_Macros.hpp>
-#include <impl/Kokkos_Functional_impl.hpp>
-
-namespace Kokkos {
-
-// These should work for most types
-
-template <typename T>
-struct pod_hash {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using argument_type KOKKOS_DEPRECATED = T;
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = uint32_t;
- using result_type KOKKOS_DEPRECATED = uint32_t;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- uint32_t operator()(T const& t) const {
- return Impl::MurmurHash3_x86_32(&t, sizeof(T), 0);
- }
-
- KOKKOS_FORCEINLINE_FUNCTION
- uint32_t operator()(T const& t, uint32_t seed) const {
- return Impl::MurmurHash3_x86_32(&t, sizeof(T), seed);
- }
-};
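-
-// Example (illustrative): pod_hash hashes an object's bytes with
-// MurmurHash3; it is suitable for trivially copyable key types such as
-// those used with Kokkos::UnorderedMap.
-//
-//   struct Key { int i; int j; };
-//   Kokkos::pod_hash<Key> h;
-//   uint32_t h0 = h(Key{1, 2});      // unseeded
-//   uint32_t h1 = h(Key{1, 2}, 17u); // seeded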
-
-template <typename T>
-struct pod_equal_to {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const {
- return Impl::bitwise_equal(&a, &b);
- }
-};
-
-template <typename T>
-struct pod_not_equal_to {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const {
- return !Impl::bitwise_equal(&a, &b);
- }
-};
-
-template <typename T>
-struct equal_to {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a == b; }
-};
-
-template <typename T>
-struct not_equal_to {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a != b; }
-};
-
-template <typename T>
-struct greater {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a > b; }
-};
-
-template <typename T>
-struct less {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a < b; }
-};
-
-template <typename T>
-struct greater_equal {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a >= b; }
-};
-
-template <typename T>
-struct less_equal {
-#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
- using first_argument_type KOKKOS_DEPRECATED = T;
- using second_argument_type KOKKOS_DEPRECATED = T;
- using result_type KOKKOS_DEPRECATED = bool;
-#endif
-
- KOKKOS_FORCEINLINE_FUNCTION
- bool operator()(T const& a, T const& b) const { return a <= b; }
-};
-
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
-#endif
-#endif // KOKKOS_FUNCTIONAL_HPP
+++ /dev/null
-/*
- * Kokkos_OffsetView.hpp
- *
- * Created on: Apr 23, 2018
- * Author: swbova
- */
-
-#ifndef KOKKOS_OFFSETVIEW_HPP_
-#define KOKKOS_OFFSETVIEW_HPP_
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
-#endif
-
-#include <Kokkos_Core.hpp>
-
-#include <Kokkos_View.hpp>
-
-namespace Kokkos {
-
-namespace Experimental {
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class DataType, class... Properties>
-class OffsetView;
-
-template <class>
-struct is_offset_view : public std::false_type {};
-
-template <class D, class... P>
-struct is_offset_view<OffsetView<D, P...>> : public std::true_type {};
-
-template <class D, class... P>
-struct is_offset_view<const OffsetView<D, P...>> : public std::true_type {};
-
-#define KOKKOS_INVALID_OFFSET int64_t(0x7FFFFFFFFFFFFFFFLL)
-#define KOKKOS_INVALID_INDEX_RANGE \
- { KOKKOS_INVALID_OFFSET, KOKKOS_INVALID_OFFSET }
-
-template <typename iType, std::enable_if_t<std::is_integral<iType>::value &&
- std::is_signed<iType>::value,
- iType> = 0>
-using IndexRange = Kokkos::Array<iType, 2>;
-
-using index_list_type = std::initializer_list<int64_t>;
-
-// template <typename iType,
-// std::enable_if_t< std::is_integral<iType>::value &&
-// std::is_signed<iType>::value, iType > = 0> using min_index_type =
-// std::initializer_list<iType>;
-
-namespace Impl {
-
-template <class ViewType>
-struct GetOffsetViewTypeFromViewType {
- using type =
- OffsetView<typename ViewType::data_type, typename ViewType::array_layout,
- typename ViewType::device_type,
- typename ViewType::memory_traits>;
-};
-
-template <unsigned, class MapType, class BeginsType>
-KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
- const MapType&, const BeginsType&) {
- return true;
-}
-
-template <unsigned R, class MapType, class BeginsType, class iType,
- class... Args>
-KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
- const MapType& map, const BeginsType& begins, const iType& i,
- Args... args) {
- const bool legalIndex =
- (int64_t(i) >= begins[R]) &&
- (int64_t(i) <= int64_t(begins[R] + map.extent(R) - 1));
- return legalIndex &&
- offsetview_verify_operator_bounds<R + 1>(map, begins, args...);
-}
-template <unsigned, class MapType, class BeginsType>
-inline void offsetview_error_operator_bounds(char*, int, const MapType&,
- const BeginsType&) {}
-
-template <unsigned R, class MapType, class BeginsType, class iType,
- class... Args>
-inline void offsetview_error_operator_bounds(char* buf, int len,
-                                             const MapType& map,
-                                             const BeginsType& begins,
-                                             const iType& i, Args... args) {
-  const int64_t b = begins[R];
-  const int64_t e = b + map.extent(R) - 1;
-  const int n = snprintf(buf, len, " %lld <= %lld <= %lld %c",
-                         static_cast<long long>(b),
-                         static_cast<long long>(i),
-                         static_cast<long long>(e),
-                         (sizeof...(Args) ? ',' : ')'));
- offsetview_error_operator_bounds<R + 1>(buf + n, len - n, map, begins,
- args...);
-}
-
-template <class MemorySpace, class MapType, class BeginsType, class... Args>
-KOKKOS_INLINE_FUNCTION void offsetview_verify_operator_bounds(
- Kokkos::Impl::SharedAllocationTracker const& tracker, const MapType& map,
- const BeginsType& begins, Args... args) {
- if (!offsetview_verify_operator_bounds<0>(map, begins, args...)) {
- KOKKOS_IF_ON_HOST(
- (enum {LEN = 1024}; char buffer[LEN];
- const std::string label = tracker.template get_label<MemorySpace>();
- int n = snprintf(buffer, LEN,
- "OffsetView bounds error of view labeled %s (",
- label.c_str());
- offsetview_error_operator_bounds<0>(buffer + n, LEN - n, map, begins,
- args...);
- Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
-
- KOKKOS_IF_ON_DEVICE((
- /* Check #1: is there a SharedAllocationRecord?
- (we won't use it, but if it is not there then there isn't
- a corresponding SharedAllocationHeader containing a label).
- This check should cover the case of Views that don't
- have the Unmanaged trait but were initialized by pointer. */
- if (tracker.has_record()) {
- Kokkos::Impl::operator_bounds_error_on_device(map);
- } else { Kokkos::abort("OffsetView bounds error"); }))
- }
-}
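-
-// Illustrative note: when the check above fails on the host, a
-// std::runtime_error is thrown carrying the view label and the per-rank
-// "begin <= index <= end" ranges assembled by
-// offsetview_error_operator_bounds; on the device the kernel aborts,
-// falling back to a generic message when no allocation record (and hence
-// no label) is available.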
-
-inline void runtime_check_rank_host(const size_t rank_dynamic,
- const size_t rank,
- const index_list_type minIndices,
- const std::string& label) {
- bool isBad = false;
- std::string message =
- "Kokkos::Experimental::OffsetView ERROR: for OffsetView labeled '" +
- label + "':";
- if (rank_dynamic != rank) {
- message +=
-        " The full rank must be the same as the dynamic rank: full rank = ";
-    message += std::to_string(rank) +
-               ", dynamic rank = " + std::to_string(rank_dynamic) + "\n";
- isBad = true;
- }
-
- size_t numOffsets = 0;
- for (size_t i = 0; i < minIndices.size(); ++i) {
- if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
- }
- if (numOffsets != rank_dynamic) {
- message += "The number of offsets provided ( " +
- std::to_string(numOffsets) +
- " ) must equal the dynamic rank ( " +
- std::to_string(rank_dynamic) + " ).";
- isBad = true;
- }
-
- if (isBad) Kokkos::abort(message.c_str());
-}
-
-KOKKOS_INLINE_FUNCTION
-void runtime_check_rank_device(const size_t rank_dynamic, const size_t rank,
- const index_list_type minIndices) {
- if (rank_dynamic != rank) {
- Kokkos::abort(
- "The full rank of an OffsetView must be the same as the dynamic rank.");
- }
- size_t numOffsets = 0;
- for (size_t i = 0; i < minIndices.size(); ++i) {
- if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
- }
- if (numOffsets != rank) {
- Kokkos::abort(
- "The number of offsets provided to an OffsetView constructor must "
- "equal the dynamic rank.");
- }
-}
-} // namespace Impl
-
-template <class DataType, class... Properties>
-class OffsetView : public ViewTraits<DataType, Properties...> {
- public:
- using traits = ViewTraits<DataType, Properties...>;
-
- private:
- template <class, class...>
- friend class OffsetView;
- template <class, class...>
- friend class View; // FIXME delete this line
- template <class, class...>
- friend class Kokkos::Impl::ViewMapping;
-
- using map_type = Kokkos::Impl::ViewMapping<traits, void>;
- using track_type = Kokkos::Impl::SharedAllocationTracker;
-
- public:
- enum { Rank = map_type::Rank };
- using begins_type = Kokkos::Array<int64_t, Rank>;
-
- template <typename iType,
- std::enable_if_t<std::is_integral<iType>::value, iType> = 0>
- KOKKOS_INLINE_FUNCTION int64_t begin(const iType local_dimension) const {
- return local_dimension < Rank ? m_begins[local_dimension]
- : KOKKOS_INVALID_OFFSET;
- }
-
- KOKKOS_INLINE_FUNCTION
- begins_type begins() const { return m_begins; }
-
- template <typename iType,
- std::enable_if_t<std::is_integral<iType>::value, iType> = 0>
- KOKKOS_INLINE_FUNCTION int64_t end(const iType local_dimension) const {
- return begin(local_dimension) + m_map.extent(local_dimension);
- }
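-
-  // Illustrative note: begin()/end() delimit a half-open index range per
-  // dimension.  For a hypothetical rank-1 OffsetView v with begin(0) == -2
-  // and extent(0) == 5, the valid indices are -2, -1, 0, 1, 2 and
-  // end(0) == 3, so a conventional loop reads
-  //
-  //   for (int64_t i = v.begin(0); i < v.end(0); ++i) { /* use v(i) */ }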
-
- private:
- track_type m_track;
- map_type m_map;
- begins_type m_begins;
-
- public:
- //----------------------------------------
- /** \brief Compatible view of array of scalar types */
- using array_type =
- OffsetView<typename traits::scalar_array_type,
- typename traits::array_layout, typename traits::device_type,
- typename traits::memory_traits>;
-
- /** \brief Compatible view of const data type */
- using const_type =
- OffsetView<typename traits::const_data_type,
- typename traits::array_layout, typename traits::device_type,
- typename traits::memory_traits>;
-
- /** \brief Compatible view of non-const data type */
- using non_const_type =
- OffsetView<typename traits::non_const_data_type,
- typename traits::array_layout, typename traits::device_type,
- typename traits::memory_traits>;
-
- /** \brief Compatible HostMirror view */
- using HostMirror = OffsetView<typename traits::non_const_data_type,
- typename traits::array_layout,
- typename traits::host_mirror_space>;
-
- //----------------------------------------
- // Domain rank and extents
-
- /** \brief rank() to be implemented
- */
- // KOKKOS_INLINE_FUNCTION
- // static
- // constexpr unsigned rank() { return map_type::Rank; }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
- extent(const iType& r) const {
- return m_map.extent(r);
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, int>
- extent_int(const iType& r) const {
- return static_cast<int>(m_map.extent(r));
- }
-
- KOKKOS_INLINE_FUNCTION constexpr typename traits::array_layout layout()
- const {
- return m_map.layout();
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_t size() const {
- return m_map.dimension_0() * m_map.dimension_1() * m_map.dimension_2() *
- m_map.dimension_3() * m_map.dimension_4() * m_map.dimension_5() *
- m_map.dimension_6() * m_map.dimension_7();
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
- return m_map.stride_0();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
- return m_map.stride_1();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
- return m_map.stride_2();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
- return m_map.stride_3();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
- return m_map.stride_4();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
- return m_map.stride_5();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
- return m_map.stride_6();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
- return m_map.stride_7();
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
- stride(iType r) const {
- return (
- r == 0
- ? m_map.stride_0()
- : (r == 1
- ? m_map.stride_1()
- : (r == 2
- ? m_map.stride_2()
- : (r == 3
- ? m_map.stride_3()
- : (r == 4
- ? m_map.stride_4()
- : (r == 5
- ? m_map.stride_5()
- : (r == 6
- ? m_map.stride_6()
- : m_map.stride_7())))))));
- }
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- m_map.stride(s);
- }
-
- //----------------------------------------
- // Range span is the span which contains all members.
-
- using reference_type = typename map_type::reference_type;
- using pointer_type = typename map_type::pointer_type;
-
- enum {
- reference_type_is_lvalue_reference =
- std::is_lvalue_reference<reference_type>::value
- };
-
- KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
- KOKKOS_INLINE_FUNCTION bool span_is_contiguous() const {
- return m_map.span_is_contiguous();
- }
- KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
- return m_map.data() != nullptr;
- }
- KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
- return m_map.data();
- }
-
- //----------------------------------------
- // Allow specializations to query their specialized map
-
- KOKKOS_INLINE_FUNCTION
- const Kokkos::Impl::ViewMapping<traits, void>& implementation_map() const {
- return m_map;
- }
-
- //----------------------------------------
-
- private:
- static constexpr bool is_layout_left =
- std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value;
-
- static constexpr bool is_layout_right =
- std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value;
-
- static constexpr bool is_layout_stride =
- std::is_same<typename traits::array_layout, Kokkos::LayoutStride>::value;
-
- static constexpr bool is_default_map =
- std::is_void<typename traits::specialize>::value &&
- (is_layout_left || is_layout_right || is_layout_stride);
-
-#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
-
-#define KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(ARG) \
- Kokkos::Impl::runtime_check_memory_access_violation< \
- typename traits::memory_space>( \
- "Kokkos::OffsetView ERROR: attempt to access inaccessible memory " \
- "space"); \
- Kokkos::Experimental::Impl::offsetview_verify_operator_bounds< \
- typename traits::memory_space> \
- ARG;
-
-#else
-
-#define KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(ARG) \
- Kokkos::Impl::runtime_check_memory_access_violation< \
- typename traits::memory_space>( \
- "Kokkos::OffsetView ERROR: attempt to access inaccessible memory " \
- "space");
-
-#endif
- public:
- //------------------------------
- // Rank 0 operator()
-
- KOKKOS_FORCEINLINE_FUNCTION
- reference_type operator()() const { return m_map.reference(); }
- //------------------------------
- // Rank 1 operator()
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0>::value && (1 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.reference(j0);
- }
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
- is_default_map && !is_layout_stride),
- reference_type>
- operator()(const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.m_impl_handle[j0];
- }
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
- is_default_map && is_layout_stride),
- reference_type>
- operator()(const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * j0];
- }
- //------------------------------
- // Rank 1 operator[]
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0>::value && (1 == Rank) && !is_default_map),
- reference_type>
- operator[](const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.reference(j0);
- }
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
- is_default_map && !is_layout_stride),
- reference_type>
- operator[](const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.m_impl_handle[j0];
- }
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
- is_default_map && is_layout_stride),
- reference_type>
- operator[](const I0& i0) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
- const size_t j0 = i0 - m_begins[0];
- return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * j0];
- }
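-
-  // Illustrative note: all access operators take global (offset) indices
-  // and subtract the stored begin before addressing memory.  With
-  // m_begins[0] == -3, v(-3) and v[-3] read the first underlying element
-  // (j0 == 0), while v(0) reads the fourth (j0 == 3).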
-
- //------------------------------
- // Rank 2
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1>::value &&
- (2 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.reference(j0, j1);
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
- is_default_map && is_layout_left && (traits::rank_dynamic == 0)),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.m_impl_handle[j0 + m_map.m_impl_offset.m_dim.N0 * j1];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
- is_default_map && is_layout_left && (traits::rank_dynamic != 0)),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.m_impl_handle[j0 + m_map.m_impl_offset.m_stride * j1];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
- is_default_map && is_layout_right && (traits::rank_dynamic == 0)),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.m_impl_handle[j1 + m_map.m_impl_offset.m_dim.N1 * j0];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
- is_default_map && is_layout_right && (traits::rank_dynamic != 0)),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.m_impl_handle[j1 + m_map.m_impl_offset.m_stride * j0];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1>::value &&
- (2 == Rank) && is_default_map && is_layout_stride),
- reference_type>
- operator()(const I0& i0, const I1& i1) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- return m_map.m_impl_handle[j0 * m_map.m_impl_offset.m_stride.S0 +
- j1 * m_map.m_impl_offset.m_stride.S1];
- }
-
- //------------------------------
- // Rank 3
-
- template <typename I0, typename I1, typename I2>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2>::value &&
- (3 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2)];
- }
-
- template <typename I0, typename I1, typename I2>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2>::value &&
- (3 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- return m_map.reference(j0, j1, j2);
- }
-
- //------------------------------
- // Rank 4
-
- template <typename I0, typename I1, typename I2, typename I3>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3>::value &&
- (4 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3)];
- }
-
- template <typename I0, typename I1, typename I2, typename I3>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3>::value &&
- (4 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- return m_map.reference(j0, j1, j2, j3);
- }
-
- //------------------------------
- // Rank 5
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3, I4>::value &&
- (5 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4)];
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3, I4>::value &&
- (5 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- return m_map.reference(j0, j1, j2, j3, j4);
- }
-
- //------------------------------
- // Rank 6
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5>::value &&
- (6 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5)];
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5>::value &&
- (6 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- return m_map.reference(j0, j1, j2, j3, j4, j5);
- }
-
- //------------------------------
- // Rank 7
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6>::value &&
- (7 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5, const I6& i6) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- const size_t j6 = i6 - m_begins[6];
- return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5, j6)];
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6>::value &&
- (7 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5, const I6& i6) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- const size_t j6 = i6 - m_begins[6];
- return m_map.reference(j0, j1, j2, j3, j4, j5, j6);
- }
-
- //------------------------------
- // Rank 8
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6, typename I7>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6, I7>::value &&
- (8 == Rank) && is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5, const I6& i6, const I7& i7) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6, i7))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- const size_t j6 = i6 - m_begins[6];
- const size_t j7 = i7 - m_begins[7];
- return m_map
- .m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5, j6, j7)];
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6, typename I7>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6, I7>::value &&
- (8 == Rank) && !is_default_map),
- reference_type>
- operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
- const I4& i4, const I5& i5, const I6& i6, const I7& i7) const {
- KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
- (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6, i7))
- const size_t j0 = i0 - m_begins[0];
- const size_t j1 = i1 - m_begins[1];
- const size_t j2 = i2 - m_begins[2];
- const size_t j3 = i3 - m_begins[3];
- const size_t j4 = i4 - m_begins[4];
- const size_t j5 = i5 - m_begins[5];
- const size_t j6 = i6 - m_begins[6];
- const size_t j7 = i7 - m_begins[7];
- return m_map.reference(j0, j1, j2, j3, j4, j5, j6, j7);
- }
-
-#undef KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY
-
- //----------------------------------------
- // Standard destructor, constructors, and assignment operators
-
- KOKKOS_DEFAULTED_FUNCTION
- ~OffsetView() = default;
-
- KOKKOS_INLINE_FUNCTION
- OffsetView() : m_track(), m_map() {
- for (size_t i = 0; i < Rank; ++i) m_begins[i] = KOKKOS_INVALID_OFFSET;
- }
-
- KOKKOS_INLINE_FUNCTION
- OffsetView(const OffsetView& rhs)
- : m_track(rhs.m_track, traits::is_managed),
- m_map(rhs.m_map),
- m_begins(rhs.m_begins) {}
-
- KOKKOS_INLINE_FUNCTION
- OffsetView(OffsetView&& rhs)
- : m_track(std::move(rhs.m_track)),
- m_map(std::move(rhs.m_map)),
- m_begins(std::move(rhs.m_begins)) {}
-
- KOKKOS_INLINE_FUNCTION
- OffsetView& operator=(const OffsetView& rhs) {
- m_track = rhs.m_track;
- m_map = rhs.m_map;
- m_begins = rhs.m_begins;
- return *this;
- }
-
- KOKKOS_INLINE_FUNCTION
- OffsetView& operator=(OffsetView&& rhs) {
- m_track = std::move(rhs.m_track);
- m_map = std::move(rhs.m_map);
- m_begins = std::move(rhs.m_begins);
- return *this;
- }
-
- // interoperability with View
- private:
- using view_type =
- View<typename traits::scalar_array_type, typename traits::array_layout,
- typename traits::device_type, typename traits::memory_traits>;
-
- public:
- KOKKOS_INLINE_FUNCTION
- view_type view() const {
- view_type v(m_track, m_map);
- return v;
- }
-
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview)
- : m_track(aview.impl_track()), m_map() {
- using SrcTraits = typename OffsetView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
- static_assert(Mapping::is_assignable,
- "Incompatible OffsetView copy construction");
- Mapping::assign(m_map, aview.impl_map(), m_track);
-
- for (int i = 0; i < aview.Rank; ++i) {
- m_begins[i] = 0;
- }
- }
-
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview,
- const index_list_type& minIndices)
- : m_track(aview.impl_track()), m_map() {
- using SrcTraits = typename OffsetView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
- static_assert(Mapping::is_assignable,
- "Incompatible OffsetView copy construction");
- Mapping::assign(m_map, aview.impl_map(), m_track);
-
- KOKKOS_IF_ON_HOST((Kokkos::Experimental::Impl::runtime_check_rank_host(
- traits::rank_dynamic, Rank, minIndices, label());))
-
- KOKKOS_IF_ON_DEVICE((Kokkos::Experimental::Impl::runtime_check_rank_device(
- traits::rank_dynamic, Rank, minIndices);))
-
- for (size_t i = 0; i < minIndices.size(); ++i) {
- m_begins[i] = minIndices.begin()[i];
- }
- }
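-
-  // Illustrative sketch (names hypothetical): these constructors wrap an
-  // existing View without copying data.  Omitting minIndices leaves all
-  // begins at 0; supplying them shifts the index origin:
-  //
-  //   Kokkos::View<double*> a("a", 5);
-  //   Kokkos::Experimental::OffsetView<double*> b(a, {-2});
-  //   // b(-2) aliases a(0), ..., b(2) aliases a(4)
-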
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview,
- const begins_type& beg)
- : m_track(aview.impl_track()), m_map(), m_begins(beg) {
- using SrcTraits = typename OffsetView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
- static_assert(Mapping::is_assignable,
- "Incompatible OffsetView copy construction");
- Mapping::assign(m_map, aview.impl_map(), m_track);
- }
-
- // may assign unmanaged from managed.
-
- template <class RT, class... RP>
- KOKKOS_INLINE_FUNCTION OffsetView(const OffsetView<RT, RP...>& rhs)
- : m_track(rhs.m_track, traits::is_managed),
- m_map(),
- m_begins(rhs.m_begins) {
- using SrcTraits = typename OffsetView<RT, RP...>::traits;
- using Mapping = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
- static_assert(Mapping::is_assignable,
- "Incompatible OffsetView copy construction");
-    Mapping::assign(m_map, rhs.m_map, rhs.m_track);  // FIXME (swb): what about assign?
- }
-
- private:
- enum class subtraction_failure {
- none,
- negative,
- overflow,
- };
-
- // Subtraction should return a non-negative number and not overflow
- KOKKOS_INLINE_FUNCTION static subtraction_failure check_subtraction(
- int64_t lhs, int64_t rhs) {
- if (lhs < rhs) return subtraction_failure::negative;
-
- if (static_cast<uint64_t>(-1) / static_cast<uint64_t>(2) <
- static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs))
- return subtraction_failure::overflow;
-
- return subtraction_failure::none;
- }
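-
-  // Illustrative note: the guard above compares the unsigned difference
-  // against UINT64_MAX / 2 (== INT64_MAX), so for example
-  // check_subtraction(INT64_MAX, -1) reports subtraction_failure::overflow
-  // because the mathematical extent INT64_MAX + 1 is not representable as
-  // int64_t.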
-
- // Need a way to get at an element from both begins_type (aka Kokkos::Array
- // which doesn't have iterators) and index_list_type (aka
- // std::initializer_list which doesn't have .data() or operator[]).
- // Returns by value
- KOKKOS_INLINE_FUNCTION
- static int64_t at(const begins_type& a, size_t pos) { return a[pos]; }
-
- KOKKOS_INLINE_FUNCTION
- static int64_t at(index_list_type a, size_t pos) {
- return *(a.begin() + pos);
- }
-
- // Check that begins < ends for all elements
- // B, E can be begins_type and/or index_list_type
- template <typename B, typename E>
- static subtraction_failure runtime_check_begins_ends_host(const B& begins,
- const E& ends) {
- std::string message;
- if (begins.size() != Rank)
- message +=
- "begins.size() "
- "(" +
- std::to_string(begins.size()) +
- ")"
- " != Rank "
- "(" +
- std::to_string(Rank) +
- ")"
- "\n";
-
- if (ends.size() != Rank)
- message +=
- "ends.size() "
- "(" +
-          std::to_string(ends.size()) +
- ")"
- " != Rank "
- "(" +
- std::to_string(Rank) +
- ")"
- "\n";
-
- // If there are no errors so far, then rank == Rank
- // Otherwise, check as much as possible
- size_t rank = begins.size() < ends.size() ? begins.size() : ends.size();
- for (size_t i = 0; i != rank; ++i) {
- subtraction_failure sf = check_subtraction(at(ends, i), at(begins, i));
- if (sf != subtraction_failure::none) {
- message +=
- "("
- "ends[" +
- std::to_string(i) +
- "]"
- " "
- "(" +
- std::to_string(at(ends, i)) +
- ")"
- " - "
- "begins[" +
- std::to_string(i) +
- "]"
- " "
- "(" +
- std::to_string(at(begins, i)) +
- ")"
- ")";
- switch (sf) {
- case subtraction_failure::negative:
- message += " must be non-negative\n";
- break;
- case subtraction_failure::overflow: message += " overflows\n"; break;
- default: break;
- }
- }
- }
-
- if (!message.empty()) {
- message =
- "Kokkos::Experimental::OffsetView ERROR: for unmanaged OffsetView\n" +
- message;
- Kokkos::Impl::throw_runtime_exception(message);
- }
-
- return subtraction_failure::none;
- }
-
- // Check the begins < ends for all elements
- template <typename B, typename E>
- KOKKOS_INLINE_FUNCTION static subtraction_failure
- runtime_check_begins_ends_device(const B& begins, const E& ends) {
- if (begins.size() != Rank)
- Kokkos::abort(
- "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
- "OffsetView: begins has bad Rank");
- if (ends.size() != Rank)
- Kokkos::abort(
- "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
- "OffsetView: ends has bad Rank");
-
- for (size_t i = 0; i != begins.size(); ++i) {
- switch (check_subtraction(at(ends, i), at(begins, i))) {
- case subtraction_failure::negative:
- Kokkos::abort(
- "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
- "OffsetView: bad range");
- break;
- case subtraction_failure::overflow:
- Kokkos::abort(
- "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
- "OffsetView: range overflows");
- break;
- default: break;
- }
- }
-
- return subtraction_failure::none;
- }
-
- template <typename B, typename E>
- KOKKOS_INLINE_FUNCTION static subtraction_failure runtime_check_begins_ends(
- const B& begins, const E& ends) {
- KOKKOS_IF_ON_HOST((return runtime_check_begins_ends_host(begins, ends);))
- KOKKOS_IF_ON_DEVICE(
- (return runtime_check_begins_ends_device(begins, ends);))
- }
-
- // Constructor around unmanaged data after checking begins < ends for all
- // elements
- // Each of B, E can be begins_type and/or index_list_type
- // Precondition: begins.size() == ends.size() == m_begins.size() == Rank
- template <typename B, typename E>
- KOKKOS_INLINE_FUNCTION OffsetView(const pointer_type& p, const B& begins_,
- const E& ends_,
- subtraction_failure)
- : m_track() // no tracking
- ,
- m_map(Kokkos::Impl::ViewCtorProp<pointer_type>(p),
- typename traits::array_layout(
- Rank > 0 ? at(ends_, 0) - at(begins_, 0) : 0,
- Rank > 1 ? at(ends_, 1) - at(begins_, 1) : 0,
- Rank > 2 ? at(ends_, 2) - at(begins_, 2) : 0,
- Rank > 3 ? at(ends_, 3) - at(begins_, 3) : 0,
- Rank > 4 ? at(ends_, 4) - at(begins_, 4) : 0,
- Rank > 5 ? at(ends_, 5) - at(begins_, 5) : 0,
- Rank > 6 ? at(ends_, 6) - at(begins_, 6) : 0,
- Rank > 7 ? at(ends_, 7) - at(begins_, 7) : 0)) {
- for (size_t i = 0; i != m_begins.size(); ++i) {
- m_begins[i] = at(begins_, i);
-    }
- }
-
- public:
- // Constructor around unmanaged data
- // Four overloads, as both begins and ends can be either
- // begins_type or index_list_type
- KOKKOS_INLINE_FUNCTION
- OffsetView(const pointer_type& p, const begins_type& begins_,
- const begins_type& ends_)
- : OffsetView(p, begins_, ends_,
- runtime_check_begins_ends(begins_, ends_)) {}
-
- KOKKOS_INLINE_FUNCTION
- OffsetView(const pointer_type& p, const begins_type& begins_,
- index_list_type ends_)
- : OffsetView(p, begins_, ends_,
- runtime_check_begins_ends(begins_, ends_)) {}
-
- KOKKOS_INLINE_FUNCTION
- OffsetView(const pointer_type& p, index_list_type begins_,
- const begins_type& ends_)
- : OffsetView(p, begins_, ends_,
- runtime_check_begins_ends(begins_, ends_)) {}
-
- KOKKOS_INLINE_FUNCTION
- OffsetView(const pointer_type& p, index_list_type begins_,
- index_list_type ends_)
- : OffsetView(p, begins_, ends_,
- runtime_check_begins_ends(begins_, ends_)) {}
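-
-  // Illustrative sketch (names hypothetical): the pointer constructors wrap
-  // user-managed memory with half-open ranges, so each extent is
-  // ends[i] - begins[i] (in contrast to the inclusive std::pair ranges of
-  // the allocating constructor below):
-  //
-  //   double buf[6];
-  //   Kokkos::Experimental::OffsetView<double*, Kokkos::HostSpace>
-  //       u(buf, {-3}, {3});   // valid indices -3, ..., 2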
-
- //----------------------------------------
- // Allocation tracking properties
- KOKKOS_INLINE_FUNCTION
- int use_count() const { return m_track.use_count(); }
-
- inline const std::string label() const {
- return m_track.template get_label<typename traits::memory_space>();
- }
-
- // Choosing std::pair as type for the arguments allows constructing an
- // OffsetView using list initialization syntax, e.g.,
-  //  OffsetView dummy("dummy", {-1, 3}, {-2, 2});
- // We could allow arbitrary types RangeType that support
- // std::get<{0,1}>(RangeType const&) with std::tuple_size<RangeType>::value==2
- // but this wouldn't allow using the syntax in the example above.
- template <typename Label>
- explicit inline OffsetView(
- const Label& arg_label,
- std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
- const std::pair<int64_t, int64_t>>
- range0,
- const std::pair<int64_t, int64_t> range1 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range2 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range3 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range4 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range5 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range6 = KOKKOS_INVALID_INDEX_RANGE,
- const std::pair<int64_t, int64_t> range7 = KOKKOS_INVALID_INDEX_RANGE
-
- )
- : OffsetView(
- Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
- typename traits::array_layout(range0.second - range0.first + 1,
- range1.second - range1.first + 1,
- range2.second - range2.first + 1,
- range3.second - range3.first + 1,
- range4.second - range4.first + 1,
- range5.second - range5.first + 1,
- range6.second - range6.first + 1,
- range7.second - range7.first + 1),
- {range0.first, range1.first, range2.first, range3.first,
- range4.first, range5.first, range6.first, range7.first}) {}
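-
-  // Illustrative sketch: the std::pair ranges above are inclusive on both
-  // ends, so each extent is last - first + 1.  For a hypothetical rank-2
-  // view,
-  //
-  //   Kokkos::Experimental::OffsetView<double**> m("m", {-1, 3}, {-2, 2});
-  //
-  // allocates a 5 x 5 view with m.begin(0) == -1 and m.begin(1) == -2.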
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- template <typename Label>
- KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the constructor taking std::pair<int64_t, int64_t> arguments "
- "instead!")
- explicit inline OffsetView(
- const Label& arg_label,
- std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
- const index_list_type>
- range0,
- const index_list_type range1 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range2 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range3 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range4 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range5 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range6 = KOKKOS_INVALID_INDEX_RANGE,
- const index_list_type range7 = KOKKOS_INVALID_INDEX_RANGE)
- : OffsetView(
- arg_label,
- std::pair<int64_t, int64_t>(range0.begin()[0], range0.begin()[1]),
- std::pair<int64_t, int64_t>(range1.begin()[0], range1.begin()[1]),
- std::pair<int64_t, int64_t>(range2.begin()[0], range2.begin()[1]),
- std::pair<int64_t, int64_t>(range3.begin()[0], range3.begin()[1]),
- std::pair<int64_t, int64_t>(range4.begin()[0], range4.begin()[1]),
- std::pair<int64_t, int64_t>(range5.begin()[0], range5.begin()[1]),
- std::pair<int64_t, int64_t>(range6.begin()[0], range6.begin()[1]),
- std::pair<int64_t, int64_t>(range7.begin()[0], range7.begin()[1])) {
- }
-#endif
-
- template <class... P>
- explicit KOKKOS_INLINE_FUNCTION OffsetView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout,
- const index_list_type minIndices)
- : m_track() // No memory tracking
- ,
- m_map(arg_prop, arg_layout) {
- for (size_t i = 0; i < minIndices.size(); ++i) {
- m_begins[i] = minIndices.begin()[i];
- }
- static_assert(
- std::is_same<pointer_type, typename Kokkos::Impl::ViewCtorProp<
- P...>::pointer_type>::value,
- "When constructing OffsetView to wrap user memory, you must supply "
- "matching pointer type");
- }
-
- template <class... P>
- explicit inline OffsetView(
- const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
- std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout,
- const index_list_type minIndices)
- : m_track(),
- m_map()
-
- {
- for (size_t i = 0; i < Rank; ++i) m_begins[i] = minIndices.begin()[i];
-
- // Append layout and spaces if not input
- using alloc_prop_input = Kokkos::Impl::ViewCtorProp<P...>;
-
- // use 'std::integral_constant<unsigned,I>' for non-types
- // to avoid duplicate class error.
- using alloc_prop = Kokkos::Impl::ViewCtorProp<
- P...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned, 0>, std::string>,
- std::conditional_t<alloc_prop_input::has_memory_space,
- std::integral_constant<unsigned, 1>,
- typename traits::device_type::memory_space>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned, 2>,
- typename traits::device_type::execution_space>>;
-
- static_assert(traits::is_managed,
- "OffsetView allocation constructor requires managed memory");
-
- if (alloc_prop::initialize &&
- !alloc_prop::execution_space::impl_is_initialized()) {
- // If initializing view data then
- // the execution space must be initialized.
- Kokkos::Impl::throw_runtime_exception(
- "Constructing OffsetView and initializing data with uninitialized "
- "execution space");
- }
-
- // Copy the input allocation properties with possibly defaulted properties
- alloc_prop prop_copy(arg_prop);
-
- //------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- // If allocating in CudaUVMSpace must fence before and after
- // the allocation to protect against possible concurrent access
- // on the CPU and the GPU.
-    // Fence using the trait's execution space (which will be Kokkos::Cuda)
-    // to avoid incomplete type errors from using Kokkos::Cuda directly.
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::OffsetView::OffsetView(): fence before UVM allocation");
- }
-#endif
- //------------------------------------------------------------
-
- Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
- prop_copy, arg_layout,
- Kokkos::Impl::ViewCtorProp<P...>::has_execution_space);
-
- //------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::OffsetView::OffsetView(): fence after UVM allocation");
- }
-#endif
- //------------------------------------------------------------
-
- // Setup and initialization complete, start tracking
- m_track.assign_allocated_record_to_uninitialized(record);
-
- KOKKOS_IF_ON_HOST((Kokkos::Experimental::Impl::runtime_check_rank_host(
- traits::rank_dynamic, Rank, minIndices, label());))
-
- KOKKOS_IF_ON_DEVICE((Kokkos::Experimental::Impl::runtime_check_rank_device(
- traits::rank_dynamic, Rank, minIndices);))
- }
-};
-
-/** \brief Temporary free function rank()
- * until rank() is implemented
- * in the View
- */
-template <typename D, class... P>
-KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const OffsetView<D, P...>& V) {
- return V.Rank;
-} // Temporary until added to view
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-namespace Impl {
-
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, T>
-shift_input(const T arg, const int64_t offset) {
- return arg - offset;
-}
-
-KOKKOS_INLINE_FUNCTION
-Kokkos::Impl::ALL_t shift_input(const Kokkos::Impl::ALL_t arg,
- const int64_t /*offset*/) {
- return arg;
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION
- std::enable_if_t<std::is_integral<T>::value, Kokkos::pair<T, T>>
- shift_input(const Kokkos::pair<T, T> arg, const int64_t offset) {
- return Kokkos::make_pair<T, T>(arg.first - offset, arg.second - offset);
-}
-template <class T>
-inline std::enable_if_t<std::is_integral<T>::value, std::pair<T, T>>
-shift_input(const std::pair<T, T> arg, const int64_t offset) {
- return std::make_pair<T, T>(arg.first - offset, arg.second - offset);
-}
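-
-// Illustrative note: shift_input translates subview arguments from offset
-// coordinates into the zero-based coordinates of the wrapped View.  With an
-// offset (begin) of -3:
-//
-//   shift_input(int64_t(-1), -3);               // yields 2
-//   shift_input(Kokkos::make_pair(-1, 2), -3);  // yields pair(2, 5)
-//   shift_input(Kokkos::ALL(), -3);             // passes through unchanged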
-
-template <size_t N, class Arg, class A>
-KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
- const size_t i, Kokkos::Array<int64_t, N>& subviewBegins,
- std::enable_if_t<N != 0, const Arg> shiftedArg, const Arg arg,
- const A viewBegins, size_t& counter) {
- if (!std::is_integral<Arg>::value) {
- subviewBegins[counter] = shiftedArg == arg ? viewBegins[i] : 0;
- counter++;
- }
-}
-
-template <size_t N, class Arg, class A>
-KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
- const size_t /*i*/, Kokkos::Array<int64_t, N>& /*subviewBegins*/,
- std::enable_if_t<N == 0, const Arg> /*shiftedArg*/, const Arg /*arg*/,
- const A /*viewBegins*/, size_t& /*counter*/) {}
-
-template <class D, class... P, class T>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<void /* deduce subview type from
- source view traits */
- ,
- ViewTraits<D, P...>, T>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T arg) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T shiftedArg = shift_input(arg, begins[0]);
-
- constexpr size_t rank =
- Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
- traits */
- ,
- ViewTraits<D, P...>, T>::type::Rank;
-
- auto theSubview = Kokkos::subview(theView, shiftedArg);
-
- Kokkos::Array<int64_t, rank> subviewBegins;
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(0, subviewBegins, shiftedArg,
- arg, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
- view traits */
- ,
- ViewTraits<D, P...>, T>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
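-
-// Illustrative note on the begins of the result: map_arg_to_new_begin keeps
-// a dimension's offset only when the argument passed through shift_input
-// unchanged (Kokkos::ALL()), resets it to 0 for shifted pair/range
-// arguments, and integral arguments drop the dimension entirely.  A sketch,
-// assuming ov is a rank-2 OffsetView with begins {-3, -5} and that the
-// Experimental::subview overloads defined later in this header forward to
-// these helpers:
-//
-//   auto s = Kokkos::Experimental::subview(ov, Kokkos::ALL(),
-//                                          Kokkos::make_pair(-5, -2));
-//   // s.begin(0) == -3 is preserved; s.begin(1) == 0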
-
-template <class D, class... P, class T0, class T1>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1>::type>::type
- subview_offset(const Kokkos::Experimental::OffsetView<D, P...>& src,
- T0 arg0, T1 arg1) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
-
- auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1);
- constexpr size_t rank =
- Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
- traits */
- ,
- ViewTraits<D, P...>, T0, T1>::type::Rank;
-
- Kokkos::Array<int64_t, rank> subviewBegins;
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1>::type>::type offsetView(theSubview,
- subviewBegins);
-
- return offsetView;
-}
-
-template <class D, class... P, class T0, class T1, class T2>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
-
- auto theSubview =
- Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2);
-
- constexpr size_t rank =
- Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
- traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2>::type::Rank;
-
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-
-template <class D, class... P, class T0, class T1, class T2, class T3>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
- T3 arg3) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
- T3 shiftedArg3 = shift_input(arg3, begins[3]);
-
- auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
- shiftedArg2, shiftedArg3);
-
- constexpr size_t rank = Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3>::type::Rank;
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 3, subviewBegins, shiftedArg3, arg3, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-
-template <class D, class... P, class T0, class T1, class T2, class T3, class T4>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
- T3 arg3, T4 arg4) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
- T3 shiftedArg3 = shift_input(arg3, begins[3]);
- T4 shiftedArg4 = shift_input(arg4, begins[4]);
-
- auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
- shiftedArg2, shiftedArg3, shiftedArg4);
-
- constexpr size_t rank = Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type::Rank;
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 3, subviewBegins, shiftedArg3, arg3, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 4, subviewBegins, shiftedArg4, arg4, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-
-template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
- class T5>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
- T3 arg3, T4 arg4, T5 arg5) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
- T3 shiftedArg3 = shift_input(arg3, begins[3]);
- T4 shiftedArg4 = shift_input(arg4, begins[4]);
- T5 shiftedArg5 = shift_input(arg5, begins[5]);
-
- auto theSubview =
- Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
- shiftedArg3, shiftedArg4, shiftedArg5);
-
- constexpr size_t rank = Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type::Rank;
-
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 3, subviewBegins, shiftedArg3, arg3, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 4, subviewBegins, shiftedArg4, arg4, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 5, subviewBegins, shiftedArg5, arg5, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
- class T5, class T6>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
- T3 arg3, T4 arg4, T5 arg5, T6 arg6) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
- T3 shiftedArg3 = shift_input(arg3, begins[3]);
- T4 shiftedArg4 = shift_input(arg4, begins[4]);
- T5 shiftedArg5 = shift_input(arg5, begins[5]);
- T6 shiftedArg6 = shift_input(arg6, begins[6]);
-
- auto theSubview =
- Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
- shiftedArg3, shiftedArg4, shiftedArg5, shiftedArg6);
-
- constexpr size_t rank = Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type::Rank;
-
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 3, subviewBegins, shiftedArg3, arg3, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 4, subviewBegins, shiftedArg4, arg4, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 5, subviewBegins, shiftedArg5, arg5, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 6, subviewBegins, shiftedArg6, arg6, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-
-template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
- class T5, class T6, class T7>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
- subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
- T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) {
- auto theView = src.view();
- auto begins = src.begins();
-
- T0 shiftedArg0 = shift_input(arg0, begins[0]);
- T1 shiftedArg1 = shift_input(arg1, begins[1]);
- T2 shiftedArg2 = shift_input(arg2, begins[2]);
- T3 shiftedArg3 = shift_input(arg3, begins[3]);
- T4 shiftedArg4 = shift_input(arg4, begins[4]);
- T5 shiftedArg5 = shift_input(arg5, begins[5]);
- T6 shiftedArg6 = shift_input(arg6, begins[6]);
- T7 shiftedArg7 = shift_input(arg7, begins[7]);
-
- auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
- shiftedArg2, shiftedArg3, shiftedArg4,
- shiftedArg5, shiftedArg6, shiftedArg7);
-
- constexpr size_t rank = Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type::Rank;
-
- Kokkos::Array<int64_t, rank> subviewBegins;
-
- size_t counter = 0;
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 0, subviewBegins, shiftedArg0, arg0, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 1, subviewBegins, shiftedArg1, arg1, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 2, subviewBegins, shiftedArg2, arg2, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 3, subviewBegins, shiftedArg3, arg3, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 4, subviewBegins, shiftedArg4, arg4, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 5, subviewBegins, shiftedArg5, arg5, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 6, subviewBegins, shiftedArg6, arg6, begins, counter);
- Kokkos::Experimental::Impl::map_arg_to_new_begin(
- 7, subviewBegins, shiftedArg7, arg7, begins, counter);
-
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
- offsetView(theSubview, subviewBegins);
-
- return offsetView;
-}
-} // namespace Impl
-
-template <class D, class... P, class... Args>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
- typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, Args...>::type>::type
- subview(const OffsetView<D, P...>& src, Args... args) {
- static_assert(
- OffsetView<D, P...>::Rank == sizeof...(Args),
- "subview requires one argument for each source OffsetView rank");
-
- return Kokkos::Experimental::Impl::subview_offset(src, args...);
-}
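-
-// Editor's note (sketch, not part of the original source): index arguments
-// passed to this subview are given in the OffsetView's own offset index
-// space; shift_input() translates them to the wrapped View before calling
-// Kokkos::subview, and retained dimensions get remapped begin offsets.
-// Names below are illustrative only:
-//
-//   // Rank-1 OffsetView indexed from -2 to 2 (begin/end pairs are inclusive)
-//   Kokkos::Experimental::OffsetView<double*> a("A", {-2, 2});
-//   // Half-open index pair in the offset index space, as for Kokkos::subview
-//   auto s = Kokkos::Experimental::subview(a, Kokkos::make_pair(-1, 2));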
-
-} // namespace Experimental
-} // namespace Kokkos
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Experimental {
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
- const OffsetView<RT, RP...>& rhs) {
- // Same data, layout, dimensions
- using lhs_traits = ViewTraits<LT, LP...>;
- using rhs_traits = ViewTraits<RT, RP...>;
-
- return std::is_same<typename lhs_traits::const_value_type,
- typename rhs_traits::const_value_type>::value &&
- std::is_same<typename lhs_traits::array_layout,
- typename rhs_traits::array_layout>::value &&
- std::is_same<typename lhs_traits::memory_space,
- typename rhs_traits::memory_space>::value &&
- unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
- lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
- lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
- lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
- lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
- lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7) &&
- lhs.begin(0) == rhs.begin(0) && lhs.begin(1) == rhs.begin(1) &&
- lhs.begin(2) == rhs.begin(2) && lhs.begin(3) == rhs.begin(3) &&
- lhs.begin(4) == rhs.begin(4) && lhs.begin(5) == rhs.begin(5) &&
- lhs.begin(6) == rhs.begin(6) && lhs.begin(7) == rhs.begin(7);
-}
-
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator!=(const OffsetView<LT, LP...>& lhs,
- const OffsetView<RT, RP...>& rhs) {
- return !(operator==(lhs, rhs));
-}
-
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator==(const View<LT, LP...>& lhs,
- const OffsetView<RT, RP...>& rhs) {
- // Same data, layout, dimensions
- using lhs_traits = ViewTraits<LT, LP...>;
- using rhs_traits = ViewTraits<RT, RP...>;
-
- return std::is_same<typename lhs_traits::const_value_type,
- typename rhs_traits::const_value_type>::value &&
- std::is_same<typename lhs_traits::array_layout,
- typename rhs_traits::array_layout>::value &&
- std::is_same<typename lhs_traits::memory_space,
- typename rhs_traits::memory_space>::value &&
- unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
- lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
- lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
- lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
- lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
- lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7);
-}
-
-template <class LT, class... LP, class RT, class... RP>
-KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
- const View<RT, RP...>& rhs) {
- return rhs == lhs;
-}
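-
-// Editor's note: like the View comparisons they mirror, these operators are
-// shallow: two (Offset)Views compare equal only if they alias the same
-// allocation with identical value type, layout, extents, and begins; element
-// values are never compared. Illustrative sketch (names are assumptions):
-//
-//   Kokkos::Experimental::OffsetView<int*> a("a", {0, 9});
-//   auto b = a; // copy shares the allocation, so a == b
-//   Kokkos::Experimental::OffsetView<int*> c("c", {0, 9});
-//   assert(a == b && a != c); // same shape but distinct allocation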
-
-} // namespace Experimental
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-template <class DT, class... DP>
-inline void deep_copy(
- const Experimental::OffsetView<DT, DP...>& dst,
- typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- static_assert(
- std::is_same<typename ViewTraits<DT, DP...>::non_const_value_type,
- typename ViewTraits<DT, DP...>::value_type>::value,
- "deep_copy requires non-const type");
-
- auto dstView = dst.view();
- Kokkos::deep_copy(dstView, value);
-}
-
-template <class DT, class... DP, class ST, class... SP>
-inline void deep_copy(
- const Experimental::OffsetView<DT, DP...>& dst,
- const Experimental::OffsetView<ST, SP...>& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- static_assert(
- std::is_same<typename ViewTraits<DT, DP...>::value_type,
- typename ViewTraits<ST, SP...>::non_const_value_type>::value,
- "deep_copy requires matching non-const destination type");
-
- auto dstView = dst.view();
- Kokkos::deep_copy(dstView, value.view());
-}
-template <class DT, class... DP, class ST, class... SP>
-inline void deep_copy(
- const Experimental::OffsetView<DT, DP...>& dst,
- const View<ST, SP...>& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- static_assert(
- std::is_same<typename ViewTraits<DT, DP...>::value_type,
- typename ViewTraits<ST, SP...>::non_const_value_type>::value,
- "deep_copy requires matching non-const destination type");
-
- auto dstView = dst.view();
- Kokkos::deep_copy(dstView, value);
-}
-
-template <class DT, class... DP, class ST, class... SP>
-inline void deep_copy(
- const View<DT, DP...>& dst,
- const Experimental::OffsetView<ST, SP...>& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
- static_assert(
- std::is_same<typename ViewTraits<DT, DP...>::value_type,
- typename ViewTraits<ST, SP...>::non_const_value_type>::value,
- "deep_copy requires matching non-const destination type");
-
- Kokkos::deep_copy(dst, value.view());
-}
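-
-// Editor's note: all four overloads above forward to the wrapped View(s), so
-// begin offsets play no role in the copy itself; only the usual extent and
-// type requirements of Kokkos::deep_copy apply. Sketch (names assumed):
-//
-//   Kokkos::Experimental::OffsetView<double*> d("d", {-5, 5});
-//   Kokkos::deep_copy(d, 3.14); // scalar fill of the underlying View
-//   Kokkos::Experimental::OffsetView<double*> e("e", {-5, 5});
-//   Kokkos::deep_copy(e, d); // element-wise copy; offsets are ignored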
-
-namespace Impl {
-
-// Deduce Mirror Types
-template <class Space, class T, class... P>
-struct MirrorOffsetViewType {
- // The incoming view_type
- using src_view_type = typename Kokkos::Experimental::OffsetView<T, P...>;
- // The memory space for the mirror view
- using memory_space = typename Space::memory_space;
- // Check whether it is the same memory space
- enum {
- is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
- };
- // The array_layout
- using array_layout = typename src_view_type::array_layout;
- // The data type (we probably want it non-const since otherwise we can't even
- // deep_copy to it.)
- using data_type = typename src_view_type::non_const_data_type;
- // The destination view type if it is not the same memory space
- using dest_view_type =
- Kokkos::Experimental::OffsetView<data_type, array_layout, Space>;
- // If it is the same memory_space return the existing view_type
- // This will also keep the unmanaged trait if necessary
- using view_type =
- std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
-};
-
-template <class Space, class T, class... P>
-struct MirrorOffsetType {
- // The incoming view_type
- using src_view_type = typename Kokkos::Experimental::OffsetView<T, P...>;
- // The memory space for the mirror view
- using memory_space = typename Space::memory_space;
- // Check whether it is the same memory space
- enum {
- is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
- };
- // The array_layout
- using array_layout = typename src_view_type::array_layout;
- // The data type (we probably want it non-const since otherwise we can't even
- // deep_copy to it.)
- using data_type = typename src_view_type::non_const_data_type;
- // The destination view type if it is not the same memory space
- using view_type =
- Kokkos::Experimental::OffsetView<data_type, array_layout, Space>;
-};
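-
-// Editor's note: the two traits above differ in one respect. When the target
-// space matches the source's memory space, MirrorOffsetViewType::view_type
-// collapses to the source type (so create_mirror_view may return the source
-// itself), whereas MirrorOffsetType::view_type always names a freshly
-// allocatable OffsetView type, matching create_mirror's always-allocate
-// semantics.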
-
-} // namespace Impl
-
-namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror
-create_mirror(const Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror(
- Kokkos::create_mirror(arg_prop, src.view()), src.begins());
-}
-
-template <class Space, class T, class... P, class... ViewCtorArgs>
-inline typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type
-create_mirror(const Space&,
- const Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a memory space instance!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type(
- prop_copy, src.layout(),
- {src.begin(0), src.begin(1), src.begin(2), src.begin(3), src.begin(4),
- src.begin(5), src.begin(6), src.begin(7)});
-}
-} // namespace Impl
-
-// Create a mirror in host space
-template <class T, class... P>
-inline auto create_mirror(
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
-}
-
-template <class T, class... P>
-inline auto create_mirror(
- Kokkos::Impl::WithoutInitializing_t wi,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror(src, Kokkos::view_alloc(wi));
-}
-
-// Create a mirror in a new space
-template <class Space, class T, class... P,
- typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-inline auto create_mirror(
- const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror(space, src, Impl::ViewCtorProp<>{});
-}
-
-template <class Space, class T, class... P>
-typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type
-create_mirror(Kokkos::Impl::WithoutInitializing_t wi, const Space& space,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror(space, src, Kokkos::view_alloc(wi));
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror(src, arg_prop);
-}
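-
-// Editor's note: an illustrative sketch of the public entry points above
-// (variable names are assumptions, not original code):
-//
-//   Kokkos::Experimental::OffsetView<double*> d("d", {-5, 5});
-//   auto h = Kokkos::create_mirror(d); // HostMirror, always allocates
-//   auto hu = Kokkos::create_mirror(Kokkos::WithoutInitializing, d);
-//   auto hs = Kokkos::create_mirror(Kokkos::HostSpace{}, d); // in a space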
-
-namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- (std::is_same<
- typename Kokkos::Experimental::OffsetView<T, P...>::memory_space,
- typename Kokkos::Experimental::OffsetView<
- T, P...>::HostMirror::memory_space>::value &&
- std::is_same<typename Kokkos::Experimental::OffsetView<T, P...>::data_type,
- typename Kokkos::Experimental::OffsetView<
- T, P...>::HostMirror::data_type>::value),
- typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror>
-create_mirror_view(
- const typename Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- !(std::is_same<
- typename Kokkos::Experimental::OffsetView<T, P...>::memory_space,
- typename Kokkos::Experimental::OffsetView<
- T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::Experimental::OffsetView<T, P...>::data_type,
- typename Kokkos::Experimental::OffsetView<
- T, P...>::HostMirror::data_type>::value),
- typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return Kokkos::create_mirror(arg_prop, src);
-}
-
-template <class Space, class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- Impl::MirrorOffsetViewType<Space, T, P...>::is_same_memspace,
- Kokkos::Experimental::OffsetView<T, P...>>
-create_mirror_view(const Space&,
- const Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
-
-template <class Space, class T, class... P, class... ViewCtorArgs>
-std::enable_if_t<
- !Impl::MirrorOffsetViewType<Space, T, P...>::is_same_memspace,
- typename Kokkos::Impl::MirrorOffsetViewType<Space, T, P...>::view_type>
-create_mirror_view(const Space& space,
- const Kokkos::Experimental::OffsetView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return create_mirror(space, src, arg_prop);
-}
-} // namespace Impl
-
-// Create a mirror view in host space
-template <class T, class... P>
-inline auto create_mirror_view(
- const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror_view(src, Impl::ViewCtorProp<>{});
-}
-
-template <class T, class... P>
-inline auto create_mirror_view(
- Kokkos::Impl::WithoutInitializing_t wi,
- const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
-}
-
-// Create a mirror view in a new space
-template <class Space, class T, class... P,
- typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-inline auto create_mirror_view(
- const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror_view(space, src, Impl::ViewCtorProp<>{});
-}
-
-template <class Space, class T, class... P>
-inline auto create_mirror_view(
- Kokkos::Impl::WithoutInitializing_t wi, const Space& space,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror_view(space, src, Kokkos::view_alloc(wi));
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror_view(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return Impl::create_mirror_view(src, arg_prop);
-}
-
-// Create a mirror view and deep_copy in a new space
-template <class... ViewCtorArgs, class T, class... P>
-typename Kokkos::Impl::MirrorOffsetViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::view_type
-create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::Experimental::OffsetView<T, P...>& src) {
- return {create_mirror_view_and_copy(arg_prop, src.view()), src.begins()};
-}
-
-template <class Space, class T, class... P>
-typename Kokkos::Impl::MirrorOffsetViewType<Space, T, P...>::view_type
-create_mirror_view_and_copy(
- const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src,
- std::string const& name = "") {
- return {create_mirror_view_and_copy(space, src.view(), name), src.begins()};
-}
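-
-// Editor's note: a typical host/device round trip with the helpers above
-// (sketch; "d" is an assumed device-resident OffsetView):
-//
-//   auto h = Kokkos::create_mirror_view(d); // may alias d if host-accessible
-//   // ... fill h on the host ...
-//   Kokkos::deep_copy(d, h);
-//   // or allocate and copy in one step:
-//   auto m = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, d);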
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
-#endif
-#endif /* KOKKOS_OFFSETVIEW_HPP_ */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_BITSET_IMPL_HPP
-#define KOKKOS_BITSET_IMPL_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <impl/Kokkos_BitOps.hpp>
-#include <cstdint>
-
-#include <cstdio>
-#include <climits>
-#include <iostream>
-#include <iomanip>
-
-namespace Kokkos {
-namespace Impl {
-
-KOKKOS_FORCEINLINE_FUNCTION
-unsigned rotate_right(unsigned i, int r) {
- constexpr int size = static_cast<int>(sizeof(unsigned) * CHAR_BIT);
- return r ? ((i >> r) | (i << (size - r))) : i;
-}
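-
-// Editor's note: e.g. for 32-bit unsigned, rotate_right(0x00000001u, 1)
-// yields 0x80000000u; the r == 0 special case avoids the undefined behavior
-// of shifting left by the full word width.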
-
-template <typename Bitset>
-struct BitsetCount {
- using bitset_type = Bitset;
- using execution_space =
- typename bitset_type::execution_space::execution_space;
- using size_type = typename bitset_type::size_type;
- using value_type = size_type;
-
- bitset_type m_bitset;
-
- BitsetCount(bitset_type const& bitset) : m_bitset(bitset) {}
-
- size_type apply() const {
- size_type count = 0u;
- parallel_reduce("Kokkos::Impl::BitsetCount::apply",
- m_bitset.m_blocks.extent(0), *this, count);
- return count;
- }
-
- KOKKOS_INLINE_FUNCTION
- void init(value_type& count) const { count = 0u; }
-
- KOKKOS_INLINE_FUNCTION
- void join(value_type& count, const size_type& incr) const { count += incr; }
-
- KOKKOS_INLINE_FUNCTION
- void operator()(size_type i, value_type& count) const {
- count += bit_count(m_bitset.m_blocks[i]);
- }
-};
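-
-// Editor's sketch of how this functor is driven, mirroring what
-// Bitset::count() does internally (the alias below is an assumption for
-// illustration; Bitset itself is defined in Kokkos_Bitset.hpp):
-//
-//   using bitset_t = Kokkos::Bitset<Kokkos::DefaultExecutionSpace>;
-//   bitset_t bs(1024);
-//   // ... set bits from device code via bs.set(i) ...
-//   unsigned n = Kokkos::Impl::BitsetCount<bitset_t>(bs).apply();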
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif // KOKKOS_BITSET_IMPL_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-
-#include <Kokkos_Core.hpp>
-#include <Kokkos_Cuda.hpp>
-#include <Kokkos_CudaSpace.hpp>
-
-#include <cstdlib>
-#include <iostream>
-#include <sstream>
-#include <algorithm>
-#include <atomic>
-
-//#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-
-#include <impl/Kokkos_Tools.hpp>
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-cudaStream_t Kokkos::Impl::cuda_get_deep_copy_stream() {
- static cudaStream_t s = nullptr;
- if (s == nullptr) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamCreate(&s));
- }
- return s;
-}
-
-const std::unique_ptr<Kokkos::Cuda> &Kokkos::Impl::cuda_get_deep_copy_space(
- bool initialize) {
- static std::unique_ptr<Cuda> space = nullptr;
- if (!space && initialize)
- space = std::make_unique<Cuda>(Kokkos::Impl::cuda_get_deep_copy_stream());
- return space;
-}
-
-namespace Kokkos {
-namespace Impl {
-
-namespace {
-
-static std::atomic<int> num_uvm_allocations(0);
-
-} // namespace
-
-void DeepCopyCuda(void *dst, const void *src, size_t n) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
-}
-
-void DeepCopyAsyncCuda(const Cuda &instance, void *dst, const void *src,
- size_t n) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
-}
-
-void DeepCopyAsyncCuda(void *dst, const void *src, size_t n) {
- cudaStream_t s = cuda_get_deep_copy_stream();
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, s));
- Impl::cuda_stream_synchronize(
- s,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- DeepCopyResourceSynchronization,
- "Kokkos::Impl::DeepCopyAsyncCuda: Deep Copy Stream Sync");
-}
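-
-// Editor's note: the three entry points above differ only in how they
-// synchronize (sketch, not original source):
-//
-//   DeepCopyCuda(dst, src, n); // blocking cudaMemcpy
-//   DeepCopyAsyncCuda(exec, dst, src, n); // async on exec's stream, no fence
-//   DeepCopyAsyncCuda(dst, src, n); // dedicated copy stream, fenced after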
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-KOKKOS_DEPRECATED void CudaSpace::access_error() {
- const std::string msg(
- "Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
- "non-Cuda space");
- Kokkos::Impl::throw_runtime_exception(msg);
-}
-
-KOKKOS_DEPRECATED void CudaSpace::access_error(const void *const) {
- const std::string msg(
- "Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
- "non-Cuda space");
- Kokkos::Impl::throw_runtime_exception(msg);
-}
-#endif
-
-/*--------------------------------------------------------------------------*/
-
-bool CudaUVMSpace::available() {
-#if defined(CUDA_VERSION) && !defined(__APPLE__)
- enum : bool { UVM_available = true };
-#else
- enum : bool { UVM_available = false };
-#endif
- return UVM_available;
-}
-
-/*--------------------------------------------------------------------------*/
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-int CudaUVMSpace::number_of_allocations() {
- return Kokkos::Impl::num_uvm_allocations.load();
-}
-#endif
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
-// The purpose of the following variable is to allow a state-based choice
-// for pinning UVM allocations to the CPU. For now this is considered
-// an experimental debugging capability - with the potential to work around
-// some CUDA issues.
-bool CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = false;
-
-bool CudaUVMSpace::cuda_pin_uvm_to_host() {
- return CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v;
-}
-void CudaUVMSpace::cuda_set_pin_uvm_to_host(bool val) {
- CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = val;
-}
-#endif
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
-bool kokkos_impl_cuda_pin_uvm_to_host() {
- return Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host();
-}
-
-void kokkos_impl_cuda_set_pin_uvm_to_host(bool val) {
- Kokkos::CudaUVMSpace::cuda_set_pin_uvm_to_host(val);
-}
-#endif
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-
-CudaSpace::CudaSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
-
-CudaUVMSpace::CudaUVMSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
-
-CudaHostPinnedSpace::CudaHostPinnedSpace() {}
-
-int memory_threshold_g = 40000; // 40 kB
-
-//==============================================================================
-// <editor-fold desc="allocate()"> {{{1
-
-void *CudaSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-
-void *CudaSpace::allocate(const Cuda &exec_space, const char *arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
-}
-void *CudaSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-
-namespace {
-void *impl_allocate_common(const Cuda &exec_space, const char *arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle,
- bool exec_space_provided) {
- void *ptr = nullptr;
-
-#ifndef CUDART_VERSION
-#error CUDART_VERSION undefined!
-#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
- cudaError_t error_code;
- if (arg_alloc_size >= memory_threshold_g) {
- if (exec_space_provided) {
- cudaStream_t stream = exec_space.cuda_stream();
- error_code = cudaMallocAsync(&ptr, arg_alloc_size, stream);
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
- } else {
- error_code = cudaMallocAsync(&ptr, arg_alloc_size, 0);
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
- }
- } else {
- error_code = cudaMalloc(&ptr, arg_alloc_size);
- }
-#else
- (void)exec_space;
- (void)exec_space_provided;
- auto error_code = cudaMalloc(&ptr, arg_alloc_size);
-#endif
- if (error_code != cudaSuccess) { // TODO tag as unlikely branch
- cudaGetLastError(); // Calling cudaGetLastError() is the only way to clear
- // CUDA's sticky error state; do so before turning
- // the failure into an exception below.
- throw Experimental::CudaRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- Experimental::RawMemoryAllocationFailure::AllocationMechanism::
- CudaMalloc);
- }
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
- return ptr;
-}
-} // namespace
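-
-// Editor's note on the dispatch above: when the stream-ordered allocator is
-// usable (CUDART >= 11.2 and KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC), requests
-// of at least memory_threshold_g bytes go through cudaMallocAsync followed
-// by a synchronization, smaller requests use plain cudaMalloc, and otherwise
-// every request falls back to cudaMalloc.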
-
-void *CudaSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- return impl_allocate_common(Kokkos::Cuda{}, arg_label, arg_alloc_size,
- arg_logical_size, arg_handle, false);
-}
-
-void *CudaSpace::impl_allocate(
- const Cuda &exec_space, const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- return impl_allocate_common(exec_space, arg_label, arg_alloc_size,
- arg_logical_size, arg_handle, true);
-}
-
-void *CudaUVMSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void *CudaUVMSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void *CudaUVMSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- void *ptr = nullptr;
-
- Cuda::impl_static_fence(
- "Kokkos::CudaUVMSpace::impl_allocate: Pre UVM Allocation");
- if (arg_alloc_size > 0) {
- Kokkos::Impl::num_uvm_allocations++;
-
- auto error_code =
- cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
-
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
- if (Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host())
- cudaMemAdvise(ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation,
- cudaCpuDeviceId);
-#endif
-
- if (error_code != cudaSuccess) { // TODO tag as unlikely branch
- cudaGetLastError(); // Calling cudaGetLastError() is the only way to
- // clear CUDA's sticky error state; do so before
- // turning the failure into an exception below.
- throw Experimental::CudaRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- Experimental::RawMemoryAllocationFailure::AllocationMechanism::
- CudaMallocManaged);
- }
- }
- Cuda::impl_static_fence(
- "Kokkos::CudaUVMSpace::impl_allocate: Post UVM Allocation");
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
- return ptr;
-}
-void *CudaHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void *CudaHostPinnedSpace::allocate(const char *arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void *CudaHostPinnedSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- void *ptr = nullptr;
-
- auto error_code = cudaHostAlloc(&ptr, arg_alloc_size, cudaHostAllocDefault);
- if (error_code != cudaSuccess) { // TODO tag as unlikely branch
- cudaGetLastError(); // Calling cudaGetLastError() is the only way to clear
- // CUDA's sticky error state; do so before turning
- // the failure into an exception below.
- throw Experimental::CudaRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- Experimental::RawMemoryAllocationFailure::AllocationMechanism::
- CudaHostAlloc);
- }
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
- return ptr;
-}
-
-// </editor-fold> end allocate() }}}1
-//==============================================================================
-void CudaSpace::deallocate(void *const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-void CudaSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void CudaSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- try {
-#ifndef CUDART_VERSION
-#error CUDART_VERSION undefined!
-#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
- if (arg_alloc_size >= memory_threshold_g) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeAsync(arg_alloc_ptr, 0));
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
- } else {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
- }
-#else
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
-#endif
- } catch (...) {
- }
-}
-void CudaUVMSpace::deallocate(void *const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void CudaUVMSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void CudaUVMSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- Cuda::impl_static_fence(
- "Kokkos::CudaUVMSpace::impl_deallocate: Pre UVM Deallocation");
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- try {
- if (arg_alloc_ptr != nullptr) {
- Kokkos::Impl::num_uvm_allocations--;
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
- }
- } catch (...) {
- }
- Cuda::impl_static_fence(
- "Kokkos::CudaUVMSpace::impl_deallocate: Post UVM Deallocation");
-}
-
-void CudaHostPinnedSpace::deallocate(void *const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-void CudaHostPinnedSpace::deallocate(const char *arg_label,
- void *const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-
-void CudaHostPinnedSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- try {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(arg_alloc_ptr));
- } catch (...) {
- }
-}
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::s_root_record;
-#endif
-
-::cudaTextureObject_t
-SharedAllocationRecord<Kokkos::CudaSpace, void>::attach_texture_object(
- const unsigned sizeof_alias, void *const alloc_ptr,
- size_t const alloc_size) {
- enum { TEXTURE_BOUND_1D = 1u << 27 };
-
- if ((alloc_ptr == nullptr) ||
- (sizeof_alias * TEXTURE_BOUND_1D <= alloc_size)) {
- std::ostringstream msg;
- msg << "Kokkos::CudaSpace ERROR: Cannot attach texture object to"
- << " alloc_ptr(" << alloc_ptr << ")"
- << " alloc_size(" << alloc_size << ")"
- << " max_size(" << (sizeof_alias * TEXTURE_BOUND_1D) << ")";
- std::cerr << msg.str() << std::endl;
- std::cerr.flush();
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
- ::cudaTextureObject_t tex_obj;
-
- struct cudaResourceDesc resDesc;
- struct cudaTextureDesc texDesc;
-
- memset(&resDesc, 0, sizeof(resDesc));
- memset(&texDesc, 0, sizeof(texDesc));
-
- resDesc.resType = cudaResourceTypeLinear;
- resDesc.res.linear.desc =
- (sizeof_alias == 4
- ? cudaCreateChannelDesc<int>()
- : (sizeof_alias == 8
- ? cudaCreateChannelDesc< ::int2>()
- :
- /* sizeof_alias == 16 */ cudaCreateChannelDesc< ::int4>()));
- resDesc.res.linear.sizeInBytes = alloc_size;
- resDesc.res.linear.devPtr = alloc_ptr;
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaCreateTextureObject(&tex_obj, &resDesc, &texDesc, nullptr));
-
- return tex_obj;
-}
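-
-// Editor's note: TEXTURE_BOUND_1D (1u << 27) reflects the CUDA limit on the
-// element count addressable through a 1D linear texture, so the guard above
-// rejects allocations with alloc_size / sizeof_alias >= 2^27 elements.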
-
-//==============================================================================
-// <editor-fold desc="SharedAllocationRecord destructors"> {{{1
-
-SharedAllocationRecord<Kokkos::CudaSpace, void>::~SharedAllocationRecord() {
- auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
-}
-
-void SharedAllocationRecord<Kokkos::CudaSpace, void>::deep_copy_header_no_exec(
- void *ptr, const void *header) {
- Kokkos::Cuda exec;
- Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(exec, ptr, header,
- sizeof(SharedAllocationHeader));
- exec.fence(
- "SharedAllocationRecord<Kokkos::CudaSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::~SharedAllocationRecord() {
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
-}
-
-SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
- void>::~SharedAllocationRecord() {
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
-}
-
-// </editor-fold> end SharedAllocationRecord destructors }}}1
-//==============================================================================
-
-//==============================================================================
-// <editor-fold desc="SharedAllocationRecord constructors"> {{{1
-
-SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
- const Kokkos::CudaSpace &arg_space, const std::string &arg_label,
- const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_tex_obj(0),
- m_space(arg_space) {
-
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- // Copy to device memory
- Kokkos::Cuda exec;
- Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(
- exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
- exec.fence(
- "SharedAllocationRecord<Kokkos::CudaSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
- const Kokkos::Cuda &arg_exec_space, const Kokkos::CudaSpace &arg_space,
- const std::string &arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_exec_space, arg_space,
- arg_label, arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_tex_obj(0),
- m_space(arg_space) {
-
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- // Copy to device memory
- Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(arg_exec_space,
- RecordBase::m_alloc_ptr, &header,
- sizeof(SharedAllocationHeader));
-}
-
-SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::SharedAllocationRecord(
- const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_label,
- const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_tex_obj(0),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::
- SharedAllocationRecord(
- const Kokkos::CudaHostPinnedSpace &arg_space,
- const std::string &arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-// </editor-fold> end SharedAllocationRecord constructors }}}1
-//==============================================================================
-
-void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
- bool to_device) {
- if ((ptr == nullptr) || (bytes == 0)) return;
- cudaPointerAttributes attr;
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaPointerGetAttributes(&attr, ptr));
- // Measurements showed that prefetching towards the host slows DualView
- // syncs down, probably because the latency of the device-to-host pull is
- // not too bad in the first place. To change that behavior, provide
- // cudaCpuDeviceId as the target device when to_device is false.
-#if CUDA_VERSION < 10000
- bool is_managed = attr.isManaged;
-#else
- bool is_managed = attr.type == cudaMemoryTypeManaged;
-#endif
- if (to_device && is_managed &&
- space.cuda_device_prop().concurrentManagedAccess) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemPrefetchAsync(
- ptr, bytes, space.cuda_device(), space.cuda_stream()));
- }
-}
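-
-// Editor's sketch of a call site (ptr and nbytes are assumptions): prefetch
-// a managed allocation to the device ahead of a kernel launch.
-//
-//   Kokkos::Cuda exec;
-//   Kokkos::Impl::cuda_prefetch_pointer(exec, ptr, nbytes,
-//                                       /*to_device=*/true);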
-
-} // namespace Impl
-} // namespace Kokkos
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes
-// here, where we have access to the associated *_timpl.hpp header files.
-template class SharedAllocationRecordCommon<Kokkos::CudaSpace>;
-template class HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
-template class SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
-template class SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
-
-#else
-void KOKKOS_CORE_SRC_CUDA_CUDASPACE_PREVENT_LINK_ERROR() {}
-#endif // KOKKOS_ENABLE_CUDA
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
-#define KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-
-#include <impl/Kokkos_Traits.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class DestructFunctor>
-SharedAllocationRecord* shared_allocation_record(
- Kokkos::CudaSpace const& arg_space, void* const arg_alloc_ptr,
- DestructFunctor const& arg_destruct) {
- SharedAllocationRecord* const record =
- SharedAllocationRecord::get_record(arg_alloc_ptr);
-
- // assert: record != 0
-
- // assert: sizeof(DestructFunctor) <= record->m_destruct_size
-
- // assert: record->m_destruct_function == 0
-
- DestructFunctor* const functor = reinterpret_cast<DestructFunctor*>(
- reinterpret_cast<uintptr_t>(record) + sizeof(SharedAllocationRecord));
-
- new (functor) DestructFunctor(arg_destruct);
-
- record->m_destruct_functor = &shared_allocation_destroy<DestructFunctor>;
-
- return record;
-}
-
-/// class CudaUnmanagedAllocator
-/// does nothing when deallocate(ptr,size) is called
-struct CudaUnmanagedAllocator {
- static const char* name() { return "Cuda Unmanaged Allocator"; }
-
- static void deallocate(void* /*ptr*/, size_t /*size*/) {}
-
- static bool support_texture_binding() { return true; }
-};
-
-/// class CudaUnmanagedUVMAllocator
-/// does nothing when deallocate(ptr,size) is called
-struct CudaUnmanagedUVMAllocator {
- static const char* name() { return "Cuda Unmanaged UVM Allocator"; }
-
- static void deallocate(void* /*ptr*/, size_t /*size*/) {}
-
- static bool support_texture_binding() { return true; }
-};
-
-/// class CudaUnmanagedHostAllocator
-/// does nothing when deallocate(ptr,size) is called
-class CudaUnmanagedHostAllocator {
- public:
- static const char* name() { return "Cuda Unmanaged Host Allocator"; }
- // Unmanaged deallocate does nothing
- static void deallocate(void* /*ptr*/, size_t /*size*/) {}
-};
-
-/// class CudaMallocAllocator
-class CudaMallocAllocator {
- public:
- static const char* name() { return "Cuda Malloc Allocator"; }
-
- static void* allocate(size_t size);
-
- static void deallocate(void* ptr, size_t);
-
- static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
-
- static bool support_texture_binding() { return true; }
-};
-
-/// class CudaUVMAllocator
-class CudaUVMAllocator {
- public:
- static const char* name() { return "Cuda UVM Allocator"; }
-
- static void* allocate(size_t size);
-
- static void deallocate(void* ptr, size_t);
-
- static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
-
- static bool support_texture_binding() { return true; }
-};
-
-/// class CudaHostAllocator
-class CudaHostAllocator {
- public:
- static const char* name() { return "Cuda Host Allocator"; }
-
- static void* allocate(size_t size);
-
- static void deallocate(void* ptr, size_t);
-
- static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
-};
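-
-// Editor's note: the allocator classes above share a static interface
-// (name/allocate/deallocate, optionally reallocate and
-// support_texture_binding), so callers can select one generically.
-// Illustrative only:
-//
-//   void* p = Kokkos::Impl::CudaMallocAllocator::allocate(n);
-//   Kokkos::Impl::CudaMallocAllocator::deallocate(p, n);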
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif // KOKKOS_ENABLE_CUDA
-
-#endif // #ifndef KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
+++ /dev/null
-/*
-@HEADER
-================================================================================
-
-ORIGINAL LICENSE
-----------------
-
-Copyright (c) 2018, NVIDIA Corporation
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-================================================================================
-
-LICENSE ASSOCIATED WITH SUBSEQUENT MODIFICATIONS
-------------------------------------------------
-
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-@HEADER
-*/
-
-#include <Kokkos_Macros.hpp>
-#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-
-#include <cassert>
-
-#ifndef _SIMT_DETAILS_CONFIG
-#define _SIMT_DETAILS_CONFIG
-
-namespace Kokkos {
-namespace Impl {
-
-#ifndef __simt_scope
-// Modification: Kokkos GPU atomics should default to `gpu` scope
-#define __simt_scope "gpu"
-#endif
-
-#define __simt_fence_signal_() asm volatile("" ::: "memory")
-#define __simt_fence_sc_() \
- asm volatile("fence.sc." __simt_scope ";" ::: "memory")
-#define __simt_fence_() asm volatile("fence." __simt_scope ";" ::: "memory")
-
-#define __simt_load_acquire_8_as_32(ptr, ret) \
- asm volatile("ld.acquire." __simt_scope ".b8 %0, [%1];" \
- : "=r"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_load_relaxed_8_as_32(ptr, ret) \
- asm volatile("ld.relaxed." __simt_scope ".b8 %0, [%1];" \
- : "=r"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_store_release_8_as_32(ptr, desired) \
- asm volatile("st.release." __simt_scope ".b8 [%0], %1;" ::"l"(ptr), \
- "r"(desired) \
- : "memory")
-#define __simt_store_relaxed_8_as_32(ptr, desired) \
- asm volatile("st.relaxed." __simt_scope ".b8 [%0], %1;" ::"l"(ptr), \
- "r"(desired) \
- : "memory")
-
-#define __simt_load_acquire_16(ptr, ret) \
- asm volatile("ld.acquire." __simt_scope ".b16 %0, [%1];" \
- : "=h"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_load_relaxed_16(ptr, ret) \
- asm volatile("ld.relaxed." __simt_scope ".b16 %0, [%1];" \
- : "=h"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_store_release_16(ptr, desired) \
- asm volatile("st.release." __simt_scope ".b16 [%0], %1;" ::"l"(ptr), \
- "h"(desired) \
- : "memory")
-#define __simt_store_relaxed_16(ptr, desired) \
- asm volatile("st.relaxed." __simt_scope ".b16 [%0], %1;" ::"l"(ptr), \
- "h"(desired) \
- : "memory")
-
-#define __simt_load_acquire_32(ptr, ret) \
- asm volatile("ld.acquire." __simt_scope ".b32 %0, [%1];" \
- : "=r"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_load_relaxed_32(ptr, ret) \
- asm volatile("ld.relaxed." __simt_scope ".b32 %0, [%1];" \
- : "=r"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_store_release_32(ptr, desired) \
- asm volatile("st.release." __simt_scope ".b32 [%0], %1;" ::"l"(ptr), \
- "r"(desired) \
- : "memory")
-#define __simt_store_relaxed_32(ptr, desired) \
- asm volatile("st.relaxed." __simt_scope ".b32 [%0], %1;" ::"l"(ptr), \
- "r"(desired) \
- : "memory")
-#define __simt_exch_release_32(ptr, old, desired) \
- asm volatile("atom.exch.release." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(desired) \
- : "memory")
-#define __simt_exch_acquire_32(ptr, old, desired) \
- asm volatile("atom.exch.acquire." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(desired) \
- : "memory")
-#define __simt_exch_acq_rel_32(ptr, old, desired) \
- asm volatile("atom.exch.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(desired) \
- : "memory")
-#define __simt_exch_relaxed_32(ptr, old, desired) \
- asm volatile("atom.exch.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(desired) \
- : "memory")
-#define __simt_cas_release_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.release." __simt_scope ".b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define __simt_cas_acquire_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.acquire." __simt_scope ".b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define __simt_cas_acq_rel_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.acq_rel." __simt_scope ".b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define __simt_cas_relaxed_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.relaxed." __simt_scope ".b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define __simt_add_release_32(ptr, old, addend) \
- asm volatile("atom.add.release." __simt_scope ".u32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(addend) \
- : "memory")
-#define __simt_add_acquire_32(ptr, old, addend) \
- asm volatile("atom.add.acquire." __simt_scope ".u32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(addend) \
- : "memory")
-#define __simt_add_acq_rel_32(ptr, old, addend) \
- asm volatile("atom.add.acq_rel." __simt_scope ".u32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(addend) \
- : "memory")
-#define __simt_add_relaxed_32(ptr, old, addend) \
- asm volatile("atom.add.relaxed." __simt_scope ".u32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(addend) \
- : "memory")
-#define __simt_and_release_32(ptr, old, andend) \
- asm volatile("atom.and.release." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(andend) \
- : "memory")
-#define __simt_and_acquire_32(ptr, old, andend) \
- asm volatile("atom.and.acquire." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(andend) \
- : "memory")
-#define __simt_and_acq_rel_32(ptr, old, andend) \
- asm volatile("atom.and.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(andend) \
- : "memory")
-#define __simt_and_relaxed_32(ptr, old, andend) \
- asm volatile("atom.and.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(andend) \
- : "memory")
-#define __simt_or_release_32(ptr, old, orend) \
- asm volatile("atom.or.release." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(orend) \
- : "memory")
-#define __simt_or_acquire_32(ptr, old, orend) \
- asm volatile("atom.or.acquire." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(orend) \
- : "memory")
-#define __simt_or_acq_rel_32(ptr, old, orend) \
- asm volatile("atom.or.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(orend) \
- : "memory")
-#define __simt_or_relaxed_32(ptr, old, orend) \
- asm volatile("atom.or.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(orend) \
- : "memory")
-#define __simt_xor_release_32(ptr, old, xorend) \
- asm volatile("atom.xor.release." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(xorend) \
- : "memory")
-#define __simt_xor_acquire_32(ptr, old, xorend) \
- asm volatile("atom.xor.acquire." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(xorend) \
- : "memory")
-#define __simt_xor_acq_rel_32(ptr, old, xorend) \
- asm volatile("atom.xor.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(xorend) \
- : "memory")
-#define __simt_xor_relaxed_32(ptr, old, xorend) \
- asm volatile("atom.xor.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
- : "=r"(old) \
- : "l"(ptr), "r"(xorend) \
- : "memory")
-
-#define __simt_load_acquire_64(ptr, ret) \
- asm volatile("ld.acquire." __simt_scope ".b64 %0, [%1];" \
- : "=l"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_load_relaxed_64(ptr, ret) \
- asm volatile("ld.relaxed." __simt_scope ".b64 %0, [%1];" \
- : "=l"(ret) \
- : "l"(ptr) \
- : "memory")
-#define __simt_store_release_64(ptr, desired) \
- asm volatile("st.release." __simt_scope ".b64 [%0], %1;" ::"l"(ptr), \
- "l"(desired) \
- : "memory")
-#define __simt_store_relaxed_64(ptr, desired) \
- asm volatile("st.relaxed." __simt_scope ".b64 [%0], %1;" ::"l"(ptr), \
- "l"(desired) \
- : "memory")
-#define __simt_exch_release_64(ptr, old, desired) \
- asm volatile("atom.exch.release." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(desired) \
- : "memory")
-#define __simt_exch_acquire_64(ptr, old, desired) \
- asm volatile("atom.exch.acquire." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(desired) \
- : "memory")
-#define __simt_exch_acq_rel_64(ptr, old, desired) \
- asm volatile("atom.exch.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(desired) \
- : "memory")
-#define __simt_exch_relaxed_64(ptr, old, desired) \
- asm volatile("atom.exch.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(desired) \
- : "memory")
-#define __simt_cas_release_64(ptr, old, expected, desired) \
- asm volatile("atom.cas.release." __simt_scope ".b64 %0, [%1], %2, %3;" \
- : "=l"(old) \
- : "l"(ptr), "l"(expected), "l"(desired) \
- : "memory")
-#define __simt_cas_acquire_64(ptr, old, expected, desired) \
- asm volatile("atom.cas.acquire." __simt_scope ".b64 %0, [%1], %2, %3;" \
- : "=l"(old) \
- : "l"(ptr), "l"(expected), "l"(desired) \
- : "memory")
-#define __simt_cas_acq_rel_64(ptr, old, expected, desired) \
- asm volatile("atom.cas.acq_rel." __simt_scope ".b64 %0, [%1], %2, %3;" \
- : "=l"(old) \
- : "l"(ptr), "l"(expected), "l"(desired) \
- : "memory")
-#define __simt_cas_relaxed_64(ptr, old, expected, desired) \
- asm volatile("atom.cas.relaxed." __simt_scope ".b64 %0, [%1], %2, %3;" \
- : "=l"(old) \
- : "l"(ptr), "l"(expected), "l"(desired) \
- : "memory")
-#define __simt_add_release_64(ptr, old, addend) \
- asm volatile("atom.add.release." __simt_scope ".u64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(addend) \
- : "memory")
-#define __simt_add_acquire_64(ptr, old, addend) \
- asm volatile("atom.add.acquire." __simt_scope ".u64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(addend) \
- : "memory")
-#define __simt_add_acq_rel_64(ptr, old, addend) \
- asm volatile("atom.add.acq_rel." __simt_scope ".u64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(addend) \
- : "memory")
-#define __simt_add_relaxed_64(ptr, old, addend) \
- asm volatile("atom.add.relaxed." __simt_scope ".u64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(addend) \
- : "memory")
-#define __simt_and_release_64(ptr, old, andend) \
- asm volatile("atom.and.release." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(andend) \
- : "memory")
-#define __simt_and_acquire_64(ptr, old, andend) \
- asm volatile("atom.and.acquire." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(andend) \
- : "memory")
-#define __simt_and_acq_rel_64(ptr, old, andend) \
- asm volatile("atom.and.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(andend) \
- : "memory")
-#define __simt_and_relaxed_64(ptr, old, andend) \
- asm volatile("atom.and.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(andend) \
- : "memory")
-#define __simt_or_release_64(ptr, old, orend) \
- asm volatile("atom.or.release." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(orend) \
- : "memory")
-#define __simt_or_acquire_64(ptr, old, orend) \
- asm volatile("atom.or.acquire." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(orend) \
- : "memory")
-#define __simt_or_acq_rel_64(ptr, old, orend) \
- asm volatile("atom.or.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(orend) \
- : "memory")
-#define __simt_or_relaxed_64(ptr, old, orend) \
- asm volatile("atom.or.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(orend) \
- : "memory")
-#define __simt_xor_release_64(ptr, old, xorend) \
- asm volatile("atom.xor.release." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(xorend) \
- : "memory")
-#define __simt_xor_acquire_64(ptr, old, xorend) \
- asm volatile("atom.xor.acquire." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(xorend) \
- : "memory")
-#define __simt_xor_acq_rel_64(ptr, old, xorend) \
- asm volatile("atom.xor.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(xorend) \
- : "memory")
-#define __simt_xor_relaxed_64(ptr, old, xorend) \
- asm volatile("atom.xor.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
- : "=l"(old) \
- : "l"(ptr), "l"(xorend) \
- : "memory")
-
-#define __simt_nanosleep(timeout) \
- asm volatile("nanosleep.u32 %0;" ::"r"(unsigned(timeout)) :)
-
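Taken together, these macros wrap each PTX memory instruction the implementation needs, one inline-asm statement per width and memory order. A minimal sketch of how they are meant to be invoked from device code (illustrative only, not part of the original file; `ld.acquire`/`st.release` require sm_70+ PTX):

```cpp
// Hypothetical device helpers built on the macros above. Each macro expands
// to a single inline-asm PTX instruction at the default "gpu" scope.
__device__ int load_flag_acquire(const int *flag) {
  int value;
  __simt_load_acquire_32(flag, value);  // ld.acquire.gpu.b32
  return value;
}

__device__ void store_flag_release(int *flag, int value) {
  __simt_store_release_32(flag, value);  // st.release.gpu.b32
}
```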
-/*
- definitions
-*/
-
-#ifndef __GCC_ATOMIC_BOOL_LOCK_FREE
-#define __GCC_ATOMIC_BOOL_LOCK_FREE 2
-#define __GCC_ATOMIC_CHAR_LOCK_FREE 2
-#define __GCC_ATOMIC_CHAR16_T_LOCK_FREE 2
-#define __GCC_ATOMIC_CHAR32_T_LOCK_FREE 2
-#define __GCC_ATOMIC_WCHAR_T_LOCK_FREE 2
-#define __GCC_ATOMIC_SHORT_LOCK_FREE 2
-#define __GCC_ATOMIC_INT_LOCK_FREE 2
-#define __GCC_ATOMIC_LONG_LOCK_FREE 2
-#define __GCC_ATOMIC_LLONG_LOCK_FREE 2
-#define __GCC_ATOMIC_POINTER_LOCK_FREE 2
-#endif
-
-#ifndef __ATOMIC_RELAXED
-#define __ATOMIC_RELAXED 0
-#define __ATOMIC_CONSUME 1
-#define __ATOMIC_ACQUIRE 2
-#define __ATOMIC_RELEASE 3
-#define __ATOMIC_ACQ_REL 4
-#define __ATOMIC_SEQ_CST 5
-#endif
-
-inline __device__ int __stronger_order_simt_(int a, int b) {
- if (b == __ATOMIC_SEQ_CST) return __ATOMIC_SEQ_CST;
- if (b == __ATOMIC_RELAXED) return a;
- switch (a) {
- case __ATOMIC_SEQ_CST:
- case __ATOMIC_ACQ_REL: return a;
- case __ATOMIC_CONSUME:
- case __ATOMIC_ACQUIRE:
- if (b != __ATOMIC_ACQUIRE)
- return __ATOMIC_ACQ_REL;
- else
- return __ATOMIC_ACQUIRE;
- case __ATOMIC_RELEASE:
- if (b != __ATOMIC_RELEASE)
- return __ATOMIC_ACQ_REL;
- else
- return __ATOMIC_RELEASE;
- case __ATOMIC_RELAXED: return b;
- default: assert(0);
- }
- return __ATOMIC_SEQ_CST;
-}
-
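Worked example of the promotion above: a compare-exchange issued with success order `__ATOMIC_RELEASE` and failure order `__ATOMIC_ACQUIRE` lands in the `__ATOMIC_RELEASE` case with `b != __ATOMIC_RELEASE`, so it is promoted to `__ATOMIC_ACQ_REL`, the weakest order at least as strong as both arguments. This lets a single PTX instruction serve both the success and the failure path.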
-/*
- base
-*/
-
-#define DO__atomic_load_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- void __device__ __atomic_load_simt_(const type *ptr, type *ret, \
- int memorder) { \
- int##bits##_t tmp = 0; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_load_acquire_##bits(ptr, tmp); break; \
- case __ATOMIC_RELAXED: __simt_load_relaxed_##bits(ptr, tmp); break; \
- default: assert(0); \
- } \
- memcpy(ret, &tmp, bytes); \
- }
-DO__atomic_load_simt_(1, 32) DO__atomic_load_simt_(2, 16)
- DO__atomic_load_simt_(4, 32) DO__atomic_load_simt_(8, 64)
-
- template <class type>
- type __device__ __atomic_load_n_simt_(const type *ptr, int memorder) {
- type ret;
- __atomic_load_simt_(ptr, &ret, memorder);
- return ret;
-}
-
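Note the intentional fallthrough in the switch above: `__ATOMIC_SEQ_CST` first emits a `fence.sc` and then falls through into the acquire load, which is how a sequentially consistent access is composed from the available PTX primitives. The store, exchange, and fetch-op generators below rely on the same fallthrough idiom.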
-#define DO__atomic_store_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- void __device__ __atomic_store_simt_(type *ptr, type *val, int memorder) { \
- int##bits##_t tmp = 0; \
- memcpy(&tmp, val, bytes); \
- switch (memorder) { \
- case __ATOMIC_RELEASE: __simt_store_release_##bits(ptr, tmp); break; \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_RELAXED: __simt_store_relaxed_##bits(ptr, tmp); break; \
- default: assert(0); \
- } \
- }
-DO__atomic_store_simt_(1, 32) DO__atomic_store_simt_(2, 16)
- DO__atomic_store_simt_(4, 32) DO__atomic_store_simt_(8, 64)
-
- template <class type>
- void __device__
- __atomic_store_n_simt_(type *ptr, type val, int memorder) {
- __atomic_store_simt_(ptr, &val, memorder);
-}
-
-#define DO__atomic_compare_exchange_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- bool __device__ __atomic_compare_exchange_simt_( \
- type *ptr, type *expected, const type *desired, bool, \
- int success_memorder, int failure_memorder) { \
- int##bits##_t tmp = 0, old = 0, old_tmp; \
- memcpy(&tmp, desired, bytes); \
- memcpy(&old, expected, bytes); \
- old_tmp = old; \
- switch (__stronger_order_simt_(success_memorder, failure_memorder)) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: \
- __simt_cas_acquire_##bits(ptr, old, old_tmp, tmp); \
- break; \
- case __ATOMIC_ACQ_REL: \
- __simt_cas_acq_rel_##bits(ptr, old, old_tmp, tmp); \
- break; \
- case __ATOMIC_RELEASE: \
- __simt_cas_release_##bits(ptr, old, old_tmp, tmp); \
- break; \
- case __ATOMIC_RELAXED: \
- __simt_cas_relaxed_##bits(ptr, old, old_tmp, tmp); \
- break; \
- default: assert(0); \
- } \
- bool const ret = old == old_tmp; \
- memcpy(expected, &old, bytes); \
- return ret; \
- }
-DO__atomic_compare_exchange_simt_(4, 32)
- DO__atomic_compare_exchange_simt_(8, 64)
-
- template <class type, std::enable_if_t<sizeof(type) <= 2, int> = 0>
- bool __device__
- __atomic_compare_exchange_simt_(type *ptr, type *expected,
- const type *desired, bool,
- int success_memorder,
- int failure_memorder) {
- using R = std::conditional_t<std::is_volatile<type>::value, volatile uint32_t,
- uint32_t>;
- auto const aligned = (R *)((intptr_t)ptr & ~(sizeof(uint32_t) - 1));
- auto const offset = uint32_t((intptr_t)ptr & (sizeof(uint32_t) - 1)) * 8;
- auto const mask = ((1 << sizeof(type) * 8) - 1) << offset;
-
- uint32_t old = *expected << offset, old_value;
- while (1) {
- old_value = (old & mask) >> offset;
- if (old_value != *expected) break;
- uint32_t const attempt = (old & ~mask) | (*desired << offset);
- if (__atomic_compare_exchange_simt_(aligned, &old, &attempt, true,
- success_memorder, failure_memorder))
- return true;
- }
- *expected = old_value;
- return false;
-}
-
-template <class type>
-bool __device__ __atomic_compare_exchange_n_simt_(type *ptr, type *expected,
- type desired, bool weak,
- int success_memorder,
- int failure_memorder) {
- return __atomic_compare_exchange_simt_(ptr, expected, &desired, weak,
- success_memorder, failure_memorder);
-}
-
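The 1- and 2-byte overload above emulates a narrow CAS on the enclosing aligned 32-bit word, since the PTX atomics used here start at `.b32`. A hedged host-side sketch of the address arithmetic, with a hypothetical 1-byte object at address 0x1002:

```cpp
#include <cstdint>

// Mirrors the aligned/offset/mask computation above, with concrete values.
void subword_cas_arithmetic_demo() {
  std::uintptr_t p       = 0x1002;                    // address of the byte
  std::uintptr_t aligned = p & ~std::uintptr_t(3);    // 0x1000: enclosing word
  std::uint32_t  offset  = std::uint32_t(p & 3) * 8;  // 16: bit offset in word
  std::uint32_t  mask    = 0xFFu << offset;           // 0x00FF0000: byte's lane
  (void)aligned;
  (void)mask;
  // A full-word CAS swaps only the masked lane while re-asserting the
  // surrounding bytes; if a neighboring byte changed concurrently, the CAS
  // fails and the loop above retries on the refreshed word.
}
```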
-#define DO__atomic_exchange_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- void __device__ __atomic_exchange_simt_(type *ptr, type *val, type *ret, \
- int memorder) { \
- int##bits##_t tmp = 0; \
- memcpy(&tmp, val, bytes); \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_exch_acquire_##bits(ptr, tmp, tmp); break; \
- case __ATOMIC_ACQ_REL: __simt_exch_acq_rel_##bits(ptr, tmp, tmp); break; \
- case __ATOMIC_RELEASE: __simt_exch_release_##bits(ptr, tmp, tmp); break; \
- case __ATOMIC_RELAXED: __simt_exch_relaxed_##bits(ptr, tmp, tmp); break; \
- default: assert(0); \
- } \
- memcpy(ret, &tmp, bytes); \
- }
-DO__atomic_exchange_simt_(4, 32) DO__atomic_exchange_simt_(8, 64)
-
- template <class type, std::enable_if_t<sizeof(type) <= 2, int> = 0>
- void __device__
- __atomic_exchange_simt_(type *ptr, type *val, type *ret, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- while (!__atomic_compare_exchange_simt_(ptr, &expected, val, true, memorder,
- memorder))
- ;
- *ret = expected;
-}
-
-template <class type>
-type __device__ __atomic_exchange_n_simt_(type *ptr, type val, int memorder) {
- type ret;
- __atomic_exchange_simt_(ptr, &val, &ret, memorder);
- return ret;
-}
-
-#define DO__atomic_fetch_add_simt_(bytes, bits) \
- template <class type, class delta, \
- std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- type __device__ __atomic_fetch_add_simt_(type *ptr, delta val, \
- int memorder) { \
- type ret; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_add_acquire_##bits(ptr, ret, val); break; \
- case __ATOMIC_ACQ_REL: __simt_add_acq_rel_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELEASE: __simt_add_release_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELAXED: __simt_add_relaxed_##bits(ptr, ret, val); break; \
- default: assert(0); \
- } \
- return ret; \
- }
-DO__atomic_fetch_add_simt_(4, 32) DO__atomic_fetch_add_simt_(8, 64)
-
- template <class type, class delta,
- std::enable_if_t<sizeof(type) <= 2, int> = 0>
- type __device__
- __atomic_fetch_add_simt_(type *ptr, delta val, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- type desired = expected + val;
- while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
- memorder, memorder))
- desired = expected + val;  // recompute: the failed CAS refreshed `expected`
- return expected;
-}
-
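The same load-then-CAS fallback, including the recomputation of `desired` after every failed attempt, is repeated for the sub, and, xor, and or variants below; only the combining expression changes.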
-#define DO__atomic_fetch_sub_simt_(bytes, bits) \
- template <class type, class delta, \
- std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- type __device__ __atomic_fetch_sub_simt_(type *ptr, delta val, \
- int memorder) { \
- type ret; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_add_acquire_##bits(ptr, ret, -val); break; \
- case __ATOMIC_ACQ_REL: __simt_add_acq_rel_##bits(ptr, ret, -val); break; \
- case __ATOMIC_RELEASE: __simt_add_release_##bits(ptr, ret, -val); break; \
- case __ATOMIC_RELAXED: __simt_add_relaxed_##bits(ptr, ret, -val); break; \
- default: assert(0); \
- } \
- return ret; \
- }
-DO__atomic_fetch_sub_simt_(4, 32) DO__atomic_fetch_sub_simt_(8, 64)
-
- template <class type, class delta,
- std::enable_if_t<sizeof(type) <= 2, int> = 0>
- type __device__
- __atomic_fetch_sub_simt_(type *ptr, delta val, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- type desired = expected - val;
- while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
- memorder, memorder))
- desired = expected - val;  // recompute: the failed CAS refreshed `expected`
- return expected;
-}
-
-#define DO__atomic_fetch_and_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- type __device__ __atomic_fetch_and_simt_(type *ptr, type val, \
- int memorder) { \
- type ret; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_and_acquire_##bits(ptr, ret, val); break; \
- case __ATOMIC_ACQ_REL: __simt_and_acq_rel_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELEASE: __simt_and_release_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELAXED: __simt_and_relaxed_##bits(ptr, ret, val); break; \
- default: assert(0); \
- } \
- return ret; \
- }
-DO__atomic_fetch_and_simt_(4, 32) DO__atomic_fetch_and_simt_(8, 64)
-
- template <class type, class delta,
- std::enable_if_t<sizeof(type) <= 2, int> = 0>
- type __device__
- __atomic_fetch_and_simt_(type *ptr, delta val, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- type desired = expected & val;
- while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
- memorder, memorder))
- desired = expected & val;  // recompute: the failed CAS refreshed `expected`
- return expected;
-}
-
-#define DO__atomic_fetch_xor_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- type __device__ __atomic_fetch_xor_simt_(type *ptr, type val, \
- int memorder) { \
- type ret; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_xor_acquire_##bits(ptr, ret, val); break; \
- case __ATOMIC_ACQ_REL: __simt_xor_acq_rel_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELEASE: __simt_xor_release_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELAXED: __simt_xor_relaxed_##bits(ptr, ret, val); break; \
- default: assert(0); \
- } \
- return ret; \
- }
-DO__atomic_fetch_xor_simt_(4, 32) DO__atomic_fetch_xor_simt_(8, 64)
-
- template <class type, class delta,
- std::enable_if_t<sizeof(type) <= 2, int> = 0>
- type __device__
- __atomic_fetch_xor_simt_(type *ptr, delta val, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- type desired = expected ^ val;
- while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
- memorder, memorder))
- desired = expected ^ val;  // recompute: the failed CAS refreshed `expected`
- return expected;
-}
-
-#define DO__atomic_fetch_or_simt_(bytes, bits) \
- template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
- type __device__ __atomic_fetch_or_simt_(type *ptr, type val, int memorder) { \
- type ret; \
- switch (memorder) { \
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); \
- case __ATOMIC_CONSUME: \
- case __ATOMIC_ACQUIRE: __simt_or_acquire_##bits(ptr, ret, val); break; \
- case __ATOMIC_ACQ_REL: __simt_or_acq_rel_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELEASE: __simt_or_release_##bits(ptr, ret, val); break; \
- case __ATOMIC_RELAXED: __simt_or_relaxed_##bits(ptr, ret, val); break; \
- default: assert(0); \
- } \
- return ret; \
- }
-DO__atomic_fetch_or_simt_(4, 32) DO__atomic_fetch_or_simt_(8, 64)
-
- template <class type, class delta,
- std::enable_if_t<sizeof(type) <= 2, int> = 0>
- type __device__
- __atomic_fetch_or_simt_(type *ptr, delta val, int memorder) {
- type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
- type desired = expected | val;
- while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
- memorder, memorder))
- desired = expected | val;  // recompute: the failed CAS refreshed `expected`
- return expected;
-}
-
-template <class type>
-inline bool __device__ __atomic_test_and_set_simt_(type *ptr, int memorder) {
- return __atomic_exchange_n_simt_((char *)ptr, (char)1, memorder) == 1;
-}
-template <class type>
-inline void __device__ __atomic_clear_simt_(type *ptr, int memorder) {
- return __atomic_store_n_simt_((char *)ptr, (char)0, memorder);
-}
-
-inline constexpr __device__ bool __atomic_always_lock_free_simt_(size_t size,
- void *) {
- return size <= 8;
-}
-inline __device__ bool __atomic_is_lock_free_simt_(size_t size, void *ptr) {
- return __atomic_always_lock_free_simt_(size, ptr);
-}
-
-/*
- fences
-*/
-
-inline void __device__ __atomic_thread_fence_simt(int memorder) {
- switch (memorder) {
- case __ATOMIC_SEQ_CST: __simt_fence_sc_(); break;
- case __ATOMIC_CONSUME:
- case __ATOMIC_ACQUIRE:
- case __ATOMIC_ACQ_REL:
- case __ATOMIC_RELEASE: __simt_fence_(); break;
- case __ATOMIC_RELAXED: break;
- default: assert(0);
- }
-}
-inline void __device__ __atomic_signal_fence_simt(int memorder) {
- __atomic_thread_fence_simt(memorder);
-}
-
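A hedged sketch (not from the original file) of a release/acquire publication pattern built from the fence and the low-level helpers defined above; function names are hypothetical:

```cpp
// Producer: write the payload, fence, then set the flag.
__device__ void publish(int *data, int *flag) {
  *data = 42;
  __atomic_thread_fence_simt(__ATOMIC_RELEASE);       // fence.gpu
  __atomic_store_n_simt_(flag, 1, __ATOMIC_RELAXED);
}

// Consumer: spin on the flag, fence, then read the payload.
__device__ int consume(const int *data, const int *flag) {
  while (__atomic_load_n_simt_(flag, __ATOMIC_RELAXED) == 0) {
  }
  __atomic_thread_fence_simt(__ATOMIC_ACQUIRE);       // fence.gpu
  return *data;
}
```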
-/*
- non-volatile
-*/
-
-template <class type>
-type __device__ __atomic_load_n_simt(const type *ptr, int memorder) {
- return __atomic_load_n_simt_(const_cast<const type *>(ptr), memorder);
-}
-template <class type>
-void __device__ __atomic_load_simt(const type *ptr, type *ret, int memorder) {
- __atomic_load_simt_(const_cast<const type *>(ptr), ret, memorder);
-}
-template <class type>
-void __device__ __atomic_store_n_simt(type *ptr, type val, int memorder) {
- __atomic_store_n_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-void __device__ __atomic_store_simt(type *ptr, type *val, int memorder) {
- __atomic_store_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_exchange_n_simt(type *ptr, type val, int memorder) {
- return __atomic_exchange_n_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-void __device__ __atomic_exchange_simt(type *ptr, type *val, type *ret,
- int memorder) {
- __atomic_exchange_simt_(const_cast<type *>(ptr), val, ret, memorder);
-}
-template <class type>
-bool __device__ __atomic_compare_exchange_n_simt(type *ptr, type *expected,
- type desired, bool weak,
- int success_memorder,
- int failure_memorder) {
- return __atomic_compare_exchange_n_simt_(const_cast<type *>(ptr), expected,
- desired, weak, success_memorder,
- failure_memorder);
-}
-template <class type>
-bool __device__ __atomic_compare_exchange_simt(type *ptr, type *expected,
- type *desired, bool weak,
- int success_memorder,
- int failure_memorder) {
- return __atomic_compare_exchange_simt_(const_cast<type *>(ptr), expected,
- desired, weak, success_memorder,
- failure_memorder);
-}
-template <class type, class delta>
-type __device__ __atomic_fetch_add_simt(type *ptr, delta val, int memorder) {
- return __atomic_fetch_add_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type, class delta>
-type __device__ __atomic_fetch_sub_simt(type *ptr, delta val, int memorder) {
- return __atomic_fetch_sub_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_and_simt(type *ptr, type val, int memorder) {
- return __atomic_fetch_and_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_xor_simt(type *ptr, type val, int memorder) {
- return __atomic_fetch_xor_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_or_simt(type *ptr, type val, int memorder) {
- return __atomic_fetch_or_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-bool __device__ __atomic_test_and_set_simt(void *ptr, int memorder) {
- return __atomic_test_and_set_simt_(const_cast<void *>(ptr), memorder);
-}
-template <class type>
-void __device__ __atomic_clear_simt(void *ptr, int memorder) {
- return __atomic_clear_simt_(const_cast<void *>(ptr), memorder);
-}
-inline bool __device__ __atomic_always_lock_free_simt(size_t size, void *ptr) {
- return __atomic_always_lock_free_simt_(size, const_cast<void *>(ptr));
-}
-inline bool __device__ __atomic_is_lock_free_simt(size_t size, void *ptr) {
- return __atomic_is_lock_free_simt_(size, const_cast<void *>(ptr));
-}
-
-/*
- volatile
-*/
-
-template <class type>
-type __device__ __atomic_load_n_simt(const volatile type *ptr, int memorder) {
- return __atomic_load_n_simt_(const_cast<const type *>(ptr), memorder);
-}
-template <class type>
-void __device__ __atomic_load_simt(const volatile type *ptr, type *ret,
- int memorder) {
- __atomic_load_simt_(const_cast<const type *>(ptr), ret, memorder);
-}
-template <class type>
-void __device__ __atomic_store_n_simt(volatile type *ptr, type val,
- int memorder) {
- __atomic_store_n_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-void __device__ __atomic_store_simt(volatile type *ptr, type *val,
- int memorder) {
- __atomic_store_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_exchange_n_simt(volatile type *ptr, type val,
- int memorder) {
- return __atomic_exchange_n_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-void __device__ __atomic_exchange_simt(volatile type *ptr, type *val, type *ret,
- int memorder) {
- __atomic_exchange_simt_(const_cast<type *>(ptr), val, ret, memorder);
-}
-template <class type>
-bool __device__ __atomic_compare_exchange_n_simt(volatile type *ptr,
- type *expected, type desired,
- bool weak,
- int success_memorder,
- int failure_memorder) {
- return __atomic_compare_exchange_n_simt_(const_cast<type *>(ptr), expected,
- desired, weak, success_memorder,
- failure_memorder);
-}
-template <class type>
-bool __device__ __atomic_compare_exchange_simt(volatile type *ptr,
- type *expected, type *desired,
- bool weak, int success_memorder,
- int failure_memorder) {
- return __atomic_compare_exchange_simt_(const_cast<type *>(ptr), expected,
- desired, weak, success_memorder,
- failure_memorder);
-}
-template <class type, class delta>
-type __device__ __atomic_fetch_add_simt(volatile type *ptr, delta val,
- int memorder) {
- return __atomic_fetch_add_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type, class delta>
-type __device__ __atomic_fetch_sub_simt(volatile type *ptr, delta val,
- int memorder) {
- return __atomic_fetch_sub_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_and_simt(volatile type *ptr, type val,
- int memorder) {
- return __atomic_fetch_and_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_xor_simt(volatile type *ptr, type val,
- int memorder) {
- return __atomic_fetch_xor_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-type __device__ __atomic_fetch_or_simt(volatile type *ptr, type val,
- int memorder) {
- return __atomic_fetch_or_simt_(const_cast<type *>(ptr), val, memorder);
-}
-template <class type>
-bool __device__ __atomic_test_and_set_simt(volatile void *ptr, int memorder) {
- return __atomic_test_and_set_simt_(const_cast<void *>(ptr), memorder);
-}
-template <class type>
-void __device__ __atomic_clear_simt(volatile void *ptr, int memorder) {
- return __atomic_clear_simt_(const_cast<void *>(ptr), memorder);
-}
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif //_SIMT_DETAILS_CONFIG
-
-#ifndef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
-/*
- builtins
-*/
-
-#define __atomic_load_n __atomic_load_n_simt
-#define __atomic_load __atomic_load_simt
-#define __atomic_store_n __atomic_store_n_simt
-#define __atomic_store __atomic_store_simt
-#define __atomic_exchange_n __atomic_exchange_n_simt
-#define __atomic_exchange __atomic_exchange_simt
-#define __atomic_compare_exchange_n __atomic_compare_exchange_n_simt
-#define __atomic_compare_exchange __atomic_compare_exchange_simt
-#define __atomic_fetch_add __atomic_fetch_add_simt
-#define __atomic_fetch_sub __atomic_fetch_sub_simt
-#define __atomic_fetch_and __atomic_fetch_and_simt
-#define __atomic_fetch_xor __atomic_fetch_xor_simt
-#define __atomic_fetch_or __atomic_fetch_or_simt
-#define __atomic_test_and_set __atomic_test_and_set_simt
-#define __atomic_clear __atomic_clear_simt
-#define __atomic_always_lock_free __atomic_always_lock_free_simt
-#define __atomic_is_lock_free __atomic_is_lock_free_simt
-#define __atomic_thread_fence __atomic_thread_fence_simt
-#define __atomic_signal_fence __atomic_signal_fence_simt
-
-#define KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
-
-#endif // KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
-#endif // __CUDA_ARCH__ && KOKKOS_ENABLE_CUDA_ASM_ATOMICS
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0 -- Copyright (2019) Sandia Corporation.
-// BSD 3-clause license text identical to the Kokkos headers shown earlier.
-//@HEADER
-*/
-
-#ifdef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
-
-#undef __atomic_load_n
-#undef __atomic_load
-#undef __atomic_store_n
-#undef __atomic_store
-#undef __atomic_exchange_n
-#undef __atomic_exchange
-#undef __atomic_compare_exchange_n
-#undef __atomic_compare_exchange
-#undef __atomic_fetch_add
-#undef __atomic_fetch_sub
-#undef __atomic_fetch_and
-#undef __atomic_fetch_xor
-#undef __atomic_fetch_or
-#undef __atomic_test_and_set
-#undef __atomic_clear
-#undef __atomic_always_lock_free
-#undef __atomic_is_lock_free
-#undef __atomic_thread_fence
-#undef __atomic_signal_fence
-
-#undef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
-
-#endif // KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
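This companion header undoes the remapping installed by the previous header: included as a bracketing pair, the two files scope the rerouting of the compiler's `__atomic_*` builtins to the `*_simt` implementations. A hedged sketch with hypothetical include names (the real paths are elided by this diff):

```cpp
// Hypothetical include names; the diff does not preserve the real paths.
#include "simt_atomic_builtin_replacements.hpp"  // first header: #define pass

__device__ int bump(int *counter) {
  // Resolves to __atomic_fetch_add_simt(...) while the remapping is active.
  return __atomic_fetch_add(counter, 1, __ATOMIC_RELAXED);
}

#include "simt_atomic_restore_builtins.hpp"      // this header: #undef pass
```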
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0 -- Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// BSD 3-clause license text identical to the Kokkos headers shown earlier.
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_ERROR_HPP
-#define KOKKOS_CUDA_ERROR_HPP
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-#include <iosfwd>
-
-namespace Kokkos {
-namespace Impl {
-
-void cuda_stream_synchronize(
- const cudaStream_t stream,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases reason,
- const std::string& name);
-void cuda_device_synchronize(const std::string& name);
-void cuda_stream_synchronize(const cudaStream_t stream,
- const std::string& name);
-
-[[noreturn]] void cuda_internal_error_throw(cudaError e, const char* name,
- const char* file = nullptr,
- const int line = 0);
-
-#ifndef KOKKOS_COMPILER_NVHPC
-[[noreturn]]
-#endif
- void cuda_internal_error_abort(cudaError e, const char* name,
- const char* file = nullptr,
- const int line = 0);
-
-inline void cuda_internal_safe_call(cudaError e, const char* name,
- const char* file = nullptr,
- const int line = 0) {
- // 1. Success -> normal continuation.
- // 2. Error codes for which, to continue using CUDA, the process must be
- // terminated and relaunched -> call abort on the host-side.
- // 3. Any other error code -> throw a runtime error.
- switch (e) {
- case cudaSuccess: break;
- case cudaErrorIllegalAddress:
- case cudaErrorAssert:
- case cudaErrorHardwareStackError:
- case cudaErrorIllegalInstruction:
- case cudaErrorMisalignedAddress:
- case cudaErrorInvalidAddressSpace:
- case cudaErrorInvalidPc:
- case cudaErrorLaunchFailure:
- cuda_internal_error_abort(e, name, file, line);
- break;
- default: cuda_internal_error_throw(e, name, file, line); break;
- }
-}
-
-#define KOKKOS_IMPL_CUDA_SAFE_CALL(call) \
- Kokkos::Impl::cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
-
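A minimal sketch of the macro in use (host code; the buffer and sizes are illustrative): the stringified call plus `__FILE__`/`__LINE__` flow into the abort/throw paths above, so a failure reports the exact call site.

```cpp
void *staging = nullptr;
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMalloc(&staging, 1 << 20));
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemset(staging, 0, 1 << 20));
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(staging));
```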
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-
-KOKKOS_DEPRECATED
-inline void cuda_internal_safe_call_deprecated(cudaError e, const char* name,
- const char* file = nullptr,
- const int line = 0) {
- cuda_internal_safe_call(e, name, file, line);
-}
-
-#define CUDA_SAFE_CALL(call) \
- Kokkos::Impl::cuda_internal_safe_call_deprecated(call, #call, __FILE__, \
- __LINE__)
-
-#endif
-
-} // namespace Impl
-
-namespace Experimental {
-
-class CudaRawMemoryAllocationFailure : public RawMemoryAllocationFailure {
- private:
- using base_t = RawMemoryAllocationFailure;
-
- cudaError_t m_error_code = cudaSuccess;
-
- static FailureMode get_failure_mode(cudaError_t error_code) {
- switch (error_code) {
- case cudaErrorMemoryAllocation: return FailureMode::OutOfMemoryError;
- case cudaErrorInvalidValue: return FailureMode::InvalidAllocationSize;
- // TODO handle cudaErrorNotSupported for cudaMallocManaged
- default: return FailureMode::Unknown;
- }
- }
-
- public:
- // using base_t::base_t;
- // would trigger
- //
- // error: cannot determine the exception specification of the default
- // constructor due to a circular dependency
- //
- // using NVCC 9.1 and gcc 7.4
- CudaRawMemoryAllocationFailure(
- size_t arg_attempted_size, size_t arg_attempted_alignment,
- FailureMode arg_failure_mode = FailureMode::OutOfMemoryError,
- AllocationMechanism arg_mechanism =
- AllocationMechanism::StdMalloc) noexcept
- : base_t(arg_attempted_size, arg_attempted_alignment, arg_failure_mode,
- arg_mechanism) {}
-
- CudaRawMemoryAllocationFailure(size_t arg_attempted_size,
- cudaError_t arg_error_code,
- AllocationMechanism arg_mechanism) noexcept
- : base_t(arg_attempted_size, /* CudaSpace doesn't handle alignment? */ 1,
- get_failure_mode(arg_error_code), arg_mechanism),
- m_error_code(arg_error_code) {}
-
- void append_additional_error_information(std::ostream& o) const override;
-};
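A hedged sketch (not part of the header) of how an allocation routine might report a failed `cudaMalloc` through this exception type; the function name is hypothetical and the `AllocationMechanism` enumerator is assumed to come from the base class:

```cpp
void *checked_device_alloc(std::size_t bytes) {
  void *p = nullptr;
  const cudaError_t err = cudaMalloc(&p, bytes);
  if (err != cudaSuccess) {
    cudaGetLastError();  // clear the sticky error state before throwing
    throw Kokkos::Experimental::CudaRawMemoryAllocationFailure(
        bytes, err,
        Kokkos::Experimental::RawMemoryAllocationFailure::
            AllocationMechanism::CudaMalloc);
  }
  return p;
}
```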
-
-} // end namespace Experimental
-
-} // namespace Kokkos
-
-#endif // KOKKOS_ENABLE_CUDA
-#endif // KOKKOS_CUDA_ERROR_HPP
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0 -- Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// BSD 3-clause license text identical to the Kokkos headers shown earlier.
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
-#define KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-#if !(defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 900) && \
- !(defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL50) || \
- defined(KOKKOS_ARCH_MAXWELL52))
-#include <cuda_fp16.h>
-#if (CUDA_VERSION >= 11000)
-#include <cuda_bf16.h>
-#endif // CUDA_VERSION >= 11000
-
-#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
-// Make sure no one else tries to define half_t
-#define KOKKOS_IMPL_HALF_TYPE_DEFINED
-#define KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
-
-namespace Kokkos {
-namespace Impl {
-struct half_impl_t {
- using type = __half;
-};
-#if (CUDA_VERSION >= 11000)
-#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
-struct bhalf_impl_t {
- using type = __nv_bfloat16;
-};
-#endif // CUDA_VERSION >= 11000
-} // namespace Impl
-} // namespace Kokkos
-#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
-#endif // half_t is disabled on CUDA for:
-       // Clang/8 || KEPLER30 || KEPLER32 || KEPLER37 || MAXWELL50 || MAXWELL52
-#endif // KOKKOS_ENABLE_CUDA
-#endif
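For reference, a hedged sketch of what the trait provides once the guards pass (device code; `__half2float` comes from `cuda_fp16.h`, which this header already includes):

```cpp
// Illustrative only: half_impl_t::type aliases CUDA's native __half, so a
// conversion helper can be written directly against the trait.
using cuda_half = Kokkos::Impl::half_impl_t::type;  // == __half

__device__ float half_to_float(cuda_half h) { return __half2float(h); }
```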
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0 -- Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// BSD 3-clause license text identical to the Kokkos headers shown earlier.
-//@HEADER
-*/
-
-/*--------------------------------------------------------------------------*/
-/* Kokkos interfaces */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-
-#include <Kokkos_Core.hpp>
-
-#include <Cuda/Kokkos_Cuda_Error.hpp>
-#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
-#include <Cuda/Kokkos_Cuda_Instance.hpp>
-#include <Cuda/Kokkos_Cuda_Locks.hpp>
-#include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_Tools.hpp>
-#include <impl/Kokkos_DeviceManagement.hpp>
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-/*--------------------------------------------------------------------------*/
-/* Standard 'C' libraries */
-#include <cstdlib>
-
-/* Standard 'C++' libraries */
-#include <vector>
-#include <iostream>
-#include <sstream>
-#include <string>
-
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
-namespace Kokkos {
-namespace Impl {
-
-bool CudaInternal::kokkos_impl_cuda_use_serial_execution_v = false;
-
-void CudaInternal::cuda_set_serial_execution(bool val) {
- CudaInternal::kokkos_impl_cuda_use_serial_execution_v = val;
-}
-bool CudaInternal::cuda_use_serial_execution() {
- return CudaInternal::kokkos_impl_cuda_use_serial_execution_v;
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-void kokkos_impl_cuda_set_serial_execution(bool val) {
- Kokkos::Impl::CudaInternal::cuda_set_serial_execution(val);
-}
-bool kokkos_impl_cuda_use_serial_execution() {
- return Kokkos::Impl::CudaInternal::cuda_use_serial_execution();
-}
-#endif
-
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
-
-__device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer
- [Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)];
-
-#endif
-
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Impl {
-
-namespace {
-
-__global__ void query_cuda_kernel_arch(int *d_arch) {
-#ifdef _NVHPC_CUDA
- *d_arch = __builtin_current_device_sm() * 10;
-#else
-#if defined(__CUDA_ARCH__)
- *d_arch = __CUDA_ARCH__;
-#else
- *d_arch = 0;
-#endif
-#endif
-}
-
-/** Query the compute capability for which kernels are actually compiled and
- *  launched on this device: */
-int cuda_kernel_arch() {
- int arch = 0;
- int *d_arch = nullptr;
-
- cudaMalloc(reinterpret_cast<void **>(&d_arch), sizeof(int));
- cudaMemcpy(d_arch, &arch, sizeof(int), cudaMemcpyDefault);
-
- query_cuda_kernel_arch<<<1, 1>>>(d_arch);
-
- cudaMemcpy(&arch, d_arch, sizeof(int), cudaMemcpyDefault);
- cudaFree(d_arch);
- return arch;
-}
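The round trip above deliberately seeds `*d_arch` with 0 before the launch: the kernel overwrites it with the `__CUDA_ARCH__` it was compiled for (or, under NVHPC, ten times the runtime SM number), and if the kernel cannot launch at all the host reads back the untouched 0, which `CudaInternal::initialize()` below treats as an architecture mismatch.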
-
-} // namespace
-
-Kokkos::View<uint32_t *, Kokkos::CudaSpace> cuda_global_unique_token_locks(
- bool deallocate) {
- static Kokkos::View<uint32_t *, Kokkos::CudaSpace> locks =
- Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
- if (!deallocate && locks.extent(0) == 0)
- locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>(
- "Kokkos::UniqueToken<Cuda>::m_locks", Kokkos::Cuda().concurrency());
- if (deallocate) locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
- return locks;
-}
-
-void cuda_device_synchronize(const std::string &name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- []() { // TODO: correct device ID
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
- });
-}
-
-void cuda_stream_synchronize(const cudaStream_t stream, const CudaInternal *ptr,
- const std::string &name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
- name,
- Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
- ptr->impl_get_instance_id()},
- [&]() { // TODO: correct device ID
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
- });
-}
-
-void cuda_stream_synchronize(
- const cudaStream_t stream,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases reason,
- const std::string &name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
- name, reason, [&]() { // TODO: correct device ID
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
- });
-}
-
-void cuda_internal_error_throw(cudaError e, const char *name, const char *file,
- const int line) {
- std::ostringstream out;
- out << name << " error( " << cudaGetErrorName(e)
- << "): " << cudaGetErrorString(e);
- if (file) {
- out << " " << file << ":" << line;
- }
- throw_runtime_exception(out.str());
-}
-
-void cuda_internal_error_abort(cudaError e, const char *name, const char *file,
- const int line) {
- std::ostringstream out;
- out << name << " error( " << cudaGetErrorName(e)
- << "): " << cudaGetErrorString(e);
- if (file) {
- out << " " << file << ":" << line;
- }
- abort(out.str().c_str());
-}
-
-//----------------------------------------------------------------------------
-// Some significant cuda device properties:
-//
-// cudaDeviceProp::name : Text label for device
-// cudaDeviceProp::major : Device major number
-// cudaDeviceProp::minor : Device minor number
-// cudaDeviceProp::warpSize : number of threads per warp
-// cudaDeviceProp::multiProcessorCount : number of multiprocessors
-// cudaDeviceProp::sharedMemPerBlock : capacity of shared memory per block
-// cudaDeviceProp::totalConstMem : capacity of constant memory
-// cudaDeviceProp::totalGlobalMem : capacity of global memory
-// cudaDeviceProp::maxGridSize[3] : maximum grid size
-
-//
-// Section 4.4.2.4 of the CUDA Toolkit Reference Manual
-//
-// struct cudaDeviceProp {
-// char name[256];
-// size_t totalGlobalMem;
-// size_t sharedMemPerBlock;
-// int regsPerBlock;
-// int warpSize;
-// size_t memPitch;
-// int maxThreadsPerBlock;
-// int maxThreadsDim[3];
-// int maxGridSize[3];
-// size_t totalConstMem;
-// int major;
-// int minor;
-// int clockRate;
-// size_t textureAlignment;
-// int deviceOverlap;
-// int multiProcessorCount;
-// int kernelExecTimeoutEnabled;
-// int integrated;
-// int canMapHostMemory;
-// int computeMode;
-// int concurrentKernels;
-// int ECCEnabled;
-// int pciBusID;
-// int pciDeviceID;
-// int tccDriver;
-// int asyncEngineCount;
-// int unifiedAddressing;
-// int memoryClockRate;
-// int memoryBusWidth;
-// int l2CacheSize;
-// int maxThreadsPerMultiProcessor;
-// };
-
-namespace {
-
-class CudaInternalDevices {
- public:
- enum { MAXIMUM_DEVICE_COUNT = 64 };
- struct cudaDeviceProp m_cudaProp[MAXIMUM_DEVICE_COUNT];
- int m_cudaDevCount;
-
- CudaInternalDevices();
-
- static const CudaInternalDevices &singleton();
-};
-
-CudaInternalDevices::CudaInternalDevices() {
- // See 'cudaSetDeviceFlags' for host-device thread interaction
- // Section 4.4.2.6 of the CUDA Toolkit Reference Manual
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceCount(&m_cudaDevCount));
-
- if (m_cudaDevCount > MAXIMUM_DEVICE_COUNT) {
- Kokkos::abort(
- "Sorry, you have more GPUs per node than we thought anybody would ever "
- "have. Please report this to github.com/kokkos/kokkos.");
- }
- for (int i = 0; i < m_cudaDevCount; ++i) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceProperties(m_cudaProp + i, i));
- }
-}
-
-const CudaInternalDevices &CudaInternalDevices::singleton() {
- static CudaInternalDevices self;
- return self;
-}
-
-} // namespace
-
-unsigned long *CudaInternal::constantMemHostStaging = nullptr;
-cudaEvent_t CudaInternal::constantMemReusable = nullptr;
-std::mutex CudaInternal::constantMemMutex;
-
-//----------------------------------------------------------------------------
-
-void CudaInternal::print_configuration(std::ostream &s) const {
- const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
-
-#if defined(KOKKOS_ENABLE_CUDA)
- s << "macro KOKKOS_ENABLE_CUDA : defined\n";
-#endif
-#if defined(CUDA_VERSION)
- s << "macro CUDA_VERSION = " << CUDA_VERSION << " = version "
- << CUDA_VERSION / 1000 << "." << (CUDA_VERSION % 1000) / 10 << '\n';
-#endif
-
- for (int i = 0; i < dev_info.m_cudaDevCount; ++i) {
- s << "Kokkos::Cuda[ " << i << " ] " << dev_info.m_cudaProp[i].name
- << " capability " << dev_info.m_cudaProp[i].major << "."
- << dev_info.m_cudaProp[i].minor << ", Total Global Memory: "
- << human_memory_size(dev_info.m_cudaProp[i].totalGlobalMem)
- << ", Shared Memory per Block: "
- << human_memory_size(dev_info.m_cudaProp[i].sharedMemPerBlock);
- if (m_cudaDev == i) s << " : Selected";
- s << std::endl;
- }
-}
-
-//----------------------------------------------------------------------------
-
-CudaInternal::~CudaInternal() {
- if (m_stream || m_scratchSpace || m_scratchFlags || m_scratchUnified) {
- std::cerr << "Kokkos::Cuda ERROR: Failed to call Kokkos::Cuda::finalize()"
- << std::endl;
- }
-
- m_cudaDev = -1;
- m_cudaArch = -1;
- m_multiProcCount = 0;
- m_maxWarpCount = 0;
- m_maxBlock = {0, 0, 0};
- m_maxSharedWords = 0;
- m_maxConcurrency = 0;
- m_scratchSpaceCount = 0;
- m_scratchFlagsCount = 0;
- m_scratchUnifiedCount = 0;
- m_scratchUnifiedSupported = 0;
- m_streamCount = 0;
- m_scratchSpace = nullptr;
- m_scratchFlags = nullptr;
- m_scratchUnified = nullptr;
- m_stream = nullptr;
- for (int i = 0; i < m_n_team_scratch; ++i) {
- m_team_scratch_current_size[i] = 0;
- m_team_scratch_ptr[i] = nullptr;
- }
-}
-
-int CudaInternal::verify_is_initialized(const char *const label) const {
- if (m_cudaDev < 0) {
- Kokkos::abort((std::string("Kokkos::Cuda::") + label +
- " : ERROR device not initialized\n")
- .c_str());
- }
- return 0 <= m_cudaDev;
-}
-uint32_t CudaInternal::impl_get_instance_id() const { return m_instance_id; }
-CudaInternal &CudaInternal::singleton() {
- static CudaInternal self;
- return self;
-}
-void CudaInternal::fence(const std::string &name) const {
- Impl::cuda_stream_synchronize(m_stream, this, name);
-}
-void CudaInternal::fence() const {
- fence("Kokkos::CudaInternal::fence(): Unnamed Instance Fence");
-}
-
-void CudaInternal::initialize(int cuda_device_id, cudaStream_t stream,
- bool manage_stream) {
- if (was_finalized)
- Kokkos::abort("Calling Cuda::initialize after Cuda::finalize is illegal\n");
- was_initialized = true;
- if (is_initialized()) return;
-
- enum { WordSize = sizeof(size_type) };
-
-#ifndef KOKKOS_IMPL_TURN_OFF_CUDA_HOST_INIT_CHECK
- if (!HostSpace::execution_space::impl_is_initialized()) {
- const std::string msg(
- "Cuda::initialize ERROR : HostSpace::execution_space is not "
- "initialized");
- throw_runtime_exception(msg);
- }
-#endif
-
- const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
-
- const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
-
- const bool ok_id =
- 0 <= cuda_device_id && cuda_device_id < dev_info.m_cudaDevCount;
-
- // Need device capability 3.0 or better
-
- const bool ok_dev =
- ok_id && (3 <= dev_info.m_cudaProp[cuda_device_id].major &&
- 0 <= dev_info.m_cudaProp[cuda_device_id].minor);
-
- if (ok_init && ok_dev) {
- const struct cudaDeviceProp &cudaProp = dev_info.m_cudaProp[cuda_device_id];
-
- m_cudaDev = cuda_device_id;
- m_deviceProp = cudaProp;
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev));
- Kokkos::Impl::cuda_device_synchronize(
- "Kokkos::CudaInternal::initialize: Fence on space initialization");
-
- // Query what compute capability architecture a kernel executes:
- m_cudaArch = cuda_kernel_arch();
-
- if (m_cudaArch == 0) {
- std::stringstream ss;
- ss << "Kokkos::Cuda::initialize ERROR: likely mismatch of architecture\n";
- std::string msg = ss.str();
- Kokkos::abort(msg.c_str());
- }
-
- int compiled_major = m_cudaArch / 100;
- int compiled_minor = (m_cudaArch % 100) / 10;
-
- if (compiled_major != cudaProp.major || compiled_minor > cudaProp.minor) {
- std::stringstream ss;
- ss << "Kokkos::Cuda::initialize ERROR: running kernels compiled for "
- "compute capability "
- << compiled_major << "." << compiled_minor
- << " on device with compute capability " << cudaProp.major << "."
- << cudaProp.minor << " is not supported by CUDA!\n";
- std::string msg = ss.str();
- Kokkos::abort(msg.c_str());
- }
- if (Kokkos::show_warnings() && (compiled_major != cudaProp.major ||
- compiled_minor != cudaProp.minor)) {
- std::cerr << "Kokkos::Cuda::initialize WARNING: running kernels compiled "
- "for compute capability "
- << compiled_major << "." << compiled_minor
- << " on device with compute capability " << cudaProp.major
- << "." << cudaProp.minor
- << " , this will likely reduce potential performance."
- << std::endl;
- }
-
- // number of multiprocessors
-
- m_multiProcCount = cudaProp.multiProcessorCount;
-
- //----------------------------------
- // Maximum number of warps:
- // capped at WarpSize, because the final inter-warp reduction is done by
- // a single warp in which each thread handles one warp's partial value.
-
- m_maxWarpCount = cudaProp.maxThreadsPerBlock / Impl::CudaTraits::WarpSize;
-
- if (Impl::CudaTraits::WarpSize < m_maxWarpCount) {
- m_maxWarpCount = Impl::CudaTraits::WarpSize;
- }
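- // Worked example of the cap above (a sketch with typical numbers): for
- // maxThreadsPerBlock == 1024 and WarpSize == 32 the division yields 32
- // warps, which equals the cap, so m_maxWarpCount == 32.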
-
- m_maxSharedWords = cudaProp.sharedMemPerBlock / WordSize;
-
- //----------------------------------
- // Maximum number of blocks:
-
- m_maxBlock[0] = cudaProp.maxGridSize[0];
- m_maxBlock[1] = cudaProp.maxGridSize[1];
- m_maxBlock[2] = cudaProp.maxGridSize[2];
-
- m_shmemPerSM = cudaProp.sharedMemPerMultiprocessor;
- m_maxShmemPerBlock = cudaProp.sharedMemPerBlock;
- m_regsPerSM = cudaProp.regsPerMultiprocessor;
- m_maxBlocksPerSM =
- m_cudaArch < 500
- ? 16
- : (m_cudaArch < 750 ? 32 : (m_cudaArch == 750 ? 16 : 32));
- m_maxThreadsPerSM = cudaProp.maxThreadsPerMultiProcessor;
- m_maxThreadsPerBlock = cudaProp.maxThreadsPerBlock;
-
- //----------------------------------
-
- m_scratchUnifiedSupported = cudaProp.unifiedAddressing;
-
- if (Kokkos::show_warnings() && !m_scratchUnifiedSupported) {
- std::cerr << "Kokkos::Cuda device " << cudaProp.name << " capability "
- << cudaProp.major << "." << cudaProp.minor
- << " does not support unified virtual address space"
- << std::endl;
- }
-
- //----------------------------------
- // Multiblock reduction uses scratch flags for counters
- // and scratch space for partial reduction values.
- // Allocate some initial space. This will grow as needed.
-
- {
- const unsigned reduce_block_count =
- m_maxWarpCount * Impl::CudaTraits::WarpSize;
-
- (void)scratch_unified(16 * sizeof(size_type));
- (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
- (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
- }
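- // Sizing sketch for the initial allocations above (assuming the common
- // m_maxWarpCount == 32): reduce_block_count == 32 * 32 == 1024, so the
- // requests are 16 words of unified scratch, 2048 words of flags, and
- // 16384 words of partial-reduction space; all three grow on demand.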
- //----------------------------------
- // Concurrent bitset for obtaining unique tokens from within
- // an executing kernel.
- {
- m_maxConcurrency = m_maxThreadsPerSM * cudaProp.multiProcessorCount;
-
- const int32_t buffer_bound =
- Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
-
- // Allocate and initialize uint32_t[ buffer_bound ]
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
-
- Record *const r =
- Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchBitset",
- sizeof(uint32_t) * buffer_bound);
-
- Record::increment(r);
- }
- //----------------------------------
-
- } else {
- std::ostringstream msg;
- msg << "Kokkos::Cuda::initialize(" << cuda_device_id << ") FAILED";
-
- if (!ok_init) {
- msg << " : Already initialized";
- }
- if (!ok_id) {
- msg << " : Device identifier out of range "
- << "[0.." << dev_info.m_cudaDevCount << "]";
- } else if (!ok_dev) {
- msg << " : Device ";
- msg << dev_info.m_cudaProp[cuda_device_id].major;
- msg << ".";
- msg << dev_info.m_cudaProp[cuda_device_id].minor;
- msg << " has insufficient capability, required 3.0 or better";
- }
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
-#ifdef KOKKOS_ENABLE_CUDA_UVM
- const char *env_force_device_alloc =
- getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC");
- bool force_device_alloc;
- if (env_force_device_alloc == nullptr)
- force_device_alloc = false;
- else
- force_device_alloc = std::stoi(env_force_device_alloc) != 0;
-
- const char *env_visible_devices = getenv("CUDA_VISIBLE_DEVICES");
- bool visible_devices_one = true;
- if (env_visible_devices == nullptr) visible_devices_one = false;
-
- if (Kokkos::show_warnings() &&
- (!visible_devices_one && !force_device_alloc)) {
- std::cerr << R"warning(
-Kokkos::Cuda::initialize WARNING: Cuda is allocating into UVMSpace by default
- without setting CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or
- setting CUDA_VISIBLE_DEVICES.
- This could on multi GPU systems lead to severe performance"
- penalties.)warning"
- << std::endl;
- }
-#endif
-
-#ifdef KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API
- cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
-#else
- cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
-#endif
-
- // Init the array for used for arbitrarily sized atomics
- if (stream == nullptr) Impl::initialize_host_cuda_lock_arrays();
-
- // Allocate a staging buffer for constant mem in pinned host memory
- // and an event to avoid overwriting driver for previous kernel launches
- if (stream == nullptr) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMallocHost(reinterpret_cast<void **>(&constantMemHostStaging),
- CudaTraits::ConstantMemoryUsage));
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaEventCreate(&constantMemReusable));
- }
-
- m_stream = stream;
- m_manage_stream = manage_stream;
- for (int i = 0; i < m_n_team_scratch; ++i) {
- m_team_scratch_current_size[i] = 0;
- m_team_scratch_ptr[i] = nullptr;
- }
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMalloc(&m_scratch_locks, sizeof(int32_t) * m_maxConcurrency));
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMemset(m_scratch_locks, 0, sizeof(int32_t) * m_maxConcurrency));
-}
-
-//----------------------------------------------------------------------------
-
-using ScratchGrain = Cuda::size_type[Impl::CudaTraits::WarpSize];
-enum { sizeScratchGrain = sizeof(ScratchGrain) };
-
-Cuda::size_type *CudaInternal::scratch_flags(const std::size_t size) const {
- if (verify_is_initialized("scratch_flags") &&
- m_scratchFlagsCount * sizeScratchGrain < size) {
- m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
-
- if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags));
-
- Record *const r =
- Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchFlags",
- (sizeof(ScratchGrain) * m_scratchFlagsCount));
-
- Record::increment(r);
-
- m_scratchFlags = reinterpret_cast<size_type *>(r->data());
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain));
- }
-
- return m_scratchFlags;
-}
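-// Example of the grain rounding above (a sketch, assuming a 4-byte
-// size_type so that sizeScratchGrain == 32 * 4 == 128 bytes): a request of
-// 300 bytes yields m_scratchFlagsCount == (300 + 127) / 128 == 3, i.e.
-// 384 bytes are allocated and zero-initialized.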
-
-Cuda::size_type *CudaInternal::scratch_space(const std::size_t size) const {
- if (verify_is_initialized("scratch_space") &&
- m_scratchSpaceCount * sizeScratchGrain < size) {
- m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
-
- if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace));
-
- Record *const r =
- Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchSpace",
- (sizeof(ScratchGrain) * m_scratchSpaceCount));
-
- Record::increment(r);
-
- m_scratchSpace = reinterpret_cast<size_type *>(r->data());
- }
-
- return m_scratchSpace;
-}
-
-Cuda::size_type *CudaInternal::scratch_unified(const std::size_t size) const {
- if (verify_is_initialized("scratch_unified") && m_scratchUnifiedSupported &&
- m_scratchUnifiedCount * sizeScratchGrain < size) {
- m_scratchUnifiedCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>;
-
- if (m_scratchUnified)
- Record::decrement(Record::get_record(m_scratchUnified));
-
- Record *const r = Record::allocate(
- Kokkos::CudaHostPinnedSpace(), "Kokkos::InternalScratchUnified",
- (sizeof(ScratchGrain) * m_scratchUnifiedCount));
-
- Record::increment(r);
-
- m_scratchUnified = reinterpret_cast<size_type *>(r->data());
- }
-
- return m_scratchUnified;
-}
-
-Cuda::size_type *CudaInternal::scratch_functor(const std::size_t size) const {
- if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) {
- m_scratchFunctorSize = size;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
-
- if (m_scratchFunctor)
- Record::decrement(Record::get_record(m_scratchFunctor));
-
- Record *const r =
- Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchFunctor",
- m_scratchFunctorSize);
-
- Record::increment(r);
-
- m_scratchFunctor = reinterpret_cast<size_type *>(r->data());
- }
-
- return m_scratchFunctor;
-}
-
-std::pair<void *, int> CudaInternal::resize_team_scratch_space(
- std::int64_t bytes, bool force_shrink) {
- // Multiple ParallelFor/Reduce Teams can call this function at the same time
- // and invalidate the m_team_scratch_ptr. We use a pool to avoid any race
- // condition.
-
- int current_team_scratch = 0;
- int zero = 0;
- int one = 1;
- while (!m_team_scratch_pool[current_team_scratch].compare_exchange_weak(
- zero, one, std::memory_order_release, std::memory_order_relaxed)) {
- current_team_scratch = (current_team_scratch + 1) % m_n_team_scratch;
- // compare_exchange_weak writes the observed value back into 'zero' on
- // failure, so reset it before probing the next slot.
- zero = 0;
- }
- if (m_team_scratch_current_size[current_team_scratch] == 0) {
- m_team_scratch_current_size[current_team_scratch] = bytes;
- m_team_scratch_ptr[current_team_scratch] =
- Kokkos::kokkos_malloc<Kokkos::CudaSpace>(
- "Kokkos::CudaSpace::TeamScratchMemory",
- m_team_scratch_current_size[current_team_scratch]);
- }
- if ((bytes > m_team_scratch_current_size[current_team_scratch]) ||
- ((bytes < m_team_scratch_current_size[current_team_scratch]) &&
- (force_shrink))) {
- m_team_scratch_current_size[current_team_scratch] = bytes;
- m_team_scratch_ptr[current_team_scratch] =
- Kokkos::kokkos_realloc<Kokkos::CudaSpace>(
- m_team_scratch_ptr[current_team_scratch],
- m_team_scratch_current_size[current_team_scratch]);
- }
- return std::make_pair(m_team_scratch_ptr[current_team_scratch],
- current_team_scratch);
-}
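-// Usage sketch (hypothetical caller, not from this file): a team-policy
-// launch acquires a slot and releases it once the kernel is done:
-//   auto result = instance->resize_team_scratch_space(bytes);
-//   void* scratch = result.first;  // pass to the kernel launch
-//   instance->m_team_scratch_pool[result.second] = 0;  // release afterwards
-// The compare-exchange loop above guarantees that two concurrent launches
-// never receive the same slot; the release path is not shown in this file.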
-
-//----------------------------------------------------------------------------
-
-void CudaInternal::finalize() {
- // skip if finalize() has already been called
- if (was_finalized) return;
-
- was_finalized = true;
-
- // Only finalize this if we're the singleton
- if (this == &singleton()) {
- (void)Impl::cuda_global_unique_token_locks(true);
- Impl::finalize_host_cuda_lock_arrays();
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(constantMemHostStaging));
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaEventDestroy(constantMemReusable));
- auto &deep_copy_space =
- Kokkos::Impl::cuda_get_deep_copy_space(/*initialize*/ false);
- if (deep_copy_space)
- deep_copy_space->impl_internal_space_instance()->finalize();
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamDestroy(cuda_get_deep_copy_stream()));
- }
-
- if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
- using RecordCuda = Kokkos::Impl::SharedAllocationRecord<CudaSpace>;
- using RecordHost =
- Kokkos::Impl::SharedAllocationRecord<CudaHostPinnedSpace>;
-
- RecordCuda::decrement(RecordCuda::get_record(m_scratchFlags));
- RecordCuda::decrement(RecordCuda::get_record(m_scratchSpace));
- RecordHost::decrement(RecordHost::get_record(m_scratchUnified));
- if (m_scratchFunctorSize > 0)
- RecordCuda::decrement(RecordCuda::get_record(m_scratchFunctor));
- }
-
- for (int i = 0; i < m_n_team_scratch; ++i) {
- if (m_team_scratch_current_size[i] > 0)
- Kokkos::kokkos_free<Kokkos::CudaSpace>(m_team_scratch_ptr[i]);
- }
-
- if (m_manage_stream && m_stream != nullptr)
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamDestroy(m_stream));
-
- m_cudaDev = -1;
- m_multiProcCount = 0;
- m_maxWarpCount = 0;
- m_maxBlock = {0, 0, 0};
- m_maxSharedWords = 0;
- m_scratchSpaceCount = 0;
- m_scratchFlagsCount = 0;
- m_scratchUnifiedCount = 0;
- m_streamCount = 0;
- m_scratchSpace = nullptr;
- m_scratchFlags = nullptr;
- m_scratchUnified = nullptr;
- m_stream = nullptr;
- for (int i = 0; i < m_n_team_scratch; ++i) {
- m_team_scratch_current_size[i] = 0;
- m_team_scratch_ptr[i] = nullptr;
- }
-
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(m_scratch_locks));
- m_scratch_locks = nullptr;
-}
-
-//----------------------------------------------------------------------------
-
-Cuda::size_type cuda_internal_multiprocessor_count() {
- return CudaInternal::singleton().m_multiProcCount;
-}
-
-CudaSpace::size_type cuda_internal_maximum_concurrent_block_count() {
-#if defined(KOKKOS_ARCH_KEPLER)
- // Compute capability 3.0 through 3.7
- enum : int { max_resident_blocks_per_multiprocessor = 16 };
-#else
- // Compute capability 5.0 through 6.2
- enum : int { max_resident_blocks_per_multiprocessor = 32 };
-#endif
- return CudaInternal::singleton().m_multiProcCount *
- max_resident_blocks_per_multiprocessor;
-}
-
-Cuda::size_type cuda_internal_maximum_warp_count() {
- return CudaInternal::singleton().m_maxWarpCount;
-}
-
-std::array<Cuda::size_type, 3> cuda_internal_maximum_grid_count() {
- return CudaInternal::singleton().m_maxBlock;
-}
-
-Cuda::size_type cuda_internal_maximum_shared_words() {
- return CudaInternal::singleton().m_maxSharedWords;
-}
-
-Cuda::size_type *cuda_internal_scratch_space(const Cuda &instance,
- const std::size_t size) {
- return instance.impl_internal_space_instance()->scratch_space(size);
-}
-
-Cuda::size_type *cuda_internal_scratch_flags(const Cuda &instance,
- const std::size_t size) {
- return instance.impl_internal_space_instance()->scratch_flags(size);
-}
-
-Cuda::size_type *cuda_internal_scratch_unified(const Cuda &instance,
- const std::size_t size) {
- return instance.impl_internal_space_instance()->scratch_unified(size);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-Cuda::size_type Cuda::detect_device_count() {
- return Impl::CudaInternalDevices::singleton().m_cudaDevCount;
-}
-
-int Cuda::concurrency() {
- return Impl::CudaInternal::singleton().m_maxConcurrency;
-}
-
-int Cuda::impl_is_initialized() {
- return Impl::CudaInternal::singleton().is_initialized();
-}
-
-void Cuda::impl_initialize(InitializationSettings const &settings) {
- Impl::CudaInternal::singleton().initialize(Impl::get_gpu(settings));
-
- // In order to support setting an atexit hook for Kokkos::finalize,
- // we need to ensure that the Cuda deep_copy instance is not destroyed
- // before that atexit hook is called.
- // Thus we create the static instance here, so that it will be deallocated
- // after the potential atexit call.
- // This is necessary since we will access that instance in Kokkos::finalize.
- (void)::Kokkos::Impl::cuda_get_deep_copy_space(true);
-}
-
-std::vector<unsigned> Cuda::detect_device_arch() {
- const Impl::CudaInternalDevices &s = Impl::CudaInternalDevices::singleton();
-
- std::vector<unsigned> output(s.m_cudaDevCount);
-
- for (int i = 0; i < s.m_cudaDevCount; ++i) {
- output[i] = s.m_cudaProp[i].major * 100 + s.m_cudaProp[i].minor;
- }
-
- return output;
-}
-
-Cuda::size_type Cuda::device_arch() {
- const int dev_id = Impl::CudaInternal::singleton().m_cudaDev;
-
- int dev_arch = 0;
-
- if (0 <= dev_id) {
- const struct cudaDeviceProp &cudaProp =
- Impl::CudaInternalDevices::singleton().m_cudaProp[dev_id];
-
- dev_arch = cudaProp.major * 100 + cudaProp.minor;
- }
-
- return dev_arch;
-}
-
-void Cuda::impl_finalize() { Impl::CudaInternal::singleton().finalize(); }
-
-Cuda::Cuda()
- : m_space_instance(&Impl::CudaInternal::singleton(),
- [](Impl::CudaInternal *) {}) {
- Impl::CudaInternal::singleton().verify_is_initialized(
- "Cuda instance constructor");
-}
-
-Cuda::Cuda(cudaStream_t stream, bool manage_stream)
- : m_space_instance(new Impl::CudaInternal, [](Impl::CudaInternal *ptr) {
- ptr->finalize();
- delete ptr;
- }) {
- Impl::CudaInternal::singleton().verify_is_initialized(
- "Cuda instance constructor");
- m_space_instance->initialize(Impl::CudaInternal::singleton().m_cudaDev,
- stream, manage_stream);
-}
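-// Usage sketch (not part of the original source): wrapping a user-owned
-// stream in an execution space instance, assuming Kokkos is initialized:
-//   cudaStream_t s;
-//   KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamCreate(&s));
-//   Kokkos::Cuda instance(s, /*manage_stream=*/false);
-//   Kokkos::parallel_for(Kokkos::RangePolicy<Kokkos::Cuda>(instance, 0, n),
-//                        functor);
-// With manage_stream == false the caller must destroy the stream, but only
-// after the instance has been destroyed.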
-
-void Cuda::print_configuration(std::ostream &os, bool /*verbose*/) const {
- os << "Device Execution Space:\n";
- os << " KOKKOS_ENABLE_CUDA: yes\n";
-
- os << "Cuda Atomics:\n";
- os << " KOKKOS_ENABLE_CUDA_ATOMICS: ";
-#ifdef KOKKOS_ENABLE_CUDA_ATOMICS
- os << "yes\n";
-#else
- os << "no\n";
-#endif
-
- os << "Cuda Options:\n";
- os << " KOKKOS_ENABLE_CUDA_LAMBDA: ";
-#ifdef KOKKOS_ENABLE_CUDA_LAMBDA
- os << "yes\n";
-#else
- os << "no\n";
-#endif
- os << " KOKKOS_ENABLE_CUDA_LDG_INTRINSIC: ";
-#ifdef KOKKOS_ENABLE_CUDA_LDG_INTRINSIC
- os << "yes\n";
-#else
- os << "no\n";
-#endif
- os << " KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE: ";
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
- os << "yes\n";
-#else
- os << "no\n";
-#endif
- os << " KOKKOS_ENABLE_CUDA_UVM: ";
-#ifdef KOKKOS_ENABLE_CUDA_UVM
- os << "yes\n";
-#else
- os << "no\n";
-#endif
- os << " KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA: ";
-#ifdef KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
- os << "yes\n";
-#else
- os << "no\n";
-#endif
-
- os << "\nCuda Runtime Configuration:\n";
-
- m_space_instance->print_configuration(os);
-}
-
-void Cuda::impl_static_fence(const std::string &name) {
- Kokkos::Impl::cuda_device_synchronize(name);
-}
-
-void Cuda::fence(const std::string &name) const {
- m_space_instance->fence(name);
-}
-
-const char *Cuda::name() { return "Cuda"; }
-uint32_t Cuda::impl_instance_id() const noexcept {
- return m_space_instance->impl_get_instance_id();
-}
-
-cudaStream_t Cuda::cuda_stream() const { return m_space_instance->m_stream; }
-int Cuda::cuda_device() const { return m_space_instance->m_cudaDev; }
-const cudaDeviceProp &Cuda::cuda_device_prop() const {
- return m_space_instance->m_deviceProp;
-}
-
-namespace Impl {
-
-int g_cuda_space_factory_initialized =
- initialize_space_factory<Cuda>("150_Cuda");
-
-} // namespace Impl
-
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<Cuda>::id;
-}
-} // namespace Tools
-#endif
-
-} // namespace Kokkos
-
-#else
-
-void KOKKOS_CORE_SRC_CUDA_IMPL_PREVENT_LINK_ERROR() {}
-
-#endif // KOKKOS_ENABLE_CUDA
+++ /dev/null
-#ifndef KOKKOS_CUDA_INSTANCE_HPP_
-#define KOKKOS_CUDA_INSTANCE_HPP_
-
-#include <vector>
-#include <impl/Kokkos_Tools.hpp>
-#include <atomic>
-#include <Cuda/Kokkos_Cuda_Error.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
- // These functions exist to allow working around a suspected system
- // software issue, or to check for race conditions. They are not
- // currently an officially supported capability.
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
-extern "C" void kokkos_impl_cuda_set_serial_execution(bool);
-extern "C" bool kokkos_impl_cuda_use_serial_execution();
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-struct CudaTraits {
- static constexpr CudaSpace::size_type WarpSize = 32 /* 0x0020 */;
- static constexpr CudaSpace::size_type WarpIndexMask =
- 0x001f; /* Mask for warp index */
- static constexpr CudaSpace::size_type WarpIndexShift =
- 5; /* WarpSize == 1 << WarpIndexShift */
-
- static constexpr CudaSpace::size_type ConstantMemoryUsage =
- 0x008000; /* 32k bytes */
- static constexpr CudaSpace::size_type ConstantMemoryCache =
- 0x002000; /* 8k bytes */
- static constexpr CudaSpace::size_type KernelArgumentLimit =
- 0x001000; /* 4k bytes */
- static constexpr CudaSpace::size_type MaxHierarchicalParallelism =
- 1024; /* team_size * vector_length */
- using ConstantGlobalBufferType =
- unsigned long[ConstantMemoryUsage / sizeof(unsigned long)];
-
- static constexpr int ConstantMemoryUseThreshold = 0x000200 /* 512 bytes */;
-
- KOKKOS_INLINE_FUNCTION static CudaSpace::size_type warp_count(
- CudaSpace::size_type i) {
- return (i + WarpIndexMask) >> WarpIndexShift;
- }
-
- KOKKOS_INLINE_FUNCTION static CudaSpace::size_type warp_align(
- CudaSpace::size_type i) {
- constexpr CudaSpace::size_type Mask = ~WarpIndexMask;
- return (i + WarpIndexMask) & Mask;
- }
-};
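-// Worked examples for the helpers above (a sketch): warp_count rounds a
-// thread count up to whole warps and warp_align rounds it up to a multiple
-// of WarpSize:
-//   warp_count(33) == (33 + 31) >> 5 == 2
-//   warp_align(33) == (33 + 31) & ~31 == 64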
-
-//----------------------------------------------------------------------------
-
-CudaSpace::size_type cuda_internal_multiprocessor_count();
-CudaSpace::size_type cuda_internal_maximum_warp_count();
-std::array<CudaSpace::size_type, 3> cuda_internal_maximum_grid_count();
-CudaSpace::size_type cuda_internal_maximum_shared_words();
-
-CudaSpace::size_type cuda_internal_maximum_concurrent_block_count();
-
-CudaSpace::size_type* cuda_internal_scratch_flags(const Cuda&,
- const std::size_t size);
-CudaSpace::size_type* cuda_internal_scratch_space(const Cuda&,
- const std::size_t size);
-CudaSpace::size_type* cuda_internal_scratch_unified(const Cuda&,
- const std::size_t size);
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-namespace Kokkos {
-namespace Impl {
-
-class CudaInternal {
- private:
- CudaInternal(const CudaInternal&);
- CudaInternal& operator=(const CudaInternal&);
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
- static bool kokkos_impl_cuda_use_serial_execution_v;
-#endif
-
- public:
- using size_type = Cuda::size_type;
-
- int m_cudaDev;
-
- // Device Properties
- int m_cudaArch;
- unsigned m_multiProcCount;
- unsigned m_maxWarpCount;
- std::array<size_type, 3> m_maxBlock;
- unsigned m_maxSharedWords;
- uint32_t m_maxConcurrency;
- int m_shmemPerSM;
- int m_maxShmemPerBlock;
- int m_regsPerSM;
- int m_maxBlocksPerSM;
- int m_maxThreadsPerSM;
- int m_maxThreadsPerBlock;
-
- cudaDeviceProp m_deviceProp;
-
- // Scratch Spaces for Reductions
- mutable std::size_t m_scratchSpaceCount;
- mutable std::size_t m_scratchFlagsCount;
- mutable std::size_t m_scratchUnifiedCount;
- mutable std::size_t m_scratchFunctorSize;
-
- size_type m_scratchUnifiedSupported;
- size_type m_streamCount;
- mutable size_type* m_scratchSpace;
- mutable size_type* m_scratchFlags;
- mutable size_type* m_scratchUnified;
- mutable size_type* m_scratchFunctor;
- cudaStream_t m_stream;
- uint32_t m_instance_id;
- bool m_manage_stream;
-
- // Team Scratch Level 1 Space
- int m_n_team_scratch = 10;
- mutable int64_t m_team_scratch_current_size[10];
- mutable void* m_team_scratch_ptr[10];
- mutable std::atomic_int m_team_scratch_pool[10];
- std::int32_t* m_scratch_locks;
-
- bool was_initialized = false;
- bool was_finalized = false;
-
- // FIXME_CUDA: these want to be per-device, not per-stream... use of 'static'
- // here will break once there are multiple devices though
- static unsigned long* constantMemHostStaging;
- static cudaEvent_t constantMemReusable;
- static std::mutex constantMemMutex;
-
- static CudaInternal& singleton();
-
- int verify_is_initialized(const char* const label) const;
-
- int is_initialized() const {
- return nullptr != m_scratchSpace && nullptr != m_scratchFlags;
- }
-
- void initialize(int cuda_device_id, cudaStream_t stream = nullptr,
- bool manage_stream = false);
- void finalize();
-
- void print_configuration(std::ostream&) const;
-
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
- static bool cuda_use_serial_execution();
- static void cuda_set_serial_execution(bool);
-#endif
-
- void fence(const std::string&) const;
- void fence() const;
-
- ~CudaInternal();
-
- CudaInternal()
- : m_cudaDev(-1),
- m_cudaArch(-1),
- m_multiProcCount(0),
- m_maxWarpCount(0),
- m_maxBlock({0, 0, 0}),
- m_maxSharedWords(0),
- m_maxConcurrency(0),
- m_shmemPerSM(0),
- m_maxShmemPerBlock(0),
- m_regsPerSM(0),
- m_maxBlocksPerSM(0),
- m_maxThreadsPerSM(0),
- m_maxThreadsPerBlock(0),
- m_scratchSpaceCount(0),
- m_scratchFlagsCount(0),
- m_scratchUnifiedCount(0),
- m_scratchFunctorSize(0),
- m_scratchUnifiedSupported(0),
- m_streamCount(0),
- m_scratchSpace(nullptr),
- m_scratchFlags(nullptr),
- m_scratchUnified(nullptr),
- m_scratchFunctor(nullptr),
- m_stream(nullptr),
- m_instance_id(
- Kokkos::Tools::Experimental::Impl::idForInstance<Kokkos::Cuda>(
- reinterpret_cast<uintptr_t>(this))) {
- for (int i = 0; i < m_n_team_scratch; ++i) {
- m_team_scratch_current_size[i] = 0;
- m_team_scratch_ptr[i] = nullptr;
- m_team_scratch_pool[i] = 0;
- }
- }
-
- // Resizing of reduction related scratch spaces
- size_type* scratch_space(const std::size_t size) const;
- size_type* scratch_flags(const std::size_t size) const;
- size_type* scratch_unified(const std::size_t size) const;
- size_type* scratch_functor(const std::size_t size) const;
- uint32_t impl_get_instance_id() const;
- // Resizing of team level 1 scratch
- std::pair<void*, int> resize_team_scratch_space(std::int64_t bytes,
- bool force_shrink = false);
-};
-
-} // namespace Impl
-
-namespace Experimental {
- // Partitioning an execution space: expects the space plus arithmetic
- // arguments giving relative weights.
- // This is a customization point for backends;
- // the default behavior is to return the passed-in instance.
-
-namespace Impl {
-inline void create_Cuda_instances(std::vector<Cuda>& instances) {
- for (int s = 0; s < int(instances.size()); s++) {
- cudaStream_t stream;
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamCreate(&stream));
- instances[s] = Cuda(stream, true);
- }
-}
-} // namespace Impl
-
-template <class... Args>
-std::vector<Cuda> partition_space(const Cuda&, Args...) {
-#ifdef __cpp_fold_expressions
- static_assert(
- (... && std::is_arithmetic_v<Args>),
- "Kokkos Error: partitioning arguments must be integers or floats");
-#endif
- std::vector<Cuda> instances(sizeof...(Args));
- Impl::create_Cuda_instances(instances);
- return instances;
-}
-
-template <class T>
-std::vector<Cuda> partition_space(const Cuda&, std::vector<T>& weights) {
- static_assert(
- std::is_arithmetic<T>::value,
- "Kokkos Error: partitioning arguments must be integers or floats");
-
- std::vector<Cuda> instances(weights.size());
- Impl::create_Cuda_instances(instances);
- return instances;
-}
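-// Usage sketch (not in the original header): splitting work across two
-// independent instances. In this implementation the weights only determine
-// the number of instances; each one receives its own newly created stream:
-//   auto parts = Kokkos::Experimental::partition_space(Kokkos::Cuda(), 1, 1);
-//   Kokkos::parallel_for(
-//       Kokkos::RangePolicy<Kokkos::Cuda>(parts[0], 0, n), functor_a);
-//   Kokkos::parallel_for(
-//       Kokkos::RangePolicy<Kokkos::Cuda>(parts[1], 0, n), functor_b);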
-} // namespace Experimental
-
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-#include <Cuda/Kokkos_Cuda_Locks.hpp>
-#include <Cuda/Kokkos_Cuda_Error.hpp>
-
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
-namespace Kokkos {
-namespace Impl {
-__device__ __constant__ CudaLockArrays g_device_cuda_lock_arrays = {nullptr, 0};
-}
-} // namespace Kokkos
-#endif
-
-namespace Kokkos {
-
-namespace {
-
-__global__ void init_lock_array_kernel_atomic() {
- unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
- if (i < CUDA_SPACE_ATOMIC_MASK + 1) {
- Kokkos::Impl::g_device_cuda_lock_arrays.atomic[i] = 0;
- }
-}
-
-} // namespace
-
-namespace Impl {
-
-CudaLockArrays g_host_cuda_lock_arrays = {nullptr, 0};
-
-void initialize_host_cuda_lock_arrays() {
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
- desul::Impl::init_lock_arrays();
- desul::ensure_cuda_lock_arrays_on_device();
-#endif
- if (g_host_cuda_lock_arrays.atomic != nullptr) return;
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMalloc(&g_host_cuda_lock_arrays.atomic,
- sizeof(int) * (CUDA_SPACE_ATOMIC_MASK + 1)));
- Impl::cuda_device_synchronize(
- "Kokkos::Impl::initialize_host_cuda_lock_arrays: Pre Init Lock Arrays");
- g_host_cuda_lock_arrays.n = Cuda::concurrency();
- copy_cuda_lock_arrays_to_device();
- init_lock_array_kernel_atomic<<<(CUDA_SPACE_ATOMIC_MASK + 1 + 255) / 256,
- 256>>>();
- Impl::cuda_device_synchronize(
- "Kokkos::Impl::initialize_host_cuda_lock_arrays: Post Init Lock Arrays");
-}
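-// Launch-shape arithmetic for the kernel above: CUDA_SPACE_ATOMIC_MASK is
-// 0x1FFFF, so the lock array has 0x20000 == 131072 entries, and a grid of
-// (131072 + 255) / 256 == 512 blocks of 256 threads initializes each entry
-// exactly once.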
-
-void finalize_host_cuda_lock_arrays() {
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
- desul::Impl::finalize_lock_arrays();
-#endif
-
- if (g_host_cuda_lock_arrays.atomic == nullptr) return;
- cudaFree(g_host_cuda_lock_arrays.atomic);
- g_host_cuda_lock_arrays.atomic = nullptr;
- g_host_cuda_lock_arrays.n = 0;
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
- copy_cuda_lock_arrays_to_device();
-#endif
-}
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-#else
-
-void KOKKOS_CORE_SRC_CUDA_CUDA_LOCKS_PREVENT_LINK_ERROR() {}
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_LOCKS_HPP
-#define KOKKOS_CUDA_LOCKS_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#ifdef KOKKOS_ENABLE_CUDA
-
-#include <cstdint>
-
-#include <Cuda/Kokkos_Cuda_Error.hpp>
-
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#include <desul/atomics/Lock_Array_Cuda.hpp>
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-struct CudaLockArrays {
- std::int32_t* atomic;
- std::int32_t n;
-};
-
-/// \brief This global variable in Host space is the central definition
-/// of these arrays.
-extern CudaLockArrays g_host_cuda_lock_arrays;
-
-/// \brief After this call, the g_host_cuda_lock_arrays variable has
-/// valid, initialized arrays.
-///
-/// This call is idempotent.
-void initialize_host_cuda_lock_arrays();
-
-/// \brief After this call, the g_host_cuda_lock_arrays variable has
-/// all null pointers, and all array memory has been freed.
-///
-/// This call is idempotent.
-void finalize_host_cuda_lock_arrays();
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-/// \brief This global variable in CUDA space is what kernels use
-/// to get access to the lock arrays.
-///
- /// When relocatable device code is enabled, there is a single
- /// instance of this global variable for the entire executable,
- /// whose definition will be in Kokkos_Cuda_Locks.cpp (and whose
- /// declaration here must then be extern).
-/// This one instance will be initialized by initialize_host_cuda_lock_arrays
-/// and need not be modified afterwards.
-///
-/// When relocatable device code is disabled, an instance of this variable
-/// will be created in every translation unit that sees this header file
-/// (we make this clear by marking it static, meaning no other translation
-/// unit can link to it).
-/// Since the Kokkos_Cuda_Locks.cpp translation unit cannot initialize the
-/// instances in other translation units, we must update this CUDA global
-/// variable based on the Host global variable prior to running any kernels
-/// that will use it.
-/// That is the purpose of the ensure_cuda_lock_arrays_on_device function.
-__device__
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
- __constant__ extern
-#endif
- CudaLockArrays g_device_cuda_lock_arrays;
-
-#define CUDA_SPACE_ATOMIC_MASK 0x1FFFF
-
-/// \brief Acquire a lock for the address
-///
-/// This function tries to acquire the lock for the hash value derived
-/// from the provided ptr. If the lock is successfully acquired the
-/// function returns true. Otherwise it returns false.
-__device__ inline bool lock_address_cuda_space(void* ptr) {
- size_t offset = size_t(ptr);
- offset = offset >> 2;
- offset = offset & CUDA_SPACE_ATOMIC_MASK;
- return (0 == atomicCAS(&g_device_cuda_lock_arrays.atomic[offset], 0, 1));
-}
-
-/// \brief Release lock for the address
-///
-/// This function releases the lock for the hash value derived
-/// from the provided ptr. This function should only be called
-/// after previously successfully acquiring a lock with
-/// lock_address.
-__device__ inline void unlock_address_cuda_space(void* ptr) {
- size_t offset = size_t(ptr);
- offset = offset >> 2;
- offset = offset & CUDA_SPACE_ATOMIC_MASK;
- atomicExch(&g_device_cuda_lock_arrays.atomic[offset], 0);
-}
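-// Typical device-side usage (a sketch, not from this file): spin until the
-// hash slot for the address is acquired, update, then release:
-//   __device__ void locked_update_sketch(double* addr, double v) {
-//     bool done = false;
-//     while (!done) {
-//       if (lock_address_cuda_space(addr)) {
-//         *addr += v;  // critical section
-//         unlock_address_cuda_space(addr);
-//         done = true;
-//       }
-//     }
-//   }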
-
-} // namespace Impl
-} // namespace Kokkos
-
- // Give lock_array_copied explicit translation-unit (internal) linkage.
-namespace Kokkos {
-namespace Impl {
-namespace {
-static int lock_array_copied = 0;
-inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
-} // namespace
-
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
-inline
-#else
-static
-#endif
- void
- copy_cuda_lock_arrays_to_device() {
- if (lock_array_copied == 0) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpyToSymbol(g_device_cuda_lock_arrays,
- &g_host_cuda_lock_arrays,
- sizeof(CudaLockArrays)));
- }
- lock_array_copied = 1;
-}
-
-#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
-inline void ensure_cuda_lock_arrays_on_device() {}
-#else
-inline static void ensure_cuda_lock_arrays_on_device() {
- copy_cuda_lock_arrays_to_device();
-}
-#endif
-
-#else
-
-#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
-inline void ensure_cuda_lock_arrays_on_device() {}
-#else
- // Still need copy_cuda_lock_arrays_to_device() for team scratch etc.
-inline static void ensure_cuda_lock_arrays_on_device() {
- copy_cuda_lock_arrays_to_device();
- desul::ensure_cuda_lock_arrays_on_device();
-}
-#endif
-
-#endif /* defined( KOKKOS_ENABLE_IMPL_DESUL_ATOMICS ) */
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* defined( KOKKOS_ENABLE_CUDA ) */
-
-#endif /* #ifndef KOKKOS_CUDA_LOCKS_HPP */
+++ /dev/null
-#ifndef KOKKOS_CUDA_MDRANGEPOLICY_HPP_
-#define KOKKOS_CUDA_MDRANGEPOLICY_HPP_
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-namespace Kokkos {
-
-template <>
-struct default_outer_direction<Kokkos::Cuda> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-template <>
-struct default_inner_direction<Kokkos::Cuda> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-namespace Impl {
-
-// Settings for MDRangePolicy
-template <>
-inline TileSizeProperties get_tile_size_properties<Kokkos::Cuda>(
- const Kokkos::Cuda& space) {
- TileSizeProperties properties;
- properties.max_threads =
- space.impl_internal_space_instance()->m_maxThreadsPerSM;
- properties.default_largest_tile_size = 16;
- properties.default_tile_size = 2;
- properties.max_total_tile_size = 512;
- return properties;
-}
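-// Illustration (a sketch): these properties bound MDRangePolicy tiles on
-// Cuda, e.g.
-//   Kokkos::MDRangePolicy<Kokkos::Cuda, Kokkos::Rank<2>> policy(
-//       {0, 0}, {N, M}, {16, 16});  // 16 * 16 == 256 <= max_total_tile_size
-// A tile whose thread product exceeded 512 would be rejected for this
-// backend.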
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <Kokkos_Core.hpp>
-
-#include <impl/Kokkos_TaskQueue_impl.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template class TaskQueue<
- Kokkos::Cuda,
- Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
-template class TaskQueueMultiple<
- Kokkos::Cuda,
- Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-#else
-void KOKKOS_CORE_SRC_CUDA_KOKKOS_CUDA_TASK_PREVENT_LINK_ERROR() {}
-#endif /* #if defined( KOKKOS_ENABLE_CUDA ) && defined( KOKKOS_ENABLE_TASKDAG \
- ) */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
-#define KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA)
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
- // Cuda texture fetches can be performed for 4-, 8-, and 16-byte objects
- // (int, int2, int4). Via reinterpret_cast this can be used to support all
- // scalar types of those sizes. Any other scalar type falls back to either
- // normal reads out of global memory, or to the __ldg intrinsic on Kepler
- // GPUs or newer (compute capability >= 3.0).
-
-template <typename ValueType, typename AliasType>
-struct CudaTextureFetch {
- ::cudaTextureObject_t m_obj;
- const ValueType* m_ptr;
- int m_offset;
-
- // Dereference operator pulls through the texture object and returns by value
- template <typename iType>
- KOKKOS_INLINE_FUNCTION ValueType operator[](const iType& i) const {
- KOKKOS_IF_ON_DEVICE(
- (AliasType v = tex1Dfetch<AliasType>(m_obj, i + m_offset);
- return *(reinterpret_cast<ValueType*>(&v));))
- KOKKOS_IF_ON_HOST((return m_ptr[i];))
- }
-
- // Pointer to referenced memory
- KOKKOS_INLINE_FUNCTION
- operator const ValueType*() const { return m_ptr; }
-
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch() : m_obj(), m_ptr(), m_offset() {}
-
- KOKKOS_DEFAULTED_FUNCTION
- ~CudaTextureFetch() = default;
-
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch(const CudaTextureFetch& rhs)
- : m_obj(rhs.m_obj), m_ptr(rhs.m_ptr), m_offset(rhs.m_offset) {}
-
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch(CudaTextureFetch&& rhs)
- : m_obj(rhs.m_obj), m_ptr(rhs.m_ptr), m_offset(rhs.m_offset) {}
-
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch& operator=(const CudaTextureFetch& rhs) {
- m_obj = rhs.m_obj;
- m_ptr = rhs.m_ptr;
- m_offset = rhs.m_offset;
- return *this;
- }
-
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch& operator=(CudaTextureFetch&& rhs) {
- m_obj = rhs.m_obj;
- m_ptr = rhs.m_ptr;
- m_offset = rhs.m_offset;
- return *this;
- }
-
- // Texture object spans the entire allocation.
- // This handle may view a subset of the allocation, so an offset is required.
- template <class CudaMemorySpace>
- inline explicit CudaTextureFetch(
- const ValueType* const arg_ptr,
- Kokkos::Impl::SharedAllocationRecord<CudaMemorySpace, void>* record)
- : m_obj(record->template attach_texture_object<AliasType>()),
- m_ptr(arg_ptr),
- m_offset(record->attach_texture_object_offset(
- reinterpret_cast<const AliasType*>(arg_ptr))) {}
-
- // Texture object spans the entire allocation.
- // This handle may view a subset of the allocation, so an offset is required.
- KOKKOS_INLINE_FUNCTION
- CudaTextureFetch(const CudaTextureFetch& rhs, size_t offset)
- : m_obj(rhs.m_obj),
- m_ptr(rhs.m_ptr + offset),
- m_offset(offset + rhs.m_offset) {}
-};
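-// How the alias load above works (a sketch): an 8-byte scalar such as
-// double is fetched through the texture path as a same-sized int2 and then
-// reinterpreted bit-for-bit:
-//   int2 v = tex1Dfetch<int2>(m_obj, i + m_offset);
-//   double x = *reinterpret_cast<double*>(&v);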
-
-#if defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
-
-template <typename ValueType, typename AliasType>
-struct CudaLDGFetch {
- const ValueType* m_ptr;
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION ValueType operator[](const iType& i) const {
-#if defined(KOKKOS_ARCH_KEPLER30) || defined(KOKKOS_ARCH_KEPLER32)
- return m_ptr[i];
-#else
- KOKKOS_IF_ON_DEVICE(
- (AliasType v = __ldg(reinterpret_cast<const AliasType*>(&m_ptr[i]));
- return *(reinterpret_cast<ValueType*>(&v));))
- KOKKOS_IF_ON_HOST((return m_ptr[i];))
-#endif
- }
-
- KOKKOS_INLINE_FUNCTION
- operator const ValueType*() const { return m_ptr; }
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch() : m_ptr() {}
-
- KOKKOS_DEFAULTED_FUNCTION
- ~CudaLDGFetch() = default;
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch(const CudaLDGFetch& rhs) : m_ptr(rhs.m_ptr) {}
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch(CudaLDGFetch&& rhs) : m_ptr(rhs.m_ptr) {}
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch& operator=(const CudaLDGFetch& rhs) {
- m_ptr = rhs.m_ptr;
- return *this;
- }
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch& operator=(CudaLDGFetch&& rhs) {
- m_ptr = rhs.m_ptr;
- return *this;
- }
-
- template <class CudaMemorySpace>
- inline explicit CudaLDGFetch(
- const ValueType* const arg_ptr,
- Kokkos::Impl::SharedAllocationRecord<CudaMemorySpace, void>*)
- : m_ptr(arg_ptr) {}
-
- KOKKOS_INLINE_FUNCTION
- CudaLDGFetch(CudaLDGFetch const& rhs, size_t offset)
- : m_ptr(rhs.m_ptr + offset) {}
-};
-
-#endif
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-/** \brief Replace Default ViewDataHandle with Cuda texture fetch
- * specialization if 'const' value type, CudaSpace and random access.
- */
-template <class Traits>
-class ViewDataHandle<
- Traits, std::enable_if_t<(
- // Is Cuda memory space
- (std::is_same<typename Traits::memory_space,
- Kokkos::CudaSpace>::value ||
- std::is_same<typename Traits::memory_space,
- Kokkos::CudaUVMSpace>::value) &&
- // Is a trivial const value of 4, 8, or 16 bytes
- std::is_trivial<typename Traits::const_value_type>::value &&
- std::is_same<typename Traits::const_value_type,
- typename Traits::value_type>::value &&
- (sizeof(typename Traits::const_value_type) == 4 ||
- sizeof(typename Traits::const_value_type) == 8 ||
- sizeof(typename Traits::const_value_type) == 16) &&
- // Random access trait
- (Traits::memory_traits::is_random_access != 0))>> {
- public:
- using track_type = Kokkos::Impl::SharedAllocationTracker;
-
- using value_type = typename Traits::const_value_type;
- using return_type = typename Traits::const_value_type; // NOT a reference
-
- using alias_type = std::conditional_t<
- (sizeof(value_type) == 4), int,
- std::conditional_t<
- (sizeof(value_type) == 8), ::int2,
- std::conditional_t<(sizeof(value_type) == 16), ::int4, void>>>;
-
-#if defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
- using handle_type = Kokkos::Impl::CudaLDGFetch<value_type, alias_type>;
-#else
- using handle_type = Kokkos::Impl::CudaTextureFetch<value_type, alias_type>;
-#endif
-
- KOKKOS_INLINE_FUNCTION
- static handle_type const& assign(handle_type const& arg_handle,
- track_type const& /* arg_tracker */) {
- return arg_handle;
- }
-
- KOKKOS_INLINE_FUNCTION
- static handle_type const assign(handle_type const& arg_handle,
- size_t offset) {
- return handle_type(arg_handle, offset);
- }
-
- KOKKOS_INLINE_FUNCTION
- static handle_type assign(value_type* arg_data_ptr,
- track_type const& arg_tracker) {
- if (arg_data_ptr == nullptr) return handle_type();
-
-#if !defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
- KOKKOS_IF_ON_HOST((
- // Assignment of texture = non-texture requires creation of a texture
- // object which can only occur on the host. In addition, 'get_record'
- // is only valid if called in a host execution space
-
- using memory_space = typename Traits::memory_space;
- using record =
- typename Impl::SharedAllocationRecord<memory_space, void>;
-
- record* const r = arg_tracker.template get_record<memory_space>();
-
- if (0 == r) {
- Kokkos::abort(
- "Cuda const random access View using Cuda texture memory "
- "requires "
- "Kokkos to allocate the View's memory");
- }
-
- return handle_type(arg_data_ptr, r);))
-#else
- KOKKOS_IF_ON_HOST((
- // Assignment of texture = non-texture requires creation of a texture
- // object which can only occur on the host. In addition, 'get_record'
- // is only valid if called in a host execution space
-
- using memory_space = typename Traits::memory_space;
- using record =
- typename Impl::SharedAllocationRecord<memory_space, void>;
-
- record* const r = arg_tracker.template get_record<memory_space>();
-
- return handle_type(arg_data_ptr, r);))
-#endif
-
- KOKKOS_IF_ON_DEVICE(
- ((void)arg_tracker; Kokkos::Impl::cuda_abort(
- "Cannot create Cuda texture object from within a Cuda kernel");
- return handle_type();))
- }
-};
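-// Usage sketch (not part of the original file): this specialization is
-// selected for const, random-access views in CudaSpace, e.g.
-//   Kokkos::View<const double*, Kokkos::CudaSpace,
-//                Kokkos::MemoryTraits<Kokkos::RandomAccess>>
-//       v(v_nonconst);
-// Element reads through v then go through CudaTextureFetch or CudaLDGFetch
-// rather than plain global-memory loads.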
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
-#endif /* #ifndef KOKKOS_CUDA_VIEW_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_ABORT_HPP
-#define KOKKOS_CUDA_ABORT_HPP
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA)
-
-#include <cuda.h>
-
-extern "C" {
-/* Cuda runtime function, declared in <crt/device_runtime.h>
- * Requires capability 2.x or better.
- */
-extern __device__ void __assertfail(const void *message, const void *file,
- unsigned int line, const void *function,
- size_t charsize);
-}
-
-namespace Kokkos {
-namespace Impl {
-
-#if !defined(__APPLE__)
- // Required to work around failures in random number generator unit tests
- // on pre-Volta architectures.
-#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
-__device__ inline void cuda_abort(const char *const message) {
-#else
-[[noreturn]] __device__ inline void cuda_abort(const char *const message) {
-#endif
- const char empty[] = "";
-
- __assertfail((const void *)message, (const void *)empty, (unsigned int)0,
- (const void *)empty, sizeof(char));
-
- // This loop is never executed. It's intended to suppress warnings that the
- // function returns, even though it does not. This is necessary because
- // __assertfail is not marked as [[noreturn]], even though it does not return.
- // Disable with KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK to work around failures
- // in random number generator unit tests on pre-Volta architectures.
-#if !defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
- while (true)
- ;
-#endif
-}
-#else
-__device__ inline void cuda_abort(const char *const message) {
- // __assertfail is not supported on macOS
- (void)message;
-}
-#endif
-
-} // namespace Impl
-} // namespace Kokkos
-#else
-void KOKKOS_CORE_SRC_CUDA_ABORT_PREVENT_LINK_ERROR() {}
-#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
-#endif /* #ifndef KOKKOS_CUDA_ABORT_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_ABORT_HPP
-#define KOKKOS_HIP_ABORT_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_HIP)
-
-#include <hip/hip_runtime.h>
-
-// FIXME_HIP ROCm 4.5 version header include would be <rocm/rocm_version.h>
-#if __has_include(<rocm_version.h>)
-#include <rocm_version.h>
-#define KOKKOS_IMPL_ROCM_VERSION \
- ROCM_VERSION_MAJOR * 10000 + ROCM_VERSION_MINOR * 100 + ROCM_VERSION_PATCH
-#endif
-
-// FIXME_HIP workaround for ROCm version less than 5.0.2
-#if KOKKOS_IMPL_ROCM_VERSION < 50002
-#define KOKKOS_IMPL_HIP_ABORT_DOES_NOT_PRINT_MESSAGE
-#endif
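-
-// Worked example of the encoding above: ROCm 5.2.3 yields
-//   KOKKOS_IMPL_ROCM_VERSION = 5 * 10000 + 2 * 100 + 3 = 50203,
-// so the workaround above stays active only for releases older than
-// ROCm 5.0.2 (encoded as 50002).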
-
-namespace Kokkos {
-namespace Impl {
-
-// The two keywords below are not contradictory: `noinline` directs the
-// optimizer not to inline the function body, while `inline` merely gives the
-// definition the linkage needed to live in a header.
-[[noreturn]] __device__ __attribute__((noinline)) inline void hip_abort(
- char const *msg) {
- const char empty[] = "";
- __assert_fail(msg, empty, 0, empty);
-  // This loop is never executed. It is intended to suppress warnings that the
-  // function returns even though it does not: __assert_fail is not marked
-  // [[noreturn]] despite never returning.
- while (true)
- ;
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_ATOMIC_HPP
-#define KOKKOS_HIP_ATOMIC_HPP
-
-#include <impl/Kokkos_Atomic_Memory_Order.hpp>
-#include <impl/Kokkos_Memory_Fence.hpp>
-#include <HIP/Kokkos_HIP_Locks.hpp>
-
-#if defined(KOKKOS_ENABLE_HIP_ATOMICS)
-namespace Kokkos {
-// HIP natively supports atomics on int and unsigned int (plus
-// unsigned long long and float for some operations). Variants:
-// atomic_exchange/compare_exchange/fetch_add/fetch_sub/fetch_max/fetch_min/
-// fetch_and/fetch_or/fetch_xor/fetch_inc/fetch_dec
-
-// atomic_exchange -------------------------------------------------------------
-
-__inline__ __device__ int atomic_exchange(volatile int *const dest,
- const int val) {
- return atomicExch(const_cast<int *>(dest), val);
-}
-
-__inline__ __device__ unsigned int atomic_exchange(
- volatile unsigned int *const dest, const unsigned int val) {
- return atomicExch(const_cast<unsigned int *>(dest), val);
-}
-
-__inline__ __device__ unsigned long long int atomic_exchange(
- volatile unsigned long long int *const dest,
- const unsigned long long int val) {
- return atomicExch(const_cast<unsigned long long *>(dest), val);
-}
-
-__inline__ __device__ float atomic_exchange(volatile float *const dest,
- const float val) {
- return atomicExch(const_cast<float *>(dest), val);
-}
-
-template <typename T>
-__inline__ __device__ T
-atomic_exchange(volatile T *const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
- int tmp = atomicExch(reinterpret_cast<int *>(const_cast<T *>(dest)),
- *reinterpret_cast<int *>(const_cast<T *>(&val)));
- return reinterpret_cast<T &>(tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_exchange(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T &>
- val) {
- using type = unsigned long long int;
-
- type tmp = atomicExch(reinterpret_cast<type *>(const_cast<T *>(dest)),
- *reinterpret_cast<type *>(const_cast<T *>(&val)));
- return reinterpret_cast<T &>(tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_exchange(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
- const T> &val) {
- T return_val;
- int done = 0;
-  unsigned long long active = __ballot(1);
-  unsigned long long done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip_space((void *)dest)) {
- return_val = *dest;
- *dest = val;
- Impl::unlock_address_hip_space((void *)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-}
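-
-// The function above is the generic "per-address lock + ballot" fallback used
-// for types with no native atomic support. Condensed, the pattern (using the
-// same Impl::lock/unlock helpers from Kokkos_HIP_Locks.hpp) is:
-//
-//   unsigned long long active = __ballot(1);  // lanes entering the loop
-//   unsigned long long done_active = 0;
-//   while (active != done_active) {           // spin until every lane is done
-//     if (!done && Impl::lock_address_hip_space((void *)dest)) {
-//       /* critical section: read-modify-write *dest */
-//       Impl::unlock_address_hip_space((void *)dest);
-//       done = 1;
-//     }
-//     done_active = __ballot(done);           // lanes that have finished
-//   }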
-
-// atomic_assign ---------------------------------------------------------------
-
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
- atomicExch(reinterpret_cast<int *>(const_cast<T *>(dest)),
- *reinterpret_cast<int *>(const_cast<T *>(&val)));
-}
-
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T &>
- val) {
- using type = unsigned long long int;
- atomicExch(reinterpret_cast<type *>(const_cast<T *>(dest)),
- *reinterpret_cast<type *>(const_cast<T *>(&val)));
-}
-
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) != sizeof(unsigned long long int),
- const T &>
- val) {
- atomic_exchange(dest, val);
-}
-
-// atomic_compare_exchange -----------------------------------------------------
-
-inline __device__ int atomic_compare_exchange(volatile int *dest, int compare,
- const int &val) {
- return atomicCAS(const_cast<int *>(dest), compare, val);
-}
-
-inline __device__ unsigned int atomic_compare_exchange(
- volatile unsigned int *dest, unsigned int compare,
- const unsigned int &val) {
- return atomicCAS(const_cast<unsigned int *>(dest), compare, val);
-}
-
-inline __device__ unsigned long long int atomic_compare_exchange(
- volatile unsigned long long int *dest, unsigned long long int compare,
- const unsigned long long int &val) {
- return atomicCAS(const_cast<unsigned long long int *>(dest), compare, val);
-}
-
-template <class T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T *dest, T compare,
- std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
- // FIXME_HIP UB
- union U {
- int i;
- T f;
- __inline__ __device__ U() {}
- } idest, icompare, ival;
- icompare.f = compare;
- ival.f = val;
- idest.i = atomicCAS(reinterpret_cast<int *>(const_cast<T *>(dest)),
- icompare.i, ival.i);
- return idest.f;
-}
-
-template <class T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T *dest, T compare,
- std::enable_if_t<sizeof(T) == sizeof(unsigned long long int), const T &>
- val) {
- // FIXME_HIP UB
- union U {
- unsigned long long int i;
- T f;
- __inline__ __device__ U() {}
- } idest, icompare, ival;
- icompare.f = compare;
- ival.f = val;
- idest.i = atomicCAS(
- reinterpret_cast<unsigned long long int *>(const_cast<T *>(dest)),
- icompare.i, ival.i);
- return idest.f;
-}
-
-template <typename T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T *const dest, const T &compare,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
- const T> &val) {
- T return_val;
- int done = 0;
-  unsigned long long active = __ballot(1);
-  unsigned long long done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip_space((void *)dest)) {
- return_val = *dest;
- if (return_val == compare) *dest = val;
- Impl::unlock_address_hip_space((void *)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-}
-
-// atomic_fetch_add ------------------------------------------------------------
-
-inline __device__ int atomic_fetch_add(volatile int *dest, const int &val) {
- return atomicAdd(const_cast<int *>(dest), val);
-}
-
-inline __device__ unsigned int atomic_fetch_add(volatile unsigned int *dest,
- const unsigned int &val) {
- return atomicAdd(const_cast<unsigned int *>(dest), val);
-}
-
-inline __device__ unsigned long long atomic_fetch_add(
- volatile unsigned long long *dest, const unsigned long long &val) {
- return atomicAdd(const_cast<unsigned long long *>(dest), val);
-}
-
-inline __device__ float atomic_fetch_add(volatile float *dest,
- const float &val) {
- return atomicAdd(const_cast<float *>(dest), val);
-}
-
-template <typename T>
-inline __device__ T
-atomic_fetch_add(volatile T *const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- // FIXME_HIP UB
- union U {
- int i;
- T t;
- __inline__ __device__ U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = atomicCAS(reinterpret_cast<int *>(const_cast<T *>(dest)),
- assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
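-
-// The loop above is the standard compare-and-swap retry pattern: read the
-// current value, compute the update through a type-punning union, and
-// publish it with atomicCAS; if another thread raced in between, the bits
-// returned by atomicCAS differ from the assumed bits and the iteration
-// repeats. For example, with T = float and *dest == 1.0f, two threads each
-// adding 1.0f serialize: the loser's atomicCAS fails once, re-reads 2.0f,
-// and succeeds on the retry, leaving 3.0f.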
-
-template <typename T>
-inline __device__ T atomic_fetch_add(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) == sizeof(long long), const T> val) {
- // FIXME_HIP UB
- union U {
- unsigned long long i;
- T t;
- __inline__ __device__ U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = atomic_compare_exchange(
- reinterpret_cast<volatile unsigned long long *>(dest), assume.i,
- newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-__inline__ __device__ char atomic_fetch_add(volatile char *dest,
- const char &val) {
- unsigned int oldval, newval, assume;
-  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffffff00) | (((assume & 0xff) + val) & 0xff);
- oldval =
- atomicCAS(reinterpret_cast<unsigned int *>(const_cast<char *>(dest)),
- assume, newval);
- } while (assume != oldval);
-
- return oldval;
-}
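-
-// Worked example of the byte packing above (a char in the lowest byte of
-// its containing 32-bit word): with the old word 0xAABBCC05 and val == 3,
-//   (old & 0xffffff00)            == 0xAABBCC00
-//   ((old & 0xff) + val) & 0xff   == 0x08
-// so the new word is 0xAABBCC08 and the other three bytes are untouched.
-// Note this assumes dest is aligned to the start of a 32-bit word.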
-
-__inline__ __device__ short atomic_fetch_add(volatile short *dest,
- const short &val) {
- unsigned int oldval, newval, assume;
-  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffff0000) | (((assume & 0xffff) + val) & 0xffff);
- oldval =
- atomicCAS(reinterpret_cast<unsigned int *>(const_cast<short *>(dest)),
- assume, newval);
- } while (assume != oldval);
-
- return oldval;
-}
-
-__inline__ __device__ long long atomic_fetch_add(volatile long long *dest,
- const long long &val) {
- return atomicAdd(
- reinterpret_cast<unsigned long long *>(const_cast<long long *>(dest)),
- val);
-}
-
-template <class T>
-__inline__ __device__ T atomic_fetch_add(
- volatile T *dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
- const T &>
- val) {
- T return_val;
- int done = 0;
-  unsigned long long active = __ballot(1);
-  unsigned long long done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Kokkos::Impl::lock_address_hip_space((void *)dest)) {
- return_val = *dest;
- *dest = return_val + val;
- Kokkos::Impl::unlock_address_hip_space((void *)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-}
-
-// atomic_fetch_sub -------------------------------------------------------------
-
-__inline__ __device__ int atomic_fetch_sub(volatile int *dest, int const &val) {
- return atomicSub(const_cast<int *>(dest), val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_sub(volatile unsigned int *dest,
- unsigned int const &val) {
- return atomicSub(const_cast<unsigned int *>(dest), val);
-}
-
-__inline__ __device__ unsigned long long atomic_fetch_sub(
- unsigned long long *dest, int64_t const &val) {
- return atomicAdd(reinterpret_cast<unsigned long long *>(dest),
- -reinterpret_cast<unsigned long long const &>(val));
-}
-
-__inline__ __device__ char atomic_fetch_sub(volatile char *dest,
- const char &val) {
- unsigned int oldval, newval, assume;
- oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffffff00) | (((assume & 0xff) - val) & 0xff);
- oldval =
- atomicCAS(reinterpret_cast<unsigned int *>(const_cast<char *>(dest)),
- assume, newval);
- } while (assume != oldval);
-
- return oldval;
-}
-
-__inline__ __device__ short atomic_fetch_sub(volatile short *dest,
- const short &val) {
- unsigned int oldval, newval, assume;
- oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffff0000) | (((assume & 0xffff) - val) & 0xffff);
- oldval =
- atomicCAS(reinterpret_cast<unsigned int *>(const_cast<short *>(dest)),
- assume, newval);
- } while (assume != oldval);
-
- return oldval;
-}
-
-__inline__ __device__ long long atomic_fetch_sub(volatile long long *dest,
- const long long &val) {
- return static_cast<long long>(atomicAdd(
- reinterpret_cast<unsigned long long int *>(const_cast<long long *>(dest)),
- -reinterpret_cast<unsigned long long int const &>(val)));
-}
-
-template <class T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(int), T> val) {
- // FIXME_HIP UB
- union U {
- int i;
- T t;
- __inline__ __device__ U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = atomic_compare_exchange(reinterpret_cast<volatile int *>(dest),
- assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <typename T>
-inline __device__ T atomic_fetch_sub(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) == sizeof(long long), const T> val) {
- // FIXME_HIP UB
- union U {
- unsigned long long i;
- T t;
- __inline__ __device__ U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = atomic_compare_exchange(
- reinterpret_cast<volatile unsigned long long *>(dest), assume.i,
- newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <class T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(char), T> val) {
- unsigned int oldval, newval, assume;
- oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffffff00) | (((assume & 0xff) - val) & 0xff);
-    oldval = atomicCAS(reinterpret_cast<unsigned int *>(const_cast<T *>(dest)),
-                       assume, newval);
- } while (assume != oldval);
-
-  return static_cast<T>(oldval & 0xff);
-}
-
-template <class T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(short), T> val) {
- unsigned int oldval, newval, assume;
-  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
-
- do {
- assume = oldval;
-    newval = (assume & 0xffff0000) | (((assume & 0xffff) - val) & 0xffff);
-    oldval = atomicCAS(reinterpret_cast<unsigned int *>(const_cast<T *>(dest)),
-                       assume, newval);
- } while (assume != oldval);
-
-  return static_cast<T>(oldval & 0xffff);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T *const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
- const T> &val) {
- T return_val;
- int done = 0;
-  unsigned long long active = __ballot(1);
-  unsigned long long done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip_space((void *)dest)) {
- return_val = *dest;
- *dest = return_val - val;
- Impl::unlock_address_hip_space((void *)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-}
-
-// atomic_fetch_or -------------------------------------------------------------
-
-__inline__ __device__ int atomic_fetch_or(volatile int *const dest,
- int const val) {
- return atomicOr(const_cast<int *>(dest), val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_or(
- volatile unsigned int *const dest, unsigned int const val) {
- return atomicOr(const_cast<unsigned int *>(dest), val);
-}
-
-__inline__ __device__ unsigned long long int atomic_fetch_or(
- volatile unsigned long long int *const dest,
- unsigned long long int const val) {
- return atomicOr(const_cast<unsigned long long int *>(dest), val);
-}
-
-// atomic_fetch_and ------------------------------------------------------------
-
-__inline__ __device__ int atomic_fetch_and(volatile int *const dest,
- int const val) {
- return atomicAnd(const_cast<int *>(dest), val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_and(
- volatile unsigned int *const dest, unsigned int const val) {
- return atomicAnd(const_cast<unsigned int *>(dest), val);
-}
-
-__inline__ __device__ unsigned long long int atomic_fetch_and(
- volatile unsigned long long int *const dest,
- unsigned long long int const val) {
- return atomicAnd(const_cast<unsigned long long int *>(dest), val);
-}
-
-namespace Impl {
-
-template <typename T>
-__inline__ __device__ void _atomic_store(T *ptr, T val,
- memory_order_relaxed_t) {
- (void)atomic_exchange(ptr, val);
-}
-
-template <typename T>
-__inline__ __device__ void _atomic_store(T *ptr, T val,
- memory_order_seq_cst_t) {
- memory_fence();
- atomic_store(ptr, val, memory_order_relaxed);
- memory_fence();
-}
-
-template <typename T>
-__inline__ __device__ void _atomic_store(T *ptr, T val,
- memory_order_release_t) {
- memory_fence();
- atomic_store(ptr, val, memory_order_relaxed);
-}
-
-template <typename T>
-__inline__ __device__ void _atomic_store(T *ptr, T val) {
- atomic_store(ptr, val, memory_order_relaxed);
-}
-
-template <typename T>
-__inline__ __device__ T _atomic_load(T *ptr, memory_order_relaxed_t) {
- T dummy{};
- return atomic_compare_exchange(ptr, dummy, dummy);
-}
-
-template <typename T>
-__inline__ __device__ T _atomic_load(T *ptr, memory_order_seq_cst_t) {
- memory_fence();
- T rv = atomic_load(ptr, memory_order_relaxed);
- memory_fence();
- return rv;
-}
-
-template <typename T>
-__inline__ __device__ T _atomic_load(T *ptr, memory_order_acquire_t) {
- T rv = atomic_load(ptr, memory_order_relaxed);
- memory_fence();
- return rv;
-}
-
-template <typename T>
-__inline__ __device__ T _atomic_load(T *ptr) {
- return atomic_load(ptr, memory_order_relaxed);
-}
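-
-// Usage sketch for the helpers above (device code, using the memory-order tag
-// objects declared in impl/Kokkos_Atomic_Memory_Order.hpp):
-//
-//   int v = _atomic_load(ptr, memory_order_relaxed);
-//   _atomic_store(ptr, v + 1, memory_order_seq_cst);
-//
-// The seq_cst variants simply bracket the relaxed operation with full memory
-// fences, which is conservative but correct on HIP.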
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_ERROR_HPP
-#define KOKKOS_HIP_ERROR_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <impl/Kokkos_Error.hpp>
-
-#include <hip/hip_runtime.h>
-
-#include <ostream>
-
-namespace Kokkos {
-namespace Impl {
-
-void hip_internal_error_throw(hipError_t e, const char* name,
- const char* file = nullptr, const int line = 0);
-
-inline void hip_internal_safe_call(hipError_t e, const char* name,
- const char* file = nullptr,
- const int line = 0) {
- if (hipSuccess != e) {
- hip_internal_error_throw(e, name, file, line);
- }
-}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-
-KOKKOS_DEPRECATED
-inline void hip_internal_safe_call_deprecated(hipError_t e, const char* name,
- const char* file = nullptr,
- const int line = 0) {
- hip_internal_safe_call(e, name, file, line);
-}
-
-#endif
-
-} // namespace Impl
-} // namespace Kokkos
-
-#define KOKKOS_IMPL_HIP_SAFE_CALL(call) \
- Kokkos::Impl::hip_internal_safe_call(call, #call, __FILE__, __LINE__)
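-
-// Typical use wraps every raw HIP runtime call so failures carry the call
-// site, e.g.
-//
-//   void *ptr = nullptr;
-//   KOKKOS_IMPL_HIP_SAFE_CALL(hipMalloc(&ptr, bytes));
-//
-// which reports through hip_internal_error_throw with the stringized call,
-// __FILE__, and __LINE__ attached.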
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-#define HIP_SAFE_CALL(call) \
- Kokkos::Impl::hip_internal_safe_call_deprecated(call, #call, __FILE__, \
- __LINE__)
-
-#endif
-
-namespace Kokkos {
-namespace Experimental {
-
-class HIPRawMemoryAllocationFailure : public RawMemoryAllocationFailure {
- private:
- hipError_t m_error_code = hipSuccess;
-
- static FailureMode get_failure_mode(hipError_t error_code) {
- switch (error_code) {
- case hipErrorMemoryAllocation: return FailureMode::OutOfMemoryError;
- case hipErrorInvalidValue: return FailureMode::InvalidAllocationSize;
- default: return FailureMode::Unknown;
- }
- }
-
- public:
- HIPRawMemoryAllocationFailure(size_t arg_attempted_size,
- hipError_t arg_error_code,
- AllocationMechanism arg_mechanism) noexcept
- : RawMemoryAllocationFailure(
- arg_attempted_size, /* HIPSpace doesn't handle alignment? */ 1,
- get_failure_mode(arg_error_code), arg_mechanism),
- m_error_code(arg_error_code) {}
-
- void append_additional_error_information(std::ostream& o) const override {
- if (m_error_code != hipSuccess) {
- o << " The HIP allocation returned the error code \"\""
- << hipGetErrorName(m_error_code) << "\".";
- }
- }
-};
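-
-// Hypothetical usage sketch (the HIPMalloc enumerator is assumed here to be
-// the AllocationMechanism that HIPSpace reports for a plain hipMalloc):
-//
-//   void *ptr = nullptr;
-//   hipError_t err = hipMalloc(&ptr, size);
-//   if (err != hipSuccess)
-//     throw HIPRawMemoryAllocationFailure(
-//         size, err,
-//         RawMemoryAllocationFailure::AllocationMechanism::HIPMalloc);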
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
-#define KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
-
-#include <hip/hip_fp16.h>
-
-#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
-// Make sure no one else tries to define half_t
-#define KOKKOS_IMPL_HALF_TYPE_DEFINED
-#define KOKKOS_IMPL_HIP_HALF_TYPE_DEFINED
-
-namespace Kokkos {
-namespace Impl {
-struct half_impl_t {
- using type = __half;
-};
-} // namespace Impl
-} // namespace Kokkos
-#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
-#endif // KOKKOS_ENABLE_HIP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/*--------------------------------------------------------------------------*/
-/* Kokkos interfaces */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp>
-
-#include <HIP/Kokkos_HIP_Instance.hpp>
-#include <Kokkos_HIP.hpp>
-#include <Kokkos_HIP_Space.hpp>
-#include <impl/Kokkos_Error.hpp>
-
-/*--------------------------------------------------------------------------*/
-/* Standard 'C' libraries */
-#include <stdlib.h>
-
-/* Standard 'C++' libraries */
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
-__device__ __constant__ unsigned long kokkos_impl_hip_constant_memory_buffer
- [Kokkos::Experimental::Impl::HIPTraits::ConstantMemoryUsage /
- sizeof(unsigned long)];
-#endif
-
-namespace Kokkos {
-namespace Impl {
-Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>
-hip_global_unique_token_locks(bool deallocate) {
- static Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace> locks =
- Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>();
- if (!deallocate && locks.extent(0) == 0)
- locks = Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>(
- "Kokkos::UniqueToken<HIP>::m_locks",
- Kokkos::Experimental::HIP().concurrency());
- if (deallocate)
- locks = Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>();
- return locks;
-}
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Experimental {
-namespace {
-class HIPInternalDevices {
- public:
- enum { MAXIMUM_DEVICE_COUNT = 64 };
- struct hipDeviceProp_t m_hipProp[MAXIMUM_DEVICE_COUNT];
- int m_hipDevCount;
-
- HIPInternalDevices();
-
- static HIPInternalDevices const &singleton();
-};
-
-HIPInternalDevices::HIPInternalDevices() {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceCount(&m_hipDevCount));
-
- if (m_hipDevCount > MAXIMUM_DEVICE_COUNT) {
- Kokkos::abort(
- "Sorry, you have more GPUs per node than we thought anybody would ever "
- "have. Please report this to github.com/kokkos/kokkos.");
- }
- for (int i = 0; i < m_hipDevCount; ++i) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceProperties(m_hipProp + i, i));
- }
-}
-
-const HIPInternalDevices &HIPInternalDevices::singleton() {
- static HIPInternalDevices self;
- return self;
-}
-} // namespace
-
-unsigned long *Impl::HIPInternal::constantMemHostStaging = nullptr;
-hipEvent_t Impl::HIPInternal::constantMemReusable = nullptr;
-std::mutex Impl::HIPInternal::constantMemMutex;
-
-namespace Impl {
-
-//----------------------------------------------------------------------------
-
-void HIPInternal::print_configuration(std::ostream &s) const {
- const HIPInternalDevices &dev_info = HIPInternalDevices::singleton();
-
- s << "macro KOKKOS_ENABLE_HIP : defined" << '\n';
-#if defined(HIP_VERSION)
- s << "macro HIP_VERSION = " << HIP_VERSION << " = version "
- << HIP_VERSION_MAJOR << '.' << HIP_VERSION_MINOR << '.' << HIP_VERSION_PATCH
- << '\n';
-#endif
-
- for (int i = 0; i < dev_info.m_hipDevCount; ++i) {
- s << "Kokkos::Experimental::HIP[ " << i << " ] "
- << dev_info.m_hipProp[i].name << " version "
- << (dev_info.m_hipProp[i].major) << "." << dev_info.m_hipProp[i].minor
- << ", Total Global Memory: "
- << ::Kokkos::Impl::human_memory_size(dev_info.m_hipProp[i].totalGlobalMem)
- << ", Shared Memory per Block: "
- << ::Kokkos::Impl::human_memory_size(
- dev_info.m_hipProp[i].sharedMemPerBlock);
- if (m_hipDev == i) s << " : Selected";
- s << '\n';
- }
-}
-
-//----------------------------------------------------------------------------
-
-HIPInternal::~HIPInternal() {
- if (m_scratchSpace || m_scratchFlags) {
- std::cerr << "Kokkos::Experimental::HIP ERROR: Failed to call "
- "Kokkos::Experimental::HIP::finalize()"
- << std::endl;
- std::cerr.flush();
- }
-
- m_hipDev = -1;
- m_hipArch = -1;
- m_multiProcCount = 0;
- m_maxWarpCount = 0;
- m_maxSharedWords = 0;
- m_maxShmemPerBlock = 0;
- m_scratchSpaceCount = 0;
- m_scratchFlagsCount = 0;
- m_scratchSpace = nullptr;
- m_scratchFlags = nullptr;
- m_stream = nullptr;
-}
-
-int HIPInternal::verify_is_initialized(const char *const label) const {
- if (m_hipDev < 0) {
- Kokkos::abort((std::string("Kokkos::Experimental::HIP::") + label +
- " : ERROR device not initialized\n")
- .c_str());
- }
- return 0 <= m_hipDev;
-}
-
-uint32_t HIPInternal::impl_get_instance_id() const noexcept {
- return m_instance_id;
-}
-HIPInternal &HIPInternal::singleton() {
- static HIPInternal *self = nullptr;
- if (!self) {
- self = new HIPInternal();
- }
- return *self;
-}
-
-void HIPInternal::fence() const {
- fence("Kokkos::HIPInternal::fence: Unnamed Internal Fence");
-}
-void HIPInternal::fence(const std::string &name) const {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HIP>(
- name,
- Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
- impl_get_instance_id()},
- [&]() {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(m_stream));
- // can reset our cycle id now as well
- m_cycleId = 0;
- });
-}
-
-void HIPInternal::initialize(int hip_device_id, hipStream_t stream,
- bool manage_stream) {
- if (was_finalized)
- Kokkos::abort("Calling HIP::initialize after HIP::finalize is illegal\n");
-
- if (is_initialized()) return;
-
- int constexpr WordSize = sizeof(size_type);
-
- if (!HostSpace::execution_space::impl_is_initialized()) {
- const std::string msg(
- "HIP::initialize ERROR : HostSpace::execution_space "
- "is not initialized");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-
- const HIPInternalDevices &dev_info = HIPInternalDevices::singleton();
-
- const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
-
- // Need at least a GPU device
- const bool ok_id =
- 0 <= hip_device_id && hip_device_id < dev_info.m_hipDevCount;
-
- if (ok_init && ok_id) {
- const struct hipDeviceProp_t &hipProp = dev_info.m_hipProp[hip_device_id];
-
- m_hipDev = hip_device_id;
- m_deviceProp = hipProp;
-
- KOKKOS_IMPL_HIP_SAFE_CALL(hipSetDevice(m_hipDev));
-
- m_stream = stream;
- m_manage_stream = manage_stream;
- m_team_scratch_current_size = 0;
- m_team_scratch_ptr = nullptr;
-
- // number of multiprocessors
- m_multiProcCount = hipProp.multiProcessorCount;
-
- //----------------------------------
-    // Maximum number of warps, capped at WarpSize because a block-level
-    // reduction uses at most one warp per lane of a single warp.
- m_maxWarpCount = hipProp.maxThreadsPerBlock / Impl::HIPTraits::WarpSize;
- if (HIPTraits::WarpSize < m_maxWarpCount) {
- m_maxWarpCount = Impl::HIPTraits::WarpSize;
- }
- m_maxSharedWords = hipProp.sharedMemPerBlock / WordSize;
-
- //----------------------------------
- // Maximum number of blocks
- m_maxBlock[0] = hipProp.maxGridSize[0];
- m_maxBlock[1] = hipProp.maxGridSize[1];
- m_maxBlock[2] = hipProp.maxGridSize[2];
-
-    // Theoretically a CU can host 40 wavefronts, but only 32 can be
-    // sustained; see
- // https://github.com/ROCm-Developer-Tools/HIP/blob/a0b5dfd625d99af7e288629747b40dd057183173/vdi/hip_platform.cpp#L742
- m_maxWavesPerCU = 32;
- // FIXME_HIP - Nick to implement this upstream
- // Register count comes from Sec. 2.2. "Data Sharing" of the
- // Vega 7nm ISA document (see the diagram)
- // https://developer.amd.com/wp-content/resources/Vega_7nm_Shader_ISA.pdf
- // VGPRS = 4 (SIMD/CU) * 256 VGPR/SIMD * 64 registers / VGPR =
- // 65536 VGPR/CU
- m_regsPerSM = 65536;
- m_shmemPerSM = hipProp.maxSharedMemoryPerMultiProcessor;
- m_maxShmemPerBlock = hipProp.sharedMemPerBlock;
- m_maxThreadsPerSM = m_maxWavesPerCU * HIPTraits::WarpSize;
- //----------------------------------
- // Multiblock reduction uses scratch flags for counters
- // and scratch space for partial reduction values.
- // Allocate some initial space. This will grow as needed.
- {
- const unsigned reduce_block_count =
- m_maxWarpCount * Impl::HIPTraits::WarpSize;
-
- (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
- (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
- }
- //----------------------------------
- // Concurrent bitset for obtaining unique tokens from within
- // an executing kernel.
- {
- const int32_t buffer_bound =
- Kokkos::Impl::concurrent_bitset::buffer_bound(HIP::concurrency());
-
- // Allocate and initialize uint32_t[ buffer_bound ]
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>;
-
- Record *const r = Record::allocate(Kokkos::Experimental::HIPSpace(),
- "Kokkos::InternalScratchBitset",
- sizeof(uint32_t) * buffer_bound);
-
- Record::increment(r);
- }
- //----------------------------------
-
- } else {
- std::ostringstream msg;
- msg << "Kokkos::Experimental::HIP::initialize(" << hip_device_id
- << ") FAILED";
-
- if (!ok_init) {
- msg << " : Already initialized";
- }
- if (!ok_id) {
- msg << " : Device identifier out of range "
- << "[0.." << dev_info.m_hipDevCount - 1 << "]";
- }
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
-  // Init the array used for arbitrarily sized atomics
- if (m_stream == nullptr) ::Kokkos::Impl::initialize_host_hip_lock_arrays();
-
-  // Allocate a staging buffer for constant memory in pinned host memory and
-  // an event so a buffer still in use by a previous kernel launch is not
-  // overwritten
- if (m_stream == nullptr) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipHostMalloc((void **)&constantMemHostStaging,
- HIPTraits::ConstantMemoryUsage));
-
- KOKKOS_IMPL_HIP_SAFE_CALL(hipEventCreate(&constantMemReusable));
- }
-
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipMalloc(&m_scratch_locks, sizeof(int32_t) * HIP::concurrency()));
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipMemset(m_scratch_locks, 0, sizeof(int32_t) * HIP::concurrency()));
-}
-
-//----------------------------------------------------------------------------
-
-using ScratchGrain =
- Kokkos::Experimental::HIP::size_type[Impl::HIPTraits::WarpSize];
-enum { sizeScratchGrain = sizeof(ScratchGrain) };
-
-Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_space(
- const std::size_t size) {
- if (verify_is_initialized("scratch_space") &&
- m_scratchSpaceCount * sizeScratchGrain < size) {
- m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>;
-
- if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace));
-
- Record *const r = Record::allocate(
- Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchSpace",
- (sizeScratchGrain * m_scratchSpaceCount));
-
- Record::increment(r);
-
- m_scratchSpace = reinterpret_cast<size_type *>(r->data());
- }
-
- return m_scratchSpace;
-}
-
-Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_flags(
- const std::size_t size) {
- if (verify_is_initialized("scratch_flags") &&
- m_scratchFlagsCount * sizeScratchGrain < size) {
- m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>;
-
- if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags));
-
- Record *const r = Record::allocate(
- Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchFlags",
- (sizeScratchGrain * m_scratchFlagsCount));
-
- Record::increment(r);
-
- m_scratchFlags = reinterpret_cast<size_type *>(r->data());
-
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain));
- }
-
- return m_scratchFlags;
-}
-
-void *HIPInternal::resize_team_scratch_space(std::int64_t bytes,
- bool force_shrink) {
- if (m_team_scratch_current_size == 0) {
- m_team_scratch_current_size = bytes;
- m_team_scratch_ptr = Kokkos::kokkos_malloc<Kokkos::Experimental::HIPSpace>(
- "Kokkos::HIPSpace::TeamScratchMemory", m_team_scratch_current_size);
- }
- if ((bytes > m_team_scratch_current_size) ||
- ((bytes < m_team_scratch_current_size) && (force_shrink))) {
- m_team_scratch_current_size = bytes;
- m_team_scratch_ptr = Kokkos::kokkos_realloc<Kokkos::Experimental::HIPSpace>(
- m_team_scratch_ptr, m_team_scratch_current_size);
- }
- return m_team_scratch_ptr;
-}
-
-//----------------------------------------------------------------------------
-
-void HIPInternal::finalize() {
- this->fence("Kokkos::HIPInternal::finalize: fence on finalization");
- was_finalized = true;
-
- if (this == &singleton()) {
- (void)Kokkos::Impl::hip_global_unique_token_locks(true);
- KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(constantMemHostStaging));
- KOKKOS_IMPL_HIP_SAFE_CALL(hipEventDestroy(constantMemReusable));
- }
-
- if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
- using RecordHIP =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace>;
-
- RecordHIP::decrement(RecordHIP::get_record(m_scratchFlags));
- RecordHIP::decrement(RecordHIP::get_record(m_scratchSpace));
-
- if (m_team_scratch_current_size > 0)
- Kokkos::kokkos_free<Kokkos::Experimental::HIPSpace>(m_team_scratch_ptr);
-
- if (m_manage_stream && m_stream != nullptr)
- KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamDestroy(m_stream));
- }
-
- m_hipDev = -1;
- m_hipArch = -1;
- m_multiProcCount = 0;
- m_maxWarpCount = 0;
- m_maxBlock = {0, 0, 0};
- m_maxSharedWords = 0;
- m_maxShmemPerBlock = 0;
- m_scratchSpaceCount = 0;
- m_scratchFlagsCount = 0;
- m_scratchSpace = nullptr;
- m_scratchFlags = nullptr;
- m_stream = nullptr;
- m_team_scratch_current_size = 0;
- m_team_scratch_ptr = nullptr;
-
- KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(m_scratch_locks));
- m_scratch_locks = nullptr;
-
- if (nullptr != d_driverWorkArray) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(d_driverWorkArray));
- d_driverWorkArray = nullptr;
- }
-}
-
-char *HIPInternal::get_next_driver(size_t driverTypeSize) const {
- if (d_driverWorkArray == nullptr) {
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipHostMalloc(&d_driverWorkArray,
- m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char),
- hipHostMallocNonCoherent));
- }
- if (driverTypeSize > m_maxDriverTypeSize) {
- // fence handles the cycle id reset for us
- fence(
- "Kokkos::HIPInternal::get_next_driver: fence before reallocating "
- "resources");
- KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(d_driverWorkArray));
- m_maxDriverTypeSize = driverTypeSize;
- if (m_maxDriverTypeSize % 128 != 0)
- m_maxDriverTypeSize =
- m_maxDriverTypeSize + 128 - m_maxDriverTypeSize % 128;
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipHostMalloc(&d_driverWorkArray,
- m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char),
- hipHostMallocNonCoherent));
- } else {
- m_cycleId = (m_cycleId + 1) % m_maxDriverCycles;
- if (m_cycleId == 0) {
- // ensure any outstanding kernels are completed before we wrap around
- fence(
- "Kokkos::HIPInternal::get_next_driver: fence before reusing first "
- "driver");
- }
- }
- return &d_driverWorkArray[m_maxDriverTypeSize * m_cycleId];
-}
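-
-// Ring-buffer sketch of the cycle logic above: with the default
-// m_maxDriverCycles == 100 and m_maxDriverTypeSize == 10240, successive calls
-// return host-pinned slots at byte offsets 10240 * 1, 10240 * 2, ...,
-// wrapping back to offset 0 after 100 launches; the fence on wrap-around
-// guarantees the device has finished with slot 0 before it is reused. When a
-// larger driver forces reallocation, the new size is rounded up to a
-// multiple of 128 bytes.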
-
-//----------------------------------------------------------------------------
-
-Kokkos::Experimental::HIP::size_type hip_internal_multiprocessor_count() {
- return HIPInternal::singleton().m_multiProcCount;
-}
-
-Kokkos::Experimental::HIP::size_type hip_internal_maximum_warp_count() {
- return HIPInternal::singleton().m_maxWarpCount;
-}
-
-std::array<Kokkos::Experimental::HIP::size_type, 3>
-hip_internal_maximum_grid_count() {
- return HIPInternal::singleton().m_maxBlock;
-}
-
-Kokkos::Experimental::HIP::size_type *hip_internal_scratch_space(
- const HIP &instance, const std::size_t size) {
- return instance.impl_internal_space_instance()->scratch_space(size);
-}
-
-Kokkos::Experimental::HIP::size_type *hip_internal_scratch_flags(
- const HIP &instance, const std::size_t size) {
- return instance.impl_internal_space_instance()->scratch_flags(size);
-}
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-void hip_device_synchronize(const std::string &name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HIP>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize()); });
-}
-
-void hip_internal_error_throw(hipError_t e, const char *name, const char *file,
- const int line) {
- std::ostringstream out;
- out << name << " error( " << hipGetErrorName(e)
- << "): " << hipGetErrorString(e);
- if (file) {
- out << " " << file << ":" << line;
- }
- throw_runtime_exception(out.str());
-}
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Experimental {
-HIP::size_type HIP::detect_device_count() {
- return HIPInternalDevices::singleton().m_hipDevCount;
-}
-} // namespace Experimental
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/*--------------------------------------------------------------------------*/
-
-#ifndef KOKKOS_HIP_INSTANCE_HPP
-#define KOKKOS_HIP_INSTANCE_HPP
-
-#include <Kokkos_HIP_Space.hpp>
-#include <HIP/Kokkos_HIP_Error.hpp>
-
-#include <mutex>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-struct HIPTraits {
- static int constexpr WarpSize = 64;
-  static int constexpr WarpIndexMask = 0x003f;  /* 63 == WarpSize - 1 */
-  static int constexpr WarpIndexShift = 6;      /* WarpSize == 1 << WarpIndexShift */
- static int constexpr ConservativeThreadsPerBlock =
- 256; // conservative fallback blocksize in case of spills
- static int constexpr MaxThreadsPerBlock =
- 1024; // the maximum we can fit in a block
- static int constexpr ConstantMemoryUsage = 0x008000; /* 32k bytes */
- static int constexpr KernelArgumentLimit = 0x001000; /* 4k bytes */
- static int constexpr ConstantMemoryUseThreshold = 0x000200; /* 512 bytes */
-};
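-
-// With WarpSize == 64, the mask/shift pair above decomposes a flat thread
-// index into a lane and a wavefront index:
-//
-//   int lane = threadIdx.x & HIPTraits::WarpIndexMask;   // tid % 64
-//   int wave = threadIdx.x >> HIPTraits::WarpIndexShift; // tid / 64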
-
-//----------------------------------------------------------------------------
-
-HIP::size_type hip_internal_maximum_warp_count();
-std::array<HIP::size_type, 3> hip_internal_maximum_grid_count();
-HIP::size_type hip_internal_multiprocessor_count();
-
-HIP::size_type *hip_internal_scratch_space(const HIP &instance,
- const std::size_t size);
-HIP::size_type *hip_internal_scratch_flags(const HIP &instance,
- const std::size_t size);
-
-//----------------------------------------------------------------------------
-
-class HIPInternal {
- private:
- HIPInternal(const HIPInternal &);
- HIPInternal &operator=(const HIPInternal &);
-
- public:
- using size_type = ::Kokkos::Experimental::HIP::size_type;
-
- int m_hipDev = -1;
- int m_hipArch = -1;
- unsigned m_multiProcCount = 0;
- unsigned m_maxWarpCount = 0;
- std::array<size_type, 3> m_maxBlock = {0, 0, 0};
- unsigned m_maxWavesPerCU = 0;
- unsigned m_maxSharedWords = 0;
- int m_regsPerSM;
- int m_shmemPerSM = 0;
- int m_maxShmemPerBlock = 0;
- int m_maxThreadsPerSM = 0;
-
- // array of DriverTypes to be allocated in host-pinned memory for async
- // kernel launches
- mutable char *d_driverWorkArray = nullptr;
- // number of kernel launches that can be in-flight w/o synchronization
- const int m_maxDriverCycles = 100;
- // max size of a DriverType [bytes]
- mutable size_t m_maxDriverTypeSize = 1024 * 10;
- // the current index in the driverWorkArray
- mutable int m_cycleId = 0;
- // mutex to access d_driverWorkArray
- mutable std::mutex m_mutexWorkArray;
- // mutex to access shared memory
- mutable std::mutex m_mutexSharedMemory;
-
- // Scratch Spaces for Reductions
- std::size_t m_scratchSpaceCount = 0;
- std::size_t m_scratchFlagsCount = 0;
-
- size_type *m_scratchSpace = nullptr;
- size_type *m_scratchFlags = nullptr;
-
- hipDeviceProp_t m_deviceProp;
-
- hipStream_t m_stream = nullptr;
- uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
- Kokkos::Experimental::HIP>(reinterpret_cast<uintptr_t>(this));
- bool m_manage_stream = false;
-
- // Team Scratch Level 1 Space
- mutable int64_t m_team_scratch_current_size = 0;
- mutable void *m_team_scratch_ptr = nullptr;
- mutable std::mutex m_team_scratch_mutex;
- std::int32_t *m_scratch_locks;
-
- bool was_finalized = false;
-
- // FIXME_HIP: these want to be per-device, not per-stream... use of 'static'
- // here will break once there are multiple devices though
- static unsigned long *constantMemHostStaging;
- static hipEvent_t constantMemReusable;
- static std::mutex constantMemMutex;
-
- static HIPInternal &singleton();
-
- int verify_is_initialized(const char *const label) const;
-
- int is_initialized() const { return m_hipDev >= 0; }
-
- void initialize(int hip_device_id, hipStream_t stream = nullptr,
- bool manage_stream = false);
- void finalize();
-
- void print_configuration(std::ostream &) const;
-
- void fence() const;
- void fence(const std::string &) const;
-
- // returns the next driver type pointer in our work array
- char *get_next_driver(size_t driverTypeSize) const;
-
- ~HIPInternal();
-
- HIPInternal() = default;
-
- // Resizing of reduction related scratch spaces
- size_type *scratch_space(const std::size_t size);
- size_type *scratch_flags(const std::size_t size);
- uint32_t impl_get_instance_id() const noexcept;
- // Resizing of team level 1 scratch
- void *resize_team_scratch_space(std::int64_t bytes,
- bool force_shrink = false);
-};
-
-} // namespace Impl
-
-// Partitioning an execution space: expects the space plus integer arguments
-// giving relative weights.
-// Customization point for backends.
-// Default behavior is to return the passed-in instance.
-
-namespace Impl {
-inline void create_HIP_instances(std::vector<HIP> &instances) {
- for (int s = 0; s < int(instances.size()); s++) {
- hipStream_t stream;
- KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&stream));
- instances[s] = HIP(stream, true);
- }
-}
-} // namespace Impl
-
-template <class... Args>
-std::vector<HIP> partition_space(const HIP &, Args...) {
-#ifdef __cpp_fold_expressions
- static_assert(
- (... && std::is_arithmetic_v<Args>),
- "Kokkos Error: partitioning arguments must be integers or floats");
-#endif
-
- std::vector<HIP> instances(sizeof...(Args));
- Impl::create_HIP_instances(instances);
- return instances;
-}
-
-template <class T>
-std::vector<HIP> partition_space(const HIP &, std::vector<T> &weights) {
- static_assert(
- std::is_arithmetic<T>::value,
- "Kokkos Error: partitioning arguments must be integers or floats");
-
- std::vector<HIP> instances(weights.size());
- Impl::create_HIP_instances(instances);
- return instances;
-}
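-
-// Usage sketch (hip being an existing HIP instance): both overloads create
-// one new stream-backed instance per argument or weight; the weights
-// themselves are currently ignored by this backend.
-//
-//   auto two = partition_space(hip, 1, 1);  // two new instances
-//   std::vector<int> w{1, 2, 3};
-//   auto three = partition_space(hip, w);   // three new instances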
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <HIP/Kokkos_HIP_Locks.hpp>
-#include <HIP/Kokkos_HIP_Error.hpp>
-#include <Kokkos_HIP_Space.hpp>
-
-#include <hip/hip_runtime.h>
-
-#include <iostream>
-
-namespace Kokkos {
-
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
-namespace Impl {
-__device__ __constant__ HIPLockArrays g_device_hip_lock_arrays = {nullptr, 0};
-}
-#endif
-
-namespace {
-
-__global__ void init_lock_array_kernel_atomic() {
- unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
- if (i < KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1) {
- Kokkos::Impl::g_device_hip_lock_arrays.atomic[i] = 0;
- }
-}
-
-} // namespace
-
-namespace Impl {
-
-HIPLockArrays g_host_hip_lock_arrays = {nullptr, 0};
-
-void initialize_host_hip_lock_arrays() {
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
- desul::Impl::init_lock_arrays();
-
- DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE();
-#endif
-
- if (g_host_hip_lock_arrays.atomic != nullptr) return;
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMalloc(
- &g_host_hip_lock_arrays.atomic,
- sizeof(std::int32_t) * (KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1)));
-
- g_host_hip_lock_arrays.n = ::Kokkos::Experimental::HIP::concurrency();
-
- KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
- init_lock_array_kernel_atomic<<<
- (KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256, 0, nullptr>>>();
-}
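-
-// Grid sizing for the launch above: (entries + 255) / 256 blocks of 256
-// threads round up so every lock entry is zeroed exactly once; e.g. if
-// KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK == 0x7fff (an assumption for
-// illustration), there are 32768 entries and the kernel launches 128 blocks.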
-
-void finalize_host_hip_lock_arrays() {
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
- desul::Impl::finalize_lock_arrays();
-#endif
-
- if (g_host_hip_lock_arrays.atomic == nullptr) return;
- KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(g_host_hip_lock_arrays.atomic));
- g_host_hip_lock_arrays.atomic = nullptr;
- g_host_hip_lock_arrays.n = 0;
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
- KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
-#endif
-}
-
-} // namespace Impl
-
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_LOCKS_HPP
-#define KOKKOS_HIP_LOCKS_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <cstdint>
-
-#include <HIP/Kokkos_HIP_Error.hpp>
-
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#include <desul/atomics/Lock_Array_HIP.hpp>
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-struct HIPLockArrays {
- std::int32_t* atomic;
- std::int32_t n;
-};
-
-/// \brief This global variable in Host space is the central definition
-/// of these arrays.
-extern HIPLockArrays g_host_hip_lock_arrays;
-
-/// \brief After this call, the g_host_hip_lock_arrays variable has
-/// valid, initialized arrays.
-///
-/// This call is idempotent.
-void initialize_host_hip_lock_arrays();
-
-/// \brief After this call, the g_host_hip_lock_arrays variable has
-/// all null pointers, and all array memory has been freed.
-///
-/// This call is idempotent.
-void finalize_host_hip_lock_arrays();
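-
-/// Usage sketch (hypothetical host-side call sites, shown only to
-/// illustrate the contract documented above):
-/// \code
-///   Kokkos::Impl::initialize_host_hip_lock_arrays(); // allocate + copy
-///   Kokkos::Impl::initialize_host_hip_lock_arrays(); // no-op: idempotent
-///   // ... run kernels that may take per-address locks ...
-///   Kokkos::Impl::finalize_host_hip_lock_arrays();   // free + reset to null
-/// \endcode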
-
-#if defined(__HIPCC__)
-
-/// \brief This global variable in HIP space is what kernels use
-/// to get access to the lock arrays.
-///
-/// When relocatable device code is enabled, there can be one single
-/// instance of this global variable for the entire executable,
-/// whose definition will be in Kokkos_HIP_Locks.cpp (and whose declaration
-/// here must then be extern).
-/// This one instance will be initialized by initialize_host_hip_lock_arrays

-/// and need not be modified afterwards.
-///
-/// When relocatable device code is disabled, an instance of this variable
-/// will be created in every translation unit that sees this header file.
-/// Since the Kokkos_HIP_Locks.cpp translation unit cannot initialize the
-/// instances in other translation units, we must update this HIP global
-/// variable based on the Host global variable prior to running any kernels
-/// that will use it.
-/// That is the purpose of the KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE macro.
-__device__
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
- __constant__ extern
-#endif
- HIPLockArrays g_device_hip_lock_arrays;
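-
-// A sketch of the non-RDC launch pattern this design implies (hypothetical
-// call site; the KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE macro is defined
-// at the end of this header):
-//
-//   KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE(); // sync this TU's instance
-//   my_kernel<<<grid, block>>>();              // may use the lock arrays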
-
-#define KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK 0x1FFFF
-
-/// \brief Acquire a lock for the address
-///
-/// This function tries to acquire the lock for the hash value derived
-/// from the provided ptr. If the lock is successfully acquired the
-/// function returns true. Otherwise it returns false.
-__device__ inline bool lock_address_hip_space(void* ptr) {
- auto offset = reinterpret_cast<size_t>(ptr);
- offset = offset >> 2;
- offset = offset & KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK;
- return (0 == atomicCAS(&g_device_hip_lock_arrays.atomic[offset], 0, 1));
-}
-
-/// \brief Release lock for the address
-///
-/// This function releases the lock for the hash value derived
-/// from the provided ptr. This function should only be called
-/// after previously successfully acquiring a lock with
-/// lock_address_hip_space.
-__device__ inline void unlock_address_hip_space(void* ptr) {
- auto offset = reinterpret_cast<size_t>(ptr);
- offset = offset >> 2;
- offset = offset & KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK;
- atomicExch(&g_device_hip_lock_arrays.atomic[offset], 0);
-}
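-
-// Device-side usage sketch (hypothetical; a sketch of the kind of
-// spin-lock pattern these hooks support for types without native atomics):
-//
-//   __device__ void locked_add(double* ptr, double v) {
-//     while (!Kokkos::Impl::lock_address_hip_space(ptr)) {
-//       // spin until the lock hashed from ptr is acquired
-//     }
-//     *ptr += v; // critical section
-//     Kokkos::Impl::unlock_address_hip_space(ptr);
-//   }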
-
-} // namespace Impl
-} // namespace Kokkos
-
-// Make lock_array_copied an explicit translation-unit-scope variable
-namespace Kokkos {
-namespace Impl {
-namespace {
-static int lock_array_copied = 0;
-inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
-} // namespace
-} // namespace Impl
-} // namespace Kokkos
-
-/* Dan Ibanez: it is critical that this code be a macro, so that it
-   captures the right address for g_device_hip_lock_arrays: without
-   relocatable device code each translation unit has its own instance,
-   so HIP_SYMBOL must be evaluated in the calling translation unit.
-   Putting this in an inline function will NOT do the right thing! */
-#define KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE() \
- { \
- if (::Kokkos::Impl::lock_array_copied == 0) { \
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyToSymbol( \
- HIP_SYMBOL(::Kokkos::Impl::g_device_hip_lock_arrays), \
- &::Kokkos::Impl::g_host_hip_lock_arrays, \
- sizeof(::Kokkos::Impl::HIPLockArrays))); \
- } \
- ::Kokkos::Impl::lock_array_copied = 1; \
- }
-
-#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
-#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
-#else
-#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
- KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()
-#endif
-
-#else
-
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
-#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
-#else
-// Still need KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE for team scratch etc.
-#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
- KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE() \
- DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
-#endif
-
-#endif /* defined( KOKKOS_ENABLE_IMPL_DESUL_ATOMICS ) */
-
-#endif /* defined( __HIPCC__ ) */
-
-#endif /* #ifndef KOKKOS_HIP_LOCKS_HPP */
+++ /dev/null
-#ifndef KOKKOS_HIP_MDRANGEPOLICY_HPP_
-#define KOKKOS_HIP_MDRANGEPOLICY_HPP_
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-namespace Kokkos {
-
-template <>
-struct default_outer_direction<Kokkos::Experimental::HIP> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-template <>
-struct default_inner_direction<Kokkos::Experimental::HIP> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-namespace Impl {
-
-// Settings for MDRangePolicy
-template <>
-inline TileSizeProperties get_tile_size_properties<Kokkos::Experimental::HIP>(
- const Kokkos::Experimental::HIP& space) {
- TileSizeProperties properties;
- properties.max_threads =
- space.impl_internal_space_instance()->m_maxThreadsPerSM;
- properties.default_largest_tile_size = 16;
- properties.default_tile_size = 4;
- properties.max_total_tile_size =
- Kokkos::Experimental::Impl::HIPTraits::MaxThreadsPerBlock;
- return properties;
-}
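-
-// A rough consistency check these properties imply (hypothetical helper,
-// not part of Kokkos): the product of a tile's extents must not exceed
-// max_total_tile_size, i.e. HIPTraits::MaxThreadsPerBlock.
-//
-//   inline bool tile_fits(TileSizeProperties const& p, dim3 const& tile) {
-//     return tile.x * tile.y * tile.z <=
-//            static_cast<unsigned>(p.max_total_tile_size);
-//   }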
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_PARALLEL_MDRANGE_HPP
-#define KOKKOS_HIP_PARALLEL_MDRANGE_HPP
-
-#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
-#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
-#include <HIP/Kokkos_HIP_ReduceScan.hpp>
-#include <KokkosExp_MDRangePolicy.hpp>
-#include <impl/KokkosExp_IterateTileGPU.hpp>
-#include <Kokkos_Parallel.hpp>
-
-namespace Kokkos {
-namespace Impl {
-// ParallelFor
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
-
- private:
- using array_index_type = typename Policy::array_index_type;
- using index_type = typename Policy::index_type;
- using LaunchBounds = typename Policy::launch_bounds;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- ParallelFor() = delete;
- ParallelFor& operator=(ParallelFor const&) = delete;
-
- public:
- inline __device__ void operator()() const {
- Kokkos::Impl::DeviceIterateTile<Policy::rank, Policy, FunctorType,
- typename Policy::work_tag>(m_policy,
- m_functor)
- .exec_range();
- }
-
- inline void execute() const {
- using ClosureType =
- ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
- if (m_policy.m_num_tiles == 0) return;
- auto const maxblocks =
- Kokkos::Experimental::Impl::hip_internal_maximum_grid_count();
- if (Policy::rank == 2) {
- dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1], 1);
- dim3 const grid(
- std::min<array_index_type>(
- (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
- block.x,
- maxblocks[0]),
- std::min<array_index_type>(
- (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
- block.y,
- maxblocks[1]),
- 1);
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, 0,
- m_policy.space().impl_internal_space_instance(), false);
- } else if (Policy::rank == 3) {
- dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1],
- m_policy.m_tile[2]);
- dim3 const grid(
- std::min<array_index_type>(
- (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
- block.x,
- maxblocks[0]),
- std::min<array_index_type>(
- (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
- block.y,
- maxblocks[1]),
- std::min<array_index_type>(
- (m_policy.m_upper[2] - m_policy.m_lower[2] + block.z - 1) /
- block.z,
- maxblocks[2]));
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, 0,
- m_policy.space().impl_internal_space_instance(), false);
- } else if (Policy::rank == 4) {
- // id0,id1 encoded within threadIdx.x; id2 to threadIdx.y; id3 to
- // threadIdx.z
- dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
- m_policy.m_tile[2], m_policy.m_tile[3]);
- dim3 const grid(
- std::min<array_index_type>(
- m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
- std::min<array_index_type>(
- (m_policy.m_upper[2] - m_policy.m_lower[2] + block.y - 1) /
- block.y,
- maxblocks[1]),
- std::min<array_index_type>(
- (m_policy.m_upper[3] - m_policy.m_lower[3] + block.z - 1) /
- block.z,
- maxblocks[2]));
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, 0,
- m_policy.space().impl_internal_space_instance(), false);
- } else if (Policy::rank == 5) {
- // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4
- // to threadIdx.z
- dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
- m_policy.m_tile[2] * m_policy.m_tile[3],
- m_policy.m_tile[4]);
- dim3 const grid(
- std::min<array_index_type>(
- m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
- std::min<array_index_type>(
- m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
- std::min<array_index_type>(
- (m_policy.m_upper[4] - m_policy.m_lower[4] + block.z - 1) /
- block.z,
- maxblocks[2]));
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, 0,
- m_policy.space().impl_internal_space_instance(), false);
- } else if (Policy::rank == 6) {
- // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y;
- // id4,id5 to threadIdx.z
- dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
- m_policy.m_tile[2] * m_policy.m_tile[3],
- m_policy.m_tile[4] * m_policy.m_tile[5]);
- dim3 const grid(
- std::min<array_index_type>(
- m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
- std::min<array_index_type>(
- m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
- std::min<array_index_type>(
- m_policy.m_tile_end[4] * m_policy.m_tile_end[5], maxblocks[2]));
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, 0,
- m_policy.space().impl_internal_space_instance(), false);
- } else {
- Kokkos::abort("Kokkos::MDRange Error: Exceeded rank bounds with HIP\n");
- }
-
- } // end execute
-
- ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- using closure_type =
- ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Experimental::HIP>;
- unsigned block_size =
- Kokkos::Experimental::Impl::hip_get_max_blocksize<closure_type,
- LaunchBounds>();
- if (block_size == 0)
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelFor< HIP > could not find a valid "
- "tile size."));
- return block_size;
- }
-};
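-
-// Worked example of the rank-2 launch math above (a sketch): for extents
-// 1000 x 1000 with 16 x 16 tiles, block = (16, 16, 1) and
-// grid.x = min((1000 + 16 - 1) / 16, maxblocks[0]) = min(63, maxblocks[0]);
-// tiles beyond the grid bound are covered by the iteration loop inside
-// DeviceIterateTile.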
-
-// ParallelReduce
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
-
- private:
- using array_index_type = typename Policy::array_index_type;
- using index_type = typename Policy::index_type;
-
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using LaunchBounds = typename Policy::launch_bounds;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis =
- Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
- ReducerTypeFwd>;
-
- public:
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
- using functor_type = FunctorType;
- using size_type = Experimental::HIP::size_type;
-
-  // Algorithmic constraints: blockDim.y is a power of two AND
-  // blockDim.x == blockDim.z == 1
-
- const FunctorType m_functor;
- const Policy m_policy; // used for workrange and nwork
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
- size_type* m_scratch_space;
- size_type* m_scratch_flags;
-  // Only let one ParallelReduce/Scan modify the shared memory. The
-  // constructor acquires the mutex, which is released in the destructor.
- std::lock_guard<std::mutex> m_shared_memory_lock;
-
- using DeviceIteratePattern = typename Kokkos::Impl::Reduce::DeviceIterateTile<
- Policy::rank, Policy, FunctorType, WorkTag, reference_type>;
-
- public:
- inline __device__ void exec_range(reference_type update) const {
- DeviceIteratePattern(m_policy, m_functor, update).exec_range();
- }
-
- inline __device__ void operator()() const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) /
- sizeof(size_type));
-
- {
- reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
- Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- threadIdx.y * word_count.value));
-
- // Number of blocks is bounded so that the reduction can be limited to two
- // passes. Each thread block is given an approximately equal amount of
- // work to perform. Accumulate the values for this block. The accumulation
-    // ordering does not match the final pass, but is arithmetically
- // equivalent.
-
- this->exec_range(value);
- }
-
-    // Reduce with final value at blockDim.y - 1 location.
-    // Caveat: the inter-block reduce/scan assumes a power-of-two blockDim.y.
- if (::Kokkos::Impl::hip_single_inter_block_reduce_scan<false>(
- final_reducer, blockIdx.x, gridDim.x,
- Experimental::kokkos_impl_hip_shared_memory<size_type>(),
- m_scratch_space, m_scratch_flags)) {
- // This is the final block with the final result at the final threads'
- // location
- size_type* const shared =
- Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- (blockDim.y - 1) * word_count.value;
- size_type* const global = m_result_ptr_device_accessible
- ? reinterpret_cast<size_type*>(m_result_ptr)
- : m_scratch_space;
-
- if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
- }
-
- if (Experimental::Impl::HIPTraits::WarpSize < word_count.value) {
- __syncthreads();
- }
-
- for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
- global[i] = shared[i];
- }
- }
- }
-
- // Determine block size constrained by shared memory:
- // This is copy/paste from Kokkos_HIP_Parallel_Range
- inline unsigned local_block_size(const FunctorType& f) {
- const auto& instance = m_policy.space().impl_internal_space_instance();
- auto shmem_functor = [&f](unsigned n) {
- return hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(f, n);
- };
- using closure_type = ParallelReduce<FunctorType, Policy, ReducerType,
- Kokkos::Experimental::HIP>;
-
- unsigned block_size =
- Kokkos::Experimental::Impl::hip_get_preferred_blocksize<closure_type,
- LaunchBounds>(
- instance, shmem_functor);
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
- "valid tile size."));
- }
- return block_size;
- }
-
- inline void execute() {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- using ClosureType = ParallelReduce<FunctorType, Policy, ReducerType,
- Kokkos::Experimental::HIP>;
- const auto nwork = m_policy.m_num_tiles;
- if (nwork) {
- int block_size = m_policy.m_prod_tile_dims;
- // CONSTRAINT: Algorithm requires block_size >= product of tile dimensions
- // Nearest power of two
- int exponent_pow_two = std::ceil(std::log2(block_size));
- block_size = std::pow(2, exponent_pow_two);
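-      // e.g. m_prod_tile_dims = 24: exponent_pow_two = ceil(log2(24)) = 5,
-      // so block_size is rounded up to 2^5 = 32 (worked example)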
- int suggested_blocksize = local_block_size(m_functor);
-
- block_size = (block_size > suggested_blocksize)
- ? block_size
- : suggested_blocksize; // Note: block_size must be less
- // than or equal to 512
-
- m_scratch_space =
- ::Kokkos::Experimental::Impl::hip_internal_scratch_space(
- m_policy.space(),
- Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) *
- block_size /* block_size == max block_count */);
- m_scratch_flags =
- ::Kokkos::Experimental::Impl::hip_internal_scratch_flags(
- m_policy.space(), sizeof(size_type));
-
- // REQUIRED ( 1 , N , 1 )
- const dim3 block(1, block_size, 1);
- // Required grid.x <= block.y
- const dim3 grid(std::min(static_cast<uint32_t>(block.y),
- static_cast<uint32_t>(nwork)),
- 1, 1);
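-      // e.g. block_size = 256 and nwork = 10000 tiles gives grid = (256, 1, 1):
-      // each block emits one partial result and the last finishing block
-      // reduces all gridDim.x partials, so grid.x <= block.y keeps that
-      // final pass within a single block (worked example of the constraint)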
-
- const int shmem =
- ::Kokkos::Impl::hip_single_inter_block_reduce_scan_shmem<
- false, FunctorType, WorkTag>(m_functor, block.y);
-
- Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
- LaunchBounds>(
- *this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
-
- if (!m_result_ptr_device_accessible && m_result_ptr) {
- const int size = Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer));
- DeepCopy<HostSpace, Experimental::HIPSpace, Experimental::HIP>(
- m_policy.space(), m_result_ptr, m_scratch_space, size);
- }
- } else {
- if (m_result_ptr) {
- final_reducer.init(m_result_ptr);
- }
- }
- }
-
- template <class ViewType>
- ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ViewType::memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexSharedMemory) {}
-
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexSharedMemory) {}
-
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- using closure_type =
- ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- ReducerType, Kokkos::Experimental::HIP>;
- unsigned block_size =
- Kokkos::Experimental::Impl::hip_get_max_blocksize<closure_type,
- LaunchBounds>();
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
- "valid tile size."));
- }
- return block_size;
- }
-};
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_PARALLEL_RANGE_HPP
-#define KOKKOS_HIP_PARALLEL_RANGE_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-#if defined(__HIPCC__)
-
-#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
-#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
-#include <HIP/Kokkos_HIP_ReduceScan.hpp>
-#include <HIP/Kokkos_HIP_Shuffle_Reduce.hpp>
-#include <impl/Kokkos_Traits.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- private:
- using Member = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using LaunchBounds = typename Policy::launch_bounds;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- ParallelFor() = delete;
- ParallelFor& operator=(const ParallelFor&) = delete;
-
- template <class TagType>
- inline __device__ std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const Member i) const {
- m_functor(i);
- }
-
- template <class TagType>
- inline __device__ std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const Member i) const {
- m_functor(TagType(), i);
- }
-
- public:
- using functor_type = FunctorType;
-
- inline __device__ void operator()() const {
- const Member work_stride = blockDim.y * gridDim.x;
- const Member work_end = m_policy.end();
-
- for (Member iwork =
- m_policy.begin() + threadIdx.y + blockDim.y * blockIdx.x;
- iwork < work_end;
- iwork = iwork < work_end - work_stride ? iwork + work_stride
- : work_end) {
- this->template exec_range<WorkTag>(iwork);
- }
- }
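-
-  // Worked example of the stride loop above (a sketch): with block.y = 256
-  // and gridDim.x = 4, work_stride = 1024, so the thread at threadIdx.y = 0,
-  // blockIdx.x = 0 visits iwork = begin, begin + 1024, begin + 2048, ...;
-  // the clamp to work_end avoids overflowing Member near the range end.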
-
- inline void execute() const {
- const typename Policy::index_type nwork = m_policy.end() - m_policy.begin();
-
- using DriverType =
- ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
- const int block_size =
- Kokkos::Experimental::Impl::hip_get_preferred_blocksize<DriverType,
- LaunchBounds>();
- const dim3 block(1, block_size, 1);
- const dim3 grid(
- typename Policy::index_type((nwork + block.y - 1) / block.y), 1, 1);
-
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelFor< HIP > could not find a "
- "valid execution configuration."));
- }
- Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
- *this, grid, block, 0, m_policy.space().impl_internal_space_instance(),
- false);
- }
-
- ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- private:
- using WorkRange = typename Policy::WorkRange;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using LaunchBounds = typename Policy::launch_bounds;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis =
- Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
- ReducerTypeFwd>;
-
- public:
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
- using functor_type = FunctorType;
- using size_type = Kokkos::Experimental::HIP::size_type;
- using index_type = typename Policy::index_type;
-
-  // Algorithmic constraints: blockDim.y is a power of two AND
-  // blockDim.x == blockDim.z == 1
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
- const bool m_result_ptr_host_accessible;
- size_type* m_scratch_space = nullptr;
- size_type* m_scratch_flags = nullptr;
- // Only let one ParallelReduce/Scan modify the shared memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::lock_guard<std::mutex> m_shared_memory_lock;
-
- static bool constexpr UseShflReduction =
- static_cast<bool>(Analysis::StaticValueSize);
-
- private:
- struct ShflReductionTag {};
- struct SHMEMReductionTag {};
-
- // Make the exec_range calls call to Reduce::DeviceIterateTile
- template <class TagType>
- __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const Member& i, reference_type update) const {
- m_functor(i, update);
- }
-
- template <class TagType>
- __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const Member& i, reference_type update) const {
- m_functor(TagType(), i, update);
- }
-
- public:
- __device__ inline void operator()() const {
- using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
- SHMEMReductionTag>;
- run(ReductionTag{});
- }
-
- __device__ inline void run(SHMEMReductionTag) const {
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) /
- sizeof(size_type));
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
- {
- reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
- ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- threadIdx.y * word_count.value));
-
- // Number of blocks is bounded so that the reduction can be limited to two
- // passes. Each thread block is given an approximately equal amount of
- // work to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmetically
- // equivalent.
-
- const WorkRange range(m_policy, blockIdx.x, gridDim.x);
-
- for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
- iwork < iwork_end; iwork += blockDim.y) {
- this->template exec_range<WorkTag>(iwork, value);
- }
- }
-
- // Reduce with final value at blockDim.y - 1 location.
- // Shortcut for length zero reduction
- bool do_final_reduction = m_policy.begin() == m_policy.end();
- if (!do_final_reduction)
- do_final_reduction = hip_single_inter_block_reduce_scan<false>(
- final_reducer, blockIdx.x, gridDim.x,
- ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
- m_scratch_space, m_scratch_flags);
- if (do_final_reduction) {
- // This is the final block with the final result at the final threads'
- // location
-
- size_type* const shared =
- ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- (blockDim.y - 1) * word_count.value;
- size_type* const global = m_result_ptr_device_accessible
- ? reinterpret_cast<size_type*>(m_result_ptr)
- : m_scratch_space;
-
- if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
- }
-
- if (::Kokkos::Experimental::Impl::HIPTraits::WarpSize <
- word_count.value) {
- __syncthreads();
- }
-
- for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
- global[i] = shared[i];
- }
- }
- }
-
- __device__ inline void run(ShflReductionTag) const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- value_type value;
- final_reducer.init(&value);
- // Number of blocks is bounded so that the reduction can be limited to two
- // passes. Each thread block is given an approximately equal amount of work
- // to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmetically equivalent.
-
- WorkRange const range(m_policy, blockIdx.x, gridDim.x);
-
- for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
- iwork < iwork_end; iwork += blockDim.y) {
- this->template exec_range<WorkTag>(iwork, value);
- }
-
- pointer_type const result = reinterpret_cast<pointer_type>(m_scratch_space);
-
- int max_active_thread = static_cast<int>(range.end() - range.begin()) <
- static_cast<int>(blockDim.y)
- ? range.end() - range.begin()
- : blockDim.y;
-
- max_active_thread =
- (max_active_thread == 0) ? blockDim.y : max_active_thread;
-
- value_type init;
- final_reducer.init(&init);
- if (m_policy.begin() == m_policy.end()) {
- final_reducer.final(&value);
- pointer_type const final_result =
- m_result_ptr_device_accessible ? m_result_ptr : result;
- *final_result = value;
- } else if (Impl::hip_inter_block_shuffle_reduction<>(
- value, init, final_reducer, m_scratch_space, result,
- m_scratch_flags, max_active_thread)) {
- unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
- if (id == 0) {
- final_reducer.final(&value);
- pointer_type const final_result =
- m_result_ptr_device_accessible ? m_result_ptr : result;
- *final_result = value;
- }
- }
- }
-
- // Determine block size constrained by shared memory:
- inline unsigned local_block_size(const FunctorType& f) {
- const auto& instance = m_policy.space().impl_internal_space_instance();
- auto shmem_functor = [&f](unsigned n) {
- return hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(f, n);
- };
- using DriverType = ParallelReduce<FunctorType, Policy, ReducerType,
- Kokkos::Experimental::HIP>;
- return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
- DriverType, LaunchBounds>(instance, shmem_functor);
- }
-
- inline void execute() {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const index_type nwork = m_policy.end() - m_policy.begin();
- const bool need_device_set = Analysis::has_init_member_function ||
- Analysis::has_final_member_function ||
- !m_result_ptr_host_accessible ||
- !std::is_same<ReducerType, InvalidType>::value;
- if ((nwork > 0) || need_device_set) {
- const int block_size = local_block_size(m_functor);
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
- "valid execution configuration."));
- }
-
- m_scratch_space =
- ::Kokkos::Experimental::Impl::hip_internal_scratch_space(
- m_policy.space(),
- Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) *
- block_size /* block_size == max block_count */);
- m_scratch_flags =
- ::Kokkos::Experimental::Impl::hip_internal_scratch_flags(
- m_policy.space(), sizeof(size_type));
-
- // REQUIRED ( 1 , N , 1 )
- dim3 block(1, block_size, 1);
- // Required grid.x <= block.y
- dim3 grid(std::min(block.y, static_cast<uint32_t>((nwork + block.y - 1) /
- block.y)),
- 1, 1);
-
- if (nwork == 0) {
- block = dim3(1, 1, 1);
- grid = dim3(1, 1, 1);
- }
- const int shmem =
- UseShflReduction
- ? 0
- : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(m_functor,
- block.y);
-
- using DriverType = ParallelReduce<FunctorType, Policy, ReducerType,
- Kokkos::Experimental::HIP>;
- Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
- *this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
-
- if (!m_result_ptr_device_accessible && m_result_ptr) {
- const int size = Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer));
- DeepCopy<HostSpace, ::Kokkos::Experimental::HIPSpace,
- ::Kokkos::Experimental::HIP>(m_policy.space(), m_result_ptr,
- m_scratch_space, size);
- }
- } else {
- if (m_result_ptr) {
- final_reducer.init(m_result_ptr);
- }
- }
- }
-
- template <class ViewType>
- ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ViewType::memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ViewType::memory_space>::accessible),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexSharedMemory) {}
-
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexSharedMemory) {}
-};
-
-template <class FunctorType, class... Traits>
-class ParallelScanHIPBase {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- protected:
- using Member = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using LaunchBounds = typename Policy::launch_bounds;
-
- using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
-
- public:
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using functor_type = FunctorType;
- using size_type = Kokkos::Experimental::HIP::size_type;
- using index_type = typename Policy::index_type;
-
- protected:
- // Algorithmic constraints:
- // (a) blockDim.y is a power of two
- // (b) blockDim.x == blockDim.z == 1
- // (c) gridDim.x <= blockDim.y * blockDim.y
- // (d) gridDim.y == gridDim.z == 1
-
- const FunctorType m_functor;
- const Policy m_policy;
- size_type* m_scratch_space = nullptr;
- size_type* m_scratch_flags = nullptr;
- size_type m_final = false;
- int m_grid_x = 0;
- // Only let one ParallelReduce/Scan modify the shared memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::lock_guard<std::mutex> m_shared_memory_lock;
-
- private:
- template <class TagType>
- __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const Member& i, reference_type update, const bool final_result) const {
- m_functor(i, update, final_result);
- }
-
- template <class TagType>
- __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const Member& i, reference_type update, const bool final_result) const {
- m_functor(TagType(), i, update, final_result);
- }
-
- //----------------------------------------
-
- __device__ inline void initial() const {
- typename Analysis::Reducer final_reducer(&m_functor);
-
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
-
- pointer_type const shared_value = reinterpret_cast<pointer_type>(
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- word_count.value * threadIdx.y);
-
- final_reducer.init(shared_value);
-
- // Number of blocks is bounded so that the reduction can be limited to two
- // passes. Each thread block is given an approximately equal amount of work
- // to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmetically equivalent.
-
- const WorkRange range(m_policy, blockIdx.x, gridDim.x);
-
- for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
- iwork < iwork_end; iwork += blockDim.y) {
- this->template exec_range<WorkTag>(
- iwork, final_reducer.reference(shared_value), false);
- }
-
- // Reduce and scan, writing out scan of blocks' totals and block-groups'
- // totals. Blocks' scan values are written to 'blockIdx.x' location.
- // Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i <
- // gridDim.x
- hip_single_inter_block_reduce_scan<true>(
- final_reducer, blockIdx.x, gridDim.x,
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
- m_scratch_space, m_scratch_flags);
- }
-
- //----------------------------------------
-
- __device__ inline void final() const {
- typename Analysis::Reducer final_reducer(&m_functor);
-
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
-
- // Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
- // value[2] , ... }
- size_type* const shared_data =
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>();
- size_type* const shared_prefix =
- shared_data + word_count.value * threadIdx.y;
- size_type* const shared_accum =
- shared_data + word_count.value * (blockDim.y + 1);
-
- // Starting value for this thread block is the previous block's total.
- if (blockIdx.x) {
- size_type* const block_total =
- m_scratch_space + word_count.value * (blockIdx.x - 1);
- for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
- shared_accum[i] = block_total[i];
- }
- } else if (0 == threadIdx.y) {
- final_reducer.init(reinterpret_cast<pointer_type>(shared_accum));
- }
-
- const WorkRange range(m_policy, blockIdx.x, gridDim.x);
-
- for (typename Policy::member_type iwork_base = range.begin();
- iwork_base < range.end(); iwork_base += blockDim.y) {
- const typename Policy::member_type iwork = iwork_base + threadIdx.y;
-
- __syncthreads(); // Don't overwrite previous iteration values until they
- // are used
-
- final_reducer.init(
- reinterpret_cast<pointer_type>(shared_prefix + word_count.value));
-
- // Copy previous block's accumulation total into thread[0] prefix and
- // inclusive scan value of this block
- for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
- shared_data[i + word_count.value] = shared_data[i] = shared_accum[i];
- }
-
- // Make sure the write is seen by all threads
- __threadfence_block();
-
- // Call functor to accumulate inclusive scan value for this work item
- const bool doWork = (iwork < range.end());
- if (doWork) {
- this->template exec_range<WorkTag>(
- iwork,
- final_reducer.reference(reinterpret_cast<pointer_type>(
- shared_prefix + word_count.value)),
- false);
- }
-
- // Scan block values into locations shared_data[1..blockDim.y]
- hip_intra_block_reduce_scan<true>(
- final_reducer,
- typename Analysis::pointer_type(shared_data + word_count.value));
-
- {
- size_type* const block_total =
- shared_data + word_count.value * blockDim.y;
- for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
- shared_accum[i] = block_total[i];
- }
- }
-
- // Call functor with exclusive scan value
- if (doWork) {
- this->template exec_range<WorkTag>(
- iwork,
- final_reducer.reference(
- reinterpret_cast<pointer_type>(shared_prefix)),
- true);
- }
- }
- }
-
- public:
- //----------------------------------------
-
- __device__ inline void operator()() const {
- if (!m_final) {
- initial();
- } else {
- final();
- }
- }
-
- // Determine block size constrained by shared memory:
- virtual inline unsigned local_block_size(const FunctorType& f) = 0;
-
- inline void impl_execute() {
- const index_type nwork = m_policy.end() - m_policy.begin();
- if (nwork) {
- // FIXME_HIP we cannot choose it larger for large work sizes to work
- // correctly, the unit tests fail with wrong results
- const int gridMaxComputeCapability_2x = 0x01fff;
-
- const int block_size = static_cast<int>(local_block_size(m_functor));
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelScan< HIP > could not find a "
- "valid execution configuration."));
- }
-
- const int grid_max =
- std::min(block_size * block_size, gridMaxComputeCapability_2x);
-
- // At most 'max_grid' blocks:
- const int max_grid =
- std::min<int>(grid_max, (nwork + block_size - 1) / block_size);
-
- // How much work per block:
- const int work_per_block = (nwork + max_grid - 1) / max_grid;
-
- // How many block are really needed for this much work:
- m_grid_x = (nwork + work_per_block - 1) / work_per_block;
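-      // e.g. nwork = 1000000, block_size = 256: grid_max = min(65536, 8191)
-      // = 8191, max_grid = min(8191, 3907) = 3907, work_per_block = 256,
-      // m_grid_x = 3907 (worked example of the sizing above)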
-
- m_scratch_space = Kokkos::Experimental::Impl::hip_internal_scratch_space(
- m_policy.space(), Analysis::value_size(m_functor) * m_grid_x);
- m_scratch_flags = Kokkos::Experimental::Impl::hip_internal_scratch_flags(
- m_policy.space(), sizeof(size_type) * 1);
-
- dim3 grid(m_grid_x, 1, 1);
- dim3 block(1, block_size, 1); // REQUIRED DIMENSIONS ( 1 , N , 1 )
- const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
-
- m_final = false;
- // these ones are OK to be just the base because the specializations
- // do not modify the kernel at all
- using DriverType = ParallelScanHIPBase<FunctorType, Traits...>;
- Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
- *this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
-
- m_final = true;
- Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
- *this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
- }
- }
-
- ParallelScanHIPBase(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexSharedMemory) {}
-};
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::HIP>
- : public ParallelScanHIPBase<FunctorType, Traits...> {
- public:
- using Base = ParallelScanHIPBase<FunctorType, Traits...>;
- using Base::operator();
-
- inline void execute() { Base::impl_execute(); }
-
- ParallelScan(const FunctorType& arg_functor,
- const typename Base::Policy& arg_policy)
- : Base(arg_functor, arg_policy) {}
-
- inline unsigned local_block_size(const FunctorType& f) {
- // blockDim.y must be power of two = 128 (2 warps) or 256 (4 warps) or
- // 512 (8 warps) gridDim.x <= blockDim.y * blockDim.y
-
- const auto& instance =
- Base::m_policy.space().impl_internal_space_instance();
- auto shmem_functor = [&f](unsigned n) {
- return hip_single_inter_block_reduce_scan_shmem<true, FunctorType,
- typename Base::WorkTag>(
- f, n);
- };
- using DriverType = ParallelScan<FunctorType, typename Base::Policy,
- Kokkos::Experimental::HIP>;
- return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
- DriverType, typename Base::LaunchBounds>(instance, shmem_functor);
- }
-};
-
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::Experimental::HIP>
- : public ParallelScanHIPBase<FunctorType, Traits...> {
- public:
- using Base = ParallelScanHIPBase<FunctorType, Traits...>;
- using Base::operator();
-
- ReturnType& m_returnvalue;
-
- inline void execute() {
- Base::impl_execute();
-
- const auto nwork = Base::m_policy.end() - Base::m_policy.begin();
- if (nwork) {
- const int size = Base::Analysis::value_size(Base::m_functor);
- DeepCopy<HostSpace, Kokkos::Experimental::HIPSpace,
- Kokkos::Experimental::HIP>(
- Base::m_policy.space(), &m_returnvalue,
- Base::m_scratch_space + (Base::m_grid_x - 1) * size / sizeof(int),
- size);
- }
- }
-
- ParallelScanWithTotal(const FunctorType& arg_functor,
- const typename Base::Policy& arg_policy,
- ReturnType& arg_returnvalue)
- : Base(arg_functor, arg_policy), m_returnvalue(arg_returnvalue) {}
-
- inline unsigned local_block_size(const FunctorType& f) {
- // blockDim.y must be power of two = 128 (2 warps) or 256 (4 warps) or
- // 512 (8 warps) gridDim.x <= blockDim.y * blockDim.y
-
- const auto& instance =
- Base::m_policy.space().impl_internal_space_instance();
- auto shmem_functor = [&f](unsigned n) {
- return hip_single_inter_block_reduce_scan_shmem<true, FunctorType,
- typename Base::WorkTag>(
- f, n);
- };
- using DriverType =
- ParallelScanWithTotal<FunctorType, typename Base::Policy, ReturnType,
- Kokkos::Experimental::HIP>;
- return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
- DriverType, typename Base::LaunchBounds>(instance, shmem_functor);
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_PARALLEL_TEAM_HPP
-#define KOKKOS_HIP_PARALLEL_TEAM_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-#if defined(__HIPCC__)
-
-#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
-#include <HIP/Kokkos_HIP_Locks.hpp>
-#include <HIP/Kokkos_HIP_Team.hpp>
-#include <HIP/Kokkos_HIP_Instance.hpp>
-#include <Kokkos_MinMaxClamp.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <typename... Properties>
-class TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>
- : public PolicyTraits<Properties...> {
- public:
- using execution_policy = TeamPolicyInternal;
-
- using traits = PolicyTraits<Properties...>;
- using BlockType = Kokkos::Experimental::Impl::BlockType;
-
- template <typename ExecSpace, typename... OtherProperties>
- friend class TeamPolicyInternal;
-
- private:
- typename traits::execution_space m_space;
- int m_league_size;
- int m_team_size;
- int m_vector_length;
- size_t m_team_scratch_size[2];
- size_t m_thread_scratch_size[2];
- int m_chunk_size;
- bool m_tune_team_size;
- bool m_tune_vector_length;
-
- public:
- using execution_space = Kokkos::Experimental::HIP;
-
- template <class... OtherProperties>
- TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
- m_league_size = p.m_league_size;
- m_team_size = p.m_team_size;
- m_vector_length = p.m_vector_length;
- m_team_scratch_size[0] = p.m_team_scratch_size[0];
- m_team_scratch_size[1] = p.m_team_scratch_size[1];
- m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
- m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
- m_chunk_size = p.m_chunk_size;
- m_space = p.m_space;
- m_tune_team_size = p.m_tune_team_size;
- m_tune_vector_length = p.m_tune_vector_length;
- }
-
- template <typename FunctorType>
- int team_size_max(FunctorType const& f, ParallelForTag const&) const {
- using closure_type =
- Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
-
- return internal_team_size_common<BlockType::Max, closure_type>(f);
- }
-
- template <class FunctorType>
- inline int team_size_max(const FunctorType& f,
- const ParallelReduceTag&) const {
- using functor_analysis_type =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- TeamPolicyInternal, FunctorType>;
- using reducer_type = typename Impl::ParallelReduceReturnValue<
- void, typename functor_analysis_type::value_type,
- FunctorType>::reducer_type;
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- reducer_type>;
- return internal_team_size_max<closure_type>(f);
- }
-
- template <typename FunctorType, typename ReducerType>
- inline int team_size_max(const FunctorType& f, const ReducerType&,
- const ParallelReduceTag&) const {
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- ReducerType>;
- return internal_team_size_max<closure_type>(f);
- }
-
- template <typename FunctorType>
- int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
- using closure_type =
- Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
-
- return internal_team_size_common<BlockType::Preferred, closure_type>(f);
- }
-
- template <typename FunctorType>
- inline int team_size_recommended(FunctorType const& f,
- ParallelReduceTag const&) const {
- using functor_analysis_type =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- TeamPolicyInternal, FunctorType>;
- using reducer_type = typename Impl::ParallelReduceReturnValue<
- void, typename functor_analysis_type::value_type,
- FunctorType>::reducer_type;
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- reducer_type>;
- return internal_team_size_recommended<closure_type>(f);
- }
-
- template <typename FunctorType, typename ReducerType>
- int team_size_recommended(FunctorType const& f, ReducerType const&,
- ParallelReduceTag const&) const {
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- ReducerType>;
- return internal_team_size_recommended<closure_type>(f);
- }
-
- inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
- inline bool impl_auto_team_size() const { return m_tune_team_size; }
- static int vector_length_max() {
- return ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
- }
-
- static int verify_requested_vector_length(int requested_vector_length) {
- int test_vector_length =
- std::min(requested_vector_length, vector_length_max());
-
- // Allow only power-of-two vector_length
- if (!(is_integral_power_of_two(test_vector_length))) {
- int test_pow2 = 1;
- int constexpr warp_size = Experimental::Impl::HIPTraits::WarpSize;
- while (test_pow2 < warp_size) {
- test_pow2 <<= 1;
- if (test_pow2 > test_vector_length) {
- break;
- }
- }
- test_vector_length = test_pow2 >> 1;
- }
-
- return test_vector_length;
- }
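
// Editor's note: a minimal standalone sketch of the power-of-two rounding
// performed by verify_requested_vector_length() above, assuming a warp size
// of 64 as in HIPTraits::WarpSize. The function name is illustrative, not
// part of Kokkos.
#include <algorithm>

int round_vector_length_sketch(int requested, int warp_size = 64) {
  int v = std::min(requested, warp_size);
  if ((v & (v - 1)) != 0) {  // not a power of two: round down
    int p2 = 1;
    while (p2 <= v) p2 <<= 1;
    v = p2 >> 1;
  }
  return v;
}
// e.g. round_vector_length_sketch(5) == 4, round_vector_length_sketch(100) == 64.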
-
- static int scratch_size_max(int level) {
- return (
- level == 0 ? 1024 * 40 : // FIXME_HIP arbitrarily setting this to 40kB
- 20 * 1024 * 1024); // FIXME_HIP arbitrarily setting this to 20MB
- }
- inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
- inline void impl_set_team_size(size_t size) { m_team_size = size; }
- int impl_vector_length() const { return m_vector_length; }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED int vector_length() const { return impl_vector_length(); }
-#endif
-
- int team_size() const { return m_team_size; }
-
- int league_size() const { return m_league_size; }
-
- size_t scratch_size(int level, int team_size_ = -1) const {
- if (team_size_ < 0) team_size_ = m_team_size;
- return m_team_scratch_size[level] +
- team_size_ * m_thread_scratch_size[level];
- }
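
// Editor's note: worked example with illustrative numbers: with a level-0
// per-team scratch of 1024 bytes, a per-thread scratch of 16 bytes, and a
// team size of 64, scratch_size(0) returns 1024 + 64 * 16 == 2048 bytes.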
-
- size_t team_scratch_size(int level) const {
- return m_team_scratch_size[level];
- }
-
- size_t thread_scratch_size(int level) const {
- return m_thread_scratch_size[level];
- }
-
- typename traits::execution_space space() const { return m_space; }
-
- TeamPolicyInternal()
- : m_space(typename traits::execution_space()),
- m_league_size(0),
- m_team_size(-1),
- m_vector_length(0),
- m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(::Kokkos::Experimental::Impl::HIPTraits::WarpSize),
- m_tune_team_size(false),
- m_tune_vector_length(false) {}
-
- /** \brief Specify league size, request team size */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- int team_size_request, int vector_length_request = 1)
- : m_space(space_),
- m_league_size(league_size_),
- m_team_size(team_size_request),
- m_vector_length(
- (vector_length_request > 0)
- ? verify_requested_vector_length(vector_length_request)
- : (verify_requested_vector_length(1))),
- m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(::Kokkos::Experimental::Impl::HIPTraits::WarpSize),
- m_tune_team_size(bool(team_size_request <= 0)),
- m_tune_vector_length(bool(vector_length_request <= 0)) {
- // Make sure league size is permissible
- if (league_size_ >=
- static_cast<int>(
- ::Kokkos::Experimental::Impl::hip_internal_maximum_grid_count()[0]))
- Impl::throw_runtime_exception(
- "Requested too large league_size for TeamPolicy on HIP execution "
- "space.");
-
- // Make sure total block size is permissible
- if (m_team_size * m_vector_length >
- ::Kokkos::Experimental::Impl::HIPTraits::MaxThreadsPerBlock) {
- Impl::throw_runtime_exception(
- std::string("Kokkos::TeamPolicy< HIP > the team size is too large. "
- "Team size x vector length must be smaller than 1024."));
- }
- }
-
- /** \brief Specify league size, request team size */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- int vector_length_request = 1)
- : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
- /** \brief Specify league size and team size, request vector length */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
-
- /** \brief Specify league size, request team size and vector length */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(space_, league_size_, -1, -1) {}
-
- TeamPolicyInternal(int league_size_, int team_size_request,
- int vector_length_request = 1)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_,
- team_size_request, vector_length_request) {}
-
- TeamPolicyInternal(int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- int vector_length_request = 1)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
- vector_length_request) {}
-
- /** \brief Specify league size and team size, request vector length */
- TeamPolicyInternal(int league_size_, int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_,
- team_size_request, -1) {}
-
- /** \brief Specify league size, request team size and vector length */
- TeamPolicyInternal(int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
- -1) {}
-
- int chunk_size() const { return m_chunk_size; }
-
- TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
- m_chunk_size = chunk_size_;
- return *this;
- }
-
- /** \brief set per team scratch size for a specific level of the scratch
- * hierarchy */
- TeamPolicyInternal& set_scratch_size(int level,
- PerTeamValue const& per_team) {
- m_team_scratch_size[level] = per_team.value;
- return *this;
- }
-
- /** \brief set per thread scratch size for a specific level of the scratch
- * hierarchy */
- TeamPolicyInternal& set_scratch_size(int level,
- PerThreadValue const& per_thread) {
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- /** \brief set per thread and per team scratch size for a specific level of
- * the scratch hierarchy */
- TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
- PerThreadValue const& per_thread) {
- m_team_scratch_size[level] = per_team.value;
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- using member_type = Kokkos::Impl::HIPTeamMember;
-
- protected:
- template <BlockType BlockSize, class ClosureType, class FunctorType>
- int internal_team_size_common(const FunctorType& f) const {
- // FIXME_HIP: this could be unified with internal_team_size_common_reduce
- // once C++17 'if constexpr' is available by default; right now the
- // evaluation of the Analysis' value_size / StaticValueSize cannot be
- // turned off.
-
- const unsigned shmem_block = team_scratch_size(0) + 2 * sizeof(double);
- const unsigned shmem_thread = thread_scratch_size(0) + sizeof(double);
- const int vector_length = impl_vector_length();
-
- const auto functor = [&f, shmem_block, shmem_thread, vector_length](
- const hipFuncAttributes& attr, int block_size) {
- int functor_shmem =
- ::Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
- f, block_size / vector_length);
- return shmem_block + shmem_thread * (block_size / vector_length) +
- functor_shmem + attr.sharedSizeBytes;
- };
- int block_size;
- // FIXME_HIP - could be 'if constexpr' with C++17
- if (BlockSize == BlockType::Max) {
- block_size = ::Kokkos::Experimental::Impl::hip_get_max_team_blocksize<
- ClosureType, typename traits::launch_bounds>(
- space().impl_internal_space_instance(), functor);
- } else {
- block_size =
- ::Kokkos::Experimental::Impl::hip_get_preferred_team_blocksize<
- ClosureType, typename traits::launch_bounds>(
- space().impl_internal_space_instance(), functor);
- }
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelFor< HIP > could not find a valid "
- "team size."));
- }
- return block_size / impl_vector_length();
- }
-
- template <BlockType BlockSize, class ClosureType, class FunctorType>
- int internal_team_size_common_reduce(const FunctorType& f) const {
- using Interface =
- typename Impl::DeduceFunctorPatternInterface<ClosureType>::type;
- using Analysis =
- Impl::FunctorAnalysis<Interface, typename ClosureType::Policy,
- FunctorType>;
-
- const unsigned shmem_block = team_scratch_size(0) + 2 * sizeof(double);
- const unsigned shmem_thread =
- thread_scratch_size(0) + sizeof(double) +
- ((Analysis::StaticValueSize != 0) ? 0 : Analysis::value_size(f));
- const int vector_length = impl_vector_length();
-
- const auto functor = [&f, shmem_block, shmem_thread, vector_length](
- const hipFuncAttributes& attr, int block_size) {
- int functor_shmem =
- ::Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
- f, block_size / vector_length);
- return shmem_block + shmem_thread * (block_size / vector_length) +
- functor_shmem + attr.sharedSizeBytes;
- };
- int block_size;
- // FIXME_HIP - could be 'if constexpr' with C++17
- if (BlockSize == BlockType::Max) {
- block_size = ::Kokkos::Experimental::Impl::hip_get_max_team_blocksize<
- ClosureType, typename traits::launch_bounds>(
- space().impl_internal_space_instance(), functor);
- } else {
- block_size =
- ::Kokkos::Experimental::Impl::hip_get_preferred_team_blocksize<
- ClosureType, typename traits::launch_bounds>(
- space().impl_internal_space_instance(), functor);
- }
-
- if (block_size == 0) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
- "valid team size."));
- }
- // Currently we require a power-of-two team size for reductions.
- int p2 = 1;
- while (p2 <= block_size) p2 *= 2;
- p2 /= 2;
- return p2 / impl_vector_length();
- }
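
// Editor's note: a standalone sketch of the power-of-two trimming applied
// to reduction team sizes above; the function name is illustrative.
int reduction_team_size_sketch(int block_size, int vector_length) {
  int p2 = 1;
  while (p2 <= block_size) p2 *= 2;  // first power of two above block_size
  p2 /= 2;                           // largest power of two <= block_size
  return p2 / vector_length;
}
// e.g. reduction_team_size_sketch(96, 2) == 64 / 2 == 32.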
-
- template <class ClosureType, class FunctorType>
- int internal_team_size_max(const FunctorType& f) const {
- return internal_team_size_common_reduce<BlockType::Max, ClosureType>(f);
- }
-
- template <class ClosureType, class FunctorType>
- int internal_team_size_recommended(const FunctorType& f) const {
- return internal_team_size_common_reduce<BlockType::Preferred, ClosureType>(
- f);
- }
-};
-
-__device__ inline int64_t hip_get_scratch_index(
- Experimental::HIP::size_type league_size, int32_t* scratch_locks) {
- int64_t threadid = 0;
- __shared__ int64_t base_thread_id;
- if (threadIdx.x == 0 && threadIdx.y == 0) {
- int64_t const wraparound_len =
- Kokkos::min(int64_t(league_size),
- (int64_t(Kokkos::Impl::g_device_hip_lock_arrays.n)) /
- (blockDim.x * blockDim.y));
- threadid = (blockIdx.x * blockDim.z + threadIdx.z) % wraparound_len;
- threadid *= blockDim.x * blockDim.y;
- int done = 0;
- while (!done) {
- done = (0 == atomicCAS(&scratch_locks[threadid], 0, 1));
- if (!done) {
- threadid += blockDim.x * blockDim.y;
- if (int64_t(threadid + blockDim.x * blockDim.y) >=
- wraparound_len * blockDim.x * blockDim.y)
- threadid = 0;
- }
- }
- base_thread_id = threadid;
- }
- __syncthreads();
- threadid = base_thread_id;
- return threadid;
-}
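
// Editor's note: a host-side sketch of the lock-acquisition pattern above,
// using std::atomic in place of device atomicCAS. One "leader" thread per
// block claims a scratch slot by flipping its lock from 0 to 1, scanning
// with wraparound until a free slot is found. Simplified and illustrative
// only; the real code runs on the device and strides by the block size.
#include <atomic>
#include <cstdint>

int64_t claim_scratch_slot_sketch(std::atomic<int32_t>* locks,
                                  int64_t num_slots, int64_t start) {
  int64_t slot = start % num_slots;
  for (;;) {
    int32_t expected = 0;
    if (locks[slot].compare_exchange_strong(expected, 1)) return slot;
    slot = (slot + 1) % num_slots;  // try the next slot, wrapping around
  }
}

void release_scratch_slot_sketch(std::atomic<int32_t>* locks, int64_t slot) {
  locks[slot].store(0);
}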
-
-__device__ inline void hip_release_scratch_index(int32_t* scratch_locks,
- int64_t threadid) {
- __syncthreads();
- if (threadIdx.x == 0 && threadIdx.y == 0) {
- scratch_locks[threadid] = 0;
- }
-}
-
-template <typename FunctorType, typename... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>;
- using functor_type = FunctorType;
- using size_type = ::Kokkos::Experimental::HIP::size_type;
-
- private:
- using member_type = typename Policy::member_type;
- using work_tag = typename Policy::work_tag;
- using launch_bounds = typename Policy::launch_bounds;
-
- // Algorithmic constraints: blockDim.y is a power of two AND
- // blockDim.z == 1. Shared memory utilization:
- //
- // [ team reduce space ]
- // [ team shared space ]
-
- FunctorType const m_functor;
- Policy const m_policy;
- size_type const m_league_size;
- int m_team_size;
- size_type const m_vector_size;
- int m_shmem_begin;
- int m_shmem_size;
- void* m_scratch_ptr[2];
- size_t m_scratch_size[2];
- int32_t* m_scratch_locks;
- // Only let one ParallelFor/Reduce modify the team scratch memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::lock_guard<std::mutex> m_scratch_lock_guard;
-
- template <typename TagType>
- __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
- const member_type& member) const {
- m_functor(member);
- }
-
- template <typename TagType>
- __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
- const member_type& member) const {
- m_functor(TagType(), member);
- }
-
- public:
- __device__ inline void operator()() const {
- // Iterate this block through the league
- int64_t threadid = 0;
- if (m_scratch_size[1] > 0) {
- threadid = hip_get_scratch_index(m_league_size, m_scratch_locks);
- }
-
- int const int_league_size = static_cast<int>(m_league_size);
- for (int league_rank = blockIdx.x; league_rank < int_league_size;
- league_rank += gridDim.x) {
- this->template exec_team<work_tag>(typename Policy::member_type(
- ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<void>(),
- m_shmem_begin, m_shmem_size,
- static_cast<void*>(static_cast<char*>(m_scratch_ptr[1]) +
- ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
- m_scratch_size[1]),
- m_scratch_size[1], league_rank, m_league_size));
- }
- if (m_scratch_size[1] > 0) {
- hip_release_scratch_index(m_scratch_locks, threadid);
- }
- }
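
// Editor's note: a serial sketch of the grid-stride league iteration in
// operator()() above; block_id and grid_size stand in for blockIdx.x and
// gridDim.x. Illustrative only.
#include <functional>

void iterate_league_sketch(int block_id, int grid_size, int league_size,
                           const std::function<void(int)>& process_rank) {
  // Each "block" starts at its own index and strides by the grid size, so
  // any grid size covers every league rank exactly once across blocks.
  for (int rank = block_id; rank < league_size; rank += grid_size)
    process_rank(rank);
}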
-
- inline void execute() const {
- int64_t const shmem_size_total = m_shmem_begin + m_shmem_size;
- dim3 const grid(static_cast<int>(m_league_size), 1, 1);
- dim3 const block(static_cast<int>(m_vector_size),
- static_cast<int>(m_team_size), 1);
-
- using closure_type =
- ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Experimental::HIP>;
- ::Kokkos::Experimental::Impl::hip_parallel_launch<closure_type,
- launch_bounds>(
- *this, grid, block, shmem_size_total,
- m_policy.space().impl_internal_space_instance(),
- true); // copy to device and execute
- }
-
- ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock_guard(m_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- m_team_size = m_team_size >= 0 ? m_team_size
- : arg_policy.team_size_recommended(
- arg_functor, ParallelForTag());
-
- m_shmem_begin = (sizeof(double) * (m_team_size + 2));
- m_shmem_size =
- (m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
- m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
- m_scratch_ptr[0] = nullptr;
- m_scratch_ptr[1] =
- m_team_size <= 0
- ? nullptr
- : m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(static_cast<std::int64_t>(
- Kokkos::Experimental::HIP::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
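
// Editor's note: worked example with illustrative numbers: if concurrency()
// == 163840, m_team_size == 64, m_vector_size == 1, and m_league_size ==
// 10000, the level-1 allocation above is
// m_scratch_size[1] * min(163840 / 64, 10000) == m_scratch_size[1] * 2560
// bytes, i.e. one slot per team that can be resident simultaneously.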
-
- int const shmem_size_total = m_shmem_begin + m_shmem_size;
- if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
- Kokkos::Impl::throw_runtime_exception(std::string(
- "Kokkos::Impl::ParallelFor< HIP > insufficient shared memory"));
- }
-
- size_t max_size = arg_policy.team_size_max(arg_functor, ParallelForTag());
- if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
- Kokkos::Impl::throw_runtime_exception(std::string(
- "Kokkos::Impl::ParallelFor< HIP > requested too large team size."));
- }
- }
-};
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Experimental::HIP> {
- public:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>;
-
- private:
- using member_type = typename Policy::member_type;
- using work_tag = typename Policy::work_tag;
- using launch_bounds = typename Policy::launch_bounds;
-
- using reducer_conditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using reducer_type_fwd = typename reducer_conditional::type;
- using work_tag_fwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- work_tag, void>::type;
-
- using analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, reducer_type_fwd>;
-
- using pointer_type = typename analysis::pointer_type;
- using reference_type = typename analysis::reference_type;
- using value_type = typename analysis::value_type;
-
- public:
- using functor_type = FunctorType;
- using size_type = Kokkos::Experimental::HIP::size_type;
-
- static int constexpr UseShflReduction = (analysis::StaticValueSize != 0);
-
- private:
- struct ShflReductionTag {};
- struct SHMEMReductionTag {};
-
- // Algorithmic constraints: blockDim.y is a power of two AND
- // blockDim.z == 1. Shared memory utilization:
- //
- // [ global reduce space ]
- // [ team reduce space ]
- // [ team shared space ]
- //
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
- const bool m_result_ptr_host_accessible;
- size_type* m_scratch_space;
- size_type* m_scratch_flags;
- size_type m_team_begin;
- size_type m_shmem_begin;
- size_type m_shmem_size;
- void* m_scratch_ptr[2];
- size_t m_scratch_size[2];
- int32_t* m_scratch_locks;
- const size_type m_league_size;
- int m_team_size;
- const size_type m_vector_size;
- // Only let one ParallelFor/Reduce modify the team scratch memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::lock_guard<std::mutex> m_scratch_lock_guard;
-
- template <class TagType>
- __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
- member_type const& member, reference_type update) const {
- m_functor(member, update);
- }
-
- template <class TagType>
- __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
- member_type const& member, reference_type update) const {
- m_functor(TagType(), member, update);
- }
-
- __device__ inline void iterate_through_league(int const threadid,
- reference_type value) const {
- int const int_league_size = static_cast<int>(m_league_size);
- for (int league_rank = blockIdx.x; league_rank < int_league_size;
- league_rank += gridDim.x) {
- this->template exec_team<work_tag>(
- member_type(
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<char>() +
- m_team_begin,
- m_shmem_begin, m_shmem_size,
- reinterpret_cast<void*>(
- reinterpret_cast<char*>(m_scratch_ptr[1]) +
- static_cast<ptrdiff_t>(threadid / (blockDim.x * blockDim.y)) *
- m_scratch_size[1]),
- m_scratch_size[1], league_rank, m_league_size),
- value);
- }
- }
-
- public:
- __device__ inline void operator()() const {
- int64_t threadid = 0;
- if (m_scratch_size[1] > 0) {
- threadid = hip_get_scratch_index(m_league_size, m_scratch_locks);
- }
-
- using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
- SHMEMReductionTag>;
- run(ReductionTag{}, threadid);
-
- if (m_scratch_size[1] > 0) {
- hip_release_scratch_index(m_scratch_locks, threadid);
- }
- }
-
- __device__ inline void run(SHMEMReductionTag, int const threadid) const {
- typename analysis::Reducer final_reducer(
- &reducer_conditional::select(m_functor, m_reducer));
-
- integral_nonzero_constant<size_type, analysis::StaticValueSize /
- sizeof(size_type)> const
- word_count(analysis::value_size(
- reducer_conditional::select(m_functor, m_reducer)) /
- sizeof(size_type));
-
- reference_type value = final_reducer.init(
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- threadIdx.y * word_count.value);
-
- // Iterate this block through the league
- iterate_through_league(threadid, value);
-
- // Reduce with final value at blockDim.y - 1 location.
- bool do_final_reduce = (m_league_size == 0);
- if (!do_final_reduce)
- do_final_reduce =
- hip_single_inter_block_reduce_scan<false, FunctorType, work_tag>(
- reducer_conditional::select(m_functor, m_reducer), blockIdx.x,
- gridDim.x,
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
- m_scratch_space, m_scratch_flags);
- if (do_final_reduce) {
- // This is the final block with the final result at the final threads'
- // location
-
- size_type* const shared =
- Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
- (blockDim.y - 1) * word_count.value;
- size_type* const global = m_result_ptr_device_accessible
- ? reinterpret_cast<size_type*>(m_result_ptr)
- : m_scratch_space;
-
- if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
- }
-
- if (Kokkos::Experimental::Impl::HIPTraits::WarpSize < word_count.value) {
- __syncthreads();
- }
-
- for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
- global[i] = shared[i];
- }
- }
- }
-
- __device__ inline void run(ShflReductionTag, int const threadid) const {
- typename analysis::Reducer final_reducer(
- &reducer_conditional::select(m_functor, m_reducer));
-
- value_type value;
- final_reducer.init(&value);
-
- // Iterate this block through the league
- iterate_through_league(threadid, value);
-
- pointer_type const result =
- m_result_ptr_device_accessible
- ? m_result_ptr
- : reinterpret_cast<pointer_type>(m_scratch_space);
-
- value_type init;
- final_reducer.init(&init);
- if (m_league_size == 0) {
- final_reducer.final(&value);
- *result = value;
- } else if (Impl::hip_inter_block_shuffle_reduction(
- value, init, final_reducer, m_scratch_space, result,
- m_scratch_flags, blockDim.y)) {
- unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
- if (id == 0) {
- final_reducer.final(&value);
- *result = value;
- }
- }
- }
-
- inline void execute() {
- typename analysis::Reducer final_reducer(
- &reducer_conditional::select(m_functor, m_reducer));
-
- const bool is_empty_range = m_league_size == 0 || m_team_size == 0;
- const bool need_device_set = analysis::has_init_member_function ||
- analysis::has_final_member_function ||
- !m_result_ptr_host_accessible ||
- !std::is_same<ReducerType, InvalidType>::value;
- if (!is_empty_range || need_device_set) {
- const int block_count =
- UseShflReduction
- ? std::min(
- m_league_size,
- size_type(1024 *
- Kokkos::Experimental::Impl::HIPTraits::WarpSize))
- : std::min(static_cast<int>(m_league_size), m_team_size);
-
- m_scratch_space = Kokkos::Experimental::Impl::hip_internal_scratch_space(
- m_policy.space(), analysis::value_size(reducer_conditional::select(
- m_functor, m_reducer)) *
- block_count);
- m_scratch_flags = Kokkos::Experimental::Impl::hip_internal_scratch_flags(
- m_policy.space(), sizeof(size_type));
-
- dim3 block(m_vector_size, m_team_size, 1);
- dim3 grid(block_count, 1, 1);
- if (is_empty_range) {
- block = dim3(1, 1, 1);
- grid = dim3(1, 1, 1);
- }
- const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
-
- using closure_type =
- ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Experimental::HIP>;
- Kokkos::Experimental::Impl::hip_parallel_launch<closure_type,
- launch_bounds>(
- *this, grid, block, shmem_size_total,
- m_policy.space().impl_internal_space_instance(),
- true); // copy to device and execute
-
- if (!m_result_ptr_device_accessible) {
- m_policy.space().impl_internal_space_instance()->fence();
-
- if (m_result_ptr) {
- const int size = analysis::value_size(
- reducer_conditional::select(m_functor, m_reducer));
- DeepCopy<HostSpace, Kokkos::Experimental::HIPSpace>(
- m_result_ptr, m_scratch_space, size);
- }
- }
- } else {
- if (m_result_ptr) {
- final_reducer.init(m_result_ptr);
- }
- }
- }
-
- template <class ViewType>
- ParallelReduce(
- FunctorType const& arg_functor, Policy const& arg_policy,
- ViewType const& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ViewType::memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ViewType::memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_team_begin(0),
- m_shmem_begin(0),
- m_shmem_size(0),
- m_scratch_ptr{nullptr, nullptr},
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock_guard(m_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- m_team_size = m_team_size >= 0 ? m_team_size
- : arg_policy.team_size_recommended(
- arg_functor, ParallelReduceTag());
-
- m_team_begin =
- UseShflReduction
- ? 0
- : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
- work_tag>(arg_functor,
- m_team_size);
- m_shmem_begin = sizeof(double) * (m_team_size + 2);
- m_shmem_size =
- m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
- m_scratch_ptr[1] =
- m_team_size <= 0
- ? nullptr
- : m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(static_cast<std::int64_t>(
- Kokkos::Experimental::HIP::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
-
- // The global parallel_reduce does not support vector_length other than 1 at
- // the moment
- if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
- "greater than 1 is not currently supported for HIP for dynamic "
- "sized reduction types.");
-
- if ((m_team_size < Kokkos::Experimental::Impl::HIPTraits::WarpSize) &&
- !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
- "than 64 is not currently supported with HIP for dynamic sized "
- "reduction types.");
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
-
- const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
-
- if (!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
- !UseShflReduction) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > bad team size"));
- }
-
- if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > requested too much "
- "L0 scratch memory"));
- }
-
- size_t max_size =
- arg_policy.team_size_max(arg_functor, ParallelReduceTag());
- if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > requested too "
- "large team size."));
- }
- }
-
- ParallelReduce(FunctorType const& arg_functor, Policy const& arg_policy,
- ReducerType const& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_team_begin(0),
- m_shmem_begin(0),
- m_shmem_size(0),
- m_scratch_ptr{nullptr, nullptr},
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock_guard(m_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- m_team_size = m_team_size >= 0
- ? m_team_size
- : arg_policy.team_size_recommended(arg_functor, reducer,
- ParallelReduceTag());
- m_team_begin =
- UseShflReduction
- ? 0
- : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
- work_tag>(arg_functor,
- m_team_size);
- m_shmem_begin = sizeof(double) * (m_team_size + 2);
- m_shmem_size =
- m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
- m_scratch_ptr[1] =
- m_team_size <= 0
- ? nullptr
- : m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(static_cast<std::int64_t>(
- Kokkos::Experimental::HIP::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
-
- // The global parallel_reduce does not support vector_length other than 1 at
- // the moment
- if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
- "greater than 1 is not currently supported for HIP for dynamic "
- "sized reduction types.");
-
- if ((m_team_size < Kokkos::Experimental::Impl::HIPTraits::WarpSize) &&
- !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
- "than 64 is not currently supported with HIP for dynamic sized "
- "reduction types.");
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
-
- const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
- if ((!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
- !UseShflReduction) ||
- m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > bad team size"));
- }
-
- size_t max_size =
- arg_policy.team_size_max(arg_functor, reducer, ParallelReduceTag());
- if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< HIP > requested too "
- "large team size."));
- }
- }
-};
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <Kokkos_Core.hpp>
-#include <Kokkos_HIP.hpp>
-#include <Kokkos_HIP_Space.hpp>
-
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <impl/Kokkos_DeviceManagement.hpp>
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-#include <stdlib.h>
-#include <iostream>
-#include <sstream>
-#include <algorithm>
-#include <atomic>
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-namespace {
-
-static std::atomic<bool> is_first_hip_managed_allocation(true);
-
-bool hip_driver_check_page_migration(int deviceId) {
- // check with driver if page migrating memory is available
- // this driver query is copied from the hip documentation
- int hasManagedMemory = 0; // false by default
- KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceGetAttribute(
- &hasManagedMemory, hipDeviceAttributeManagedMemory, deviceId));
- return static_cast<bool>(hasManagedMemory);
-}
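
// Editor's note: a hedged usage sketch of the attribute query above.
// hipDeviceGetAttribute and hipDeviceAttributeManagedMemory are the real
// HIP API names already used in this file; the surrounding control flow is
// illustrative only.
//
//   int has_managed = 0;
//   KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceGetAttribute(
//       &has_managed, hipDeviceAttributeManagedMemory, device_id));
//   if (!has_managed) { /* fall back to explicit host<->device copies */ }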
-} // namespace
-namespace Kokkos {
-namespace Impl {
-
-namespace {
-hipStream_t get_deep_copy_stream() {
- static hipStream_t s = nullptr;
- if (s == nullptr) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&s));
- }
- return s;
-}
-} // namespace
-
-void DeepCopyHIP(void* dst, void const* src, size_t n) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault));
-}
-
-void DeepCopyAsyncHIP(const Kokkos::Experimental::HIP& instance, void* dst,
- void const* src, size_t n) {
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipMemcpyAsync(dst, src, n, hipMemcpyDefault, instance.hip_stream()));
-}
-
-void DeepCopyAsyncHIP(void* dst, void const* src, size_t n) {
- hipStream_t s = get_deep_copy_stream();
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault, s));
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HIP>(
- "Kokkos::Impl::DeepCopyAsyncHIP: Post Deep Copy Fence on Deep-Copy "
- "stream",
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- DeepCopyResourceSynchronization,
- [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(s)); });
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-namespace Kokkos {
-
-KOKKOS_DEPRECATED void Experimental::HIPSpace::access_error() {
- const std::string msg(
- "Kokkos::Experimental::HIPSpace::access_error attempt to execute "
- "Experimental::HIP function from non-HIP space");
- Kokkos::Impl::throw_runtime_exception(msg);
-}
-
-KOKKOS_DEPRECATED void Experimental::HIPSpace::access_error(const void* const) {
- const std::string msg(
- "Kokkos::Experimental::HIPSpace::access_error attempt to execute "
- "Experimental::HIP function from non-HIP space");
- Kokkos::Impl::throw_runtime_exception(msg);
-}
-
-} // namespace Kokkos
-#endif
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-
-HIPSpace::HIPSpace() : m_device(HIP().hip_device()) {}
-
-HIPHostPinnedSpace::HIPHostPinnedSpace() {}
-
-HIPManagedSpace::HIPManagedSpace() : m_device(HIP().hip_device()) {}
-
-void* HIPSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void* HIPSpace::allocate(
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void* HIPSpace::impl_allocate(
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- void* ptr = nullptr;
-
- auto const error_code = hipMalloc(&ptr, arg_alloc_size);
- if (error_code != hipSuccess) {
- // This is the only way to clear the last error, which we should do
- // since we are turning it into an exception here.
- (void)hipGetLastError();
- throw HIPRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- RawMemoryAllocationFailure::AllocationMechanism::HIPMalloc);
- }
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
-
-void* HIPHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void* HIPHostPinnedSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void* HIPHostPinnedSpace::impl_allocate(
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- void* ptr = nullptr;
-
- auto const error_code =
- hipHostMalloc(&ptr, arg_alloc_size, hipHostMallocNonCoherent);
- if (error_code != hipSuccess) {
- // This is the only way to clear the last error, which we should do
- // since we are turning it into an exception here.
- (void)hipGetLastError();
- throw HIPRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- RawMemoryAllocationFailure::AllocationMechanism::HIPHostMalloc);
- }
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
-
-void* HIPManagedSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void* HIPManagedSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void* HIPManagedSpace::impl_allocate(
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- void* ptr = nullptr;
-
- if (arg_alloc_size > 0) {
- if (is_first_hip_managed_allocation.exchange(false) &&
- Kokkos::show_warnings()) {
- if (!hip_driver_check_page_migration(m_device)) {
- std::cerr << R"warning(
-Kokkos::HIP::allocation WARNING: The combination of device and system configuration
- does not support page migration between device and host.
- HIPManagedSpace might not work as expected.
- Please refer to the ROCm documentation on unified/managed memory.)warning"
- << std::endl;
- }
-
- // check for correct runtime environment
- const char* hsa_xnack = std::getenv("HSA_XNACK");
- if (!hsa_xnack)
- std::cerr << R"warning(
-Kokkos::HIP::runtime WARNING: Kokkos did not find an environment variable 'HSA_XNACK'
- for the current process.
- Nevertheless, xnack is enabled for all processes if
- amdgpu.noretry=0 was set in the Linux kernel boot line.
- Without xnack enabled, Kokkos::HIPManaged might not behave
- as expected.)warning"
- << std::endl;
- else if (Kokkos::Impl::strcmp(hsa_xnack, "1") != 0)
- std::cerr << "Kokkos::HIP::runtime WARNING: Kokkos detected the "
- "environement variable "
- << "'HSA_XNACK=" << hsa_xnack << "\n"
- << "Kokkos advises to set it to '1' to enable it per process."
- << std::endl;
- }
- auto const error_code = hipMallocManaged(&ptr, arg_alloc_size);
- if (error_code != hipSuccess) {
- // This is the only way to clear the last error, which we should do
- // since we are turning it into an exception here.
- (void)hipGetLastError();
- throw HIPRawMemoryAllocationFailure(
- arg_alloc_size, error_code,
- RawMemoryAllocationFailure::AllocationMechanism::HIPMallocManaged);
- }
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
- ptr, arg_alloc_size, hipMemAdviseSetCoarseGrain, m_device));
- }
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
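
// Editor's note: a minimal standalone sketch of the HSA_XNACK environment
// check performed above; HSA_XNACK is the real ROCm variable, the function
// name is illustrative.
#include <cstdlib>
#include <cstring>

bool xnack_enabled_sketch() {
  const char* v = std::getenv("HSA_XNACK");
  // ROCm treats HSA_XNACK=1 as "page-migration retry enabled" per process.
  return v != nullptr && std::strcmp(v, "1") == 0;
}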
-
-void HIPSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-void HIPSpace::deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void HIPSpace::impl_deallocate(
- const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
-}
-
-void HIPHostPinnedSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void HIPHostPinnedSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void HIPHostPinnedSpace::impl_deallocate(
- const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(arg_alloc_ptr));
-}
-
-void HIPManagedSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void HIPManagedSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void HIPManagedSpace::impl_deallocate(
- const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- // We have to unset the CoarseGrain property manually as hipFree does not
- // take care of it. Otherwise, the allocation would continue to linger in
- // the kernel's memory page table.
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
- arg_alloc_ptr, arg_alloc_size, hipMemAdviseUnsetCoarseGrain, m_device));
- KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::HIPHostPinnedSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::HIPManagedSpace, void>::s_root_record;
-#endif
-
-SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>::~SharedAllocationRecord() {
- auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace,
- void>::~SharedAllocationRecord() {
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace,
- void>::~SharedAllocationRecord() {
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- // Copy to device memory
- Kokkos::Experimental::HIP exec;
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::HIPSpace, HostSpace>(
- exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
- exec.fence(
- "SharedAllocationRecord<Kokkos::Experimental::HIPSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::HIP& arg_exec_space,
- const Kokkos::Experimental::HIPSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- // Copy to device memory
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::HIPSpace, HostSpace>(
- arg_exec_space, RecordBase::m_alloc_ptr, &header,
- sizeof(SharedAllocationHeader));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- // Fill in the Header information, directly accessible via host pinned memory
- this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
- arg_label);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPManagedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- // Fill in the Header information, directly accessible via managed memory
- this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
- arg_label);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-namespace Kokkos {
-namespace Experimental {
-
-int HIP::concurrency() {
- auto const& prop = hip_device_prop();
- return prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount;
-}
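
// Editor's note: worked example with illustrative numbers: on a device
// with 120 multiprocessors and 2048 max threads per multiprocessor,
// concurrency() returns 120 * 2048 == 245760.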
-int HIP::impl_is_initialized() {
- return Impl::HIPInternal::singleton().is_initialized();
-}
-
-void HIP::impl_initialize(InitializationSettings const& settings) {
- Impl::HIPInternal::singleton().initialize(::Kokkos::Impl::get_gpu(settings));
-}
-
-void HIP::impl_finalize() { Impl::HIPInternal::singleton().finalize(); }
-
-HIP::HIP()
- : m_space_instance(&Impl::HIPInternal::singleton(),
- [](Impl::HIPInternal*) {}) {
- Impl::HIPInternal::singleton().verify_is_initialized(
- "HIP instance constructor");
-}
-
-HIP::HIP(hipStream_t const stream, bool manage_stream)
- : m_space_instance(new Impl::HIPInternal, [](Impl::HIPInternal* ptr) {
- ptr->finalize();
- delete ptr;
- }) {
- Impl::HIPInternal::singleton().verify_is_initialized(
- "HIP instance constructor");
- m_space_instance->initialize(Impl::HIPInternal::singleton().m_hipDev, stream,
- manage_stream);
-}
-
-void HIP::print_configuration(std::ostream& os, bool /*verbose*/) const {
- os << "Device Execution Space:\n";
- os << " KOKKOS_ENABLE_HIP: yes\n";
-
- os << "HIP Options:\n";
- os << " KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE: ";
-#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
- os << "yes\n";
-#else
- os << "no\n";
-#endif
-
- os << "\nRuntime Configuration:\n";
-
- m_space_instance->print_configuration(os);
-}
-
-uint32_t HIP::impl_instance_id() const noexcept {
- return m_space_instance->impl_get_instance_id();
-}
-void HIP::impl_static_fence(const std::string& name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HIP>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize()); });
-}
-
-void HIP::fence(const std::string& name) const {
- m_space_instance->fence(name);
-}
-
-hipStream_t HIP::hip_stream() const { return m_space_instance->m_stream; }
-
-int HIP::hip_device() const { return impl_internal_space_instance()->m_hipDev; }
-
-hipDeviceProp_t const& HIP::hip_device_prop() {
- return Impl::HIPInternal::singleton().m_deviceProp;
-}
-
-const char* HIP::name() { return "HIP"; }
-
-} // namespace Experimental
-
-namespace Impl {
-
-int g_hip_space_factory_initialized =
- initialize_space_factory<::Kokkos::Experimental::HIP>("150_HIP");
-
-} // namespace Impl
-
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<Kokkos::Experimental::HIP>::id;
-}
-} // namespace Tools
-#endif
-
-} // namespace Kokkos
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes
-// here, where we have access to the associated *_timpl.hpp header files.
-template class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::HIPSpace>;
-template class SharedAllocationRecordCommon<Kokkos::Experimental::HIPSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPHostPinnedSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPManagedSpace>;
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_HIP_WORKGRAPHPOLICY_HPP
-#define KOKKOS_HIP_WORKGRAPHPOLICY_HPP
-
-#include <Kokkos_HIP.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::Experimental::HIP> {
- public:
- using Policy = Kokkos::WorkGraphPolicy<Traits...>;
- using Self = ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
-
- private:
- Policy m_policy;
- FunctorType m_functor;
-
- template <class TagType>
- __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- m_functor(w);
- }
-
- template <class TagType>
- __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- const TagType t{};
- m_functor(t, w);
- }
-
- public:
- __device__ inline void operator()() const noexcept {
- // Spin until COMPLETED_TOKEN.
- // END_TOKEN indicates no work is currently available.
- for (std::int32_t w = Policy::END_TOKEN;
- Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
- if (Policy::END_TOKEN != w) {
- exec_one<typename Policy::work_tag>(w);
- m_policy.completed_work(w);
- }
- }
- }
-
- inline void execute() {
- const int warps_per_block = 4;
- const dim3 grid(
- Kokkos::Experimental::Impl::hip_internal_multiprocessor_count(), 1, 1);
- const dim3 block(1, Kokkos::Experimental::Impl::HIPTraits::WarpSize,
- warps_per_block);
- const int shared = 0;
-
- Kokkos::Experimental::Impl::HIPParallelLaunch<Self>(
- *this, grid, block, shared,
- Experimental::HIP().impl_internal_space_instance(), false);
- }
-
- inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_policy(arg_policy), m_functor(arg_functor) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* KOKKOS_HIP_WORKGRAPHPOLICY_HPP */
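[Editor's example; not part of the original sources] A host-side sketch of the
work-token protocol that the operator() above spins on; FakeWorkQueue is a
stand-in, not the real WorkGraphPolicy. END_TOKEN means "nothing ready yet,
poll again" (a dependency is still in flight); COMPLETED_TOKEN means "all
items done, exit".

  #include <cstdint>

  struct FakeWorkQueue {
    static constexpr std::int32_t END_TOKEN       = -1;
    static constexpr std::int32_t COMPLETED_TOKEN = -2;
    std::int32_t next = 0, limit = 8;
    std::int32_t pop_work() {
      // The real queue may also return END_TOKEN while dependencies resolve.
      return next < limit ? next++ : COMPLETED_TOKEN;
    }
    void completed_work(std::int32_t) { /* releases dependent items */ }
  };

  template <class Functor>
  void drive(FakeWorkQueue &q, Functor f) {
    for (std::int32_t w; (w = q.pop_work()) != FakeWorkQueue::COMPLETED_TOKEN;) {
      if (w != FakeWorkQueue::END_TOKEN) {
        f(w);                 // execute one work item
        q.completed_work(w);  // may make successor items available
      }
    }
  }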
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp>
-
-#ifdef KOKKOS_ENABLE_HPX
-#include <Kokkos_HPX.hpp>
-
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-#include <hpx/local/condition_variable.hpp>
-#include <hpx/local/init.hpp>
-#include <hpx/local/thread.hpp>
-#include <hpx/local/mutex.hpp>
-
-#include <atomic>
-#include <chrono>
-#include <iostream>
-#include <memory>
-#include <string>
-#include <type_traits>
-
-namespace Kokkos {
-namespace Experimental {
-
-bool HPX::m_hpx_initialized = false;
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
-std::atomic<uint32_t> HPX::m_next_instance_id{HPX::impl_default_instance_id() +
- 1};
-uint32_t HPX::m_active_parallel_region_count{0};
-hpx::spinlock HPX::m_active_parallel_region_count_mutex;
-hpx::condition_variable_any HPX::m_active_parallel_region_count_cond;
-HPX::instance_data HPX::m_default_instance_data;
-#else
-Kokkos::Impl::thread_buffer HPX::m_default_buffer;
-#endif
-
-int HPX::concurrency() {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- if (rt == nullptr) {
-   return hpx::threads::hardware_concurrency();
- } else if (hpx::threads::get_self_ptr() == nullptr) {
-   return hpx::resource::get_thread_pool(0).get_os_thread_count();
- } else {
-   return hpx::this_thread::get_pool()->get_os_thread_count();
- }
-}
-
-void HPX::impl_initialize(InitializationSettings const &settings) {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- if (rt == nullptr) {
- hpx::local::init_params i;
- i.cfg = {
-#ifdef KOKKOS_ENABLE_DEBUG
- "--hpx:attach-debugger=exception",
-#endif
- };
- if (settings.has_num_threads()) {
- i.cfg.emplace_back("hpx.os_threads=" +
- std::to_string(settings.get_num_threads()));
- }
- int argc_hpx = 1;
- char name[] = "kokkos_hpx";
- char *argv_hpx[] = {name, nullptr};
- hpx::local::start(nullptr, argc_hpx, argv_hpx, i);
-
- m_hpx_initialized = true;
- }
-}
-
-bool HPX::impl_is_initialized() noexcept {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- return rt != nullptr;
-}
-
-void HPX::impl_finalize() {
- if (m_hpx_initialized) {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- if (rt != nullptr) {
- hpx::apply([]() { hpx::local::finalize(); });
- hpx::local::stop();
- } else {
- Kokkos::abort(
- "Kokkos::Experimental::HPX::impl_finalize: Kokkos started "
- "HPX but something else already stopped HPX\n");
- }
- }
-}
-
-} // namespace Experimental
-
-namespace Impl {
-
-int g_hpx_space_factory_initialized =
- initialize_space_factory<Kokkos::Experimental::HPX>("060_HPX");
-
-} // namespace Impl
-
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<Kokkos::Experimental::HPX>::id;
-}
-} // namespace Tools
-#endif
-
-} // namespace Kokkos
-
-#else
-void KOKKOS_CORE_SRC_IMPL_HPX_PREVENT_LINK_ERROR() {}
-#endif //#ifdef KOKKOS_ENABLE_HPX
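[Editor's example; not part of the original sources] How the settings consumed
by HPX::impl_initialize above are typically supplied from user code; a sketch,
assuming a Kokkos build with the HPX backend enabled.

  #include <Kokkos_Core.hpp>

  int main() {
    // has_num_threads()/get_num_threads() feed the "hpx.os_threads=" entry
    // that impl_initialize appends to the HPX configuration above.
    Kokkos::initialize(Kokkos::InitializationSettings().set_num_threads(4));
    // ... parallel work ...
    Kokkos::finalize();  // reaches impl_finalize if Kokkos started HPX
  }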
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <Kokkos_Core.hpp>
-
-#include <impl/Kokkos_TaskQueue_impl.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template class TaskQueue<Kokkos::Experimental::HPX,
- Kokkos::Experimental::HPX::memory_space>;
-
-} // namespace Impl
-} // namespace Kokkos
-
-#else
-void KOKKOS_CORE_SRC_IMPL_HPX_TASK_PREVENT_LINK_ERROR() {}
-#endif // #if defined( KOKKOS_ENABLE_HPX ) && defined( KOKKOS_ENABLE_TASKDAG )
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-#ifndef KOKKOS_HPX_TASK_HPP
-#define KOKKOS_HPX_TASK_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <Kokkos_TaskScheduler_fwd.hpp>
-
-#include <Kokkos_HPX.hpp>
-
-#include <hpx/local/execution.hpp>
-#include <hpx/local/future.hpp>
-
-#include <type_traits>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class QueueType>
-class TaskQueueSpecialization<
- SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>> {
- public:
- using execution_space = Kokkos::Experimental::HPX;
- using scheduler_type =
- SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>;
- using member_type =
- TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
- using memory_space = Kokkos::HostSpace;
-
- static void execute(scheduler_type const &scheduler) {
- // NOTE: We create an instance so that we can use dispatch_execute_task.
- // This is not necessarily the most efficient, but can be improved later.
- TaskQueueSpecialization<scheduler_type> task_queue;
- task_queue.scheduler = &scheduler;
- Kokkos::Impl::dispatch_execute_task(&task_queue,
- Kokkos::Experimental::HPX());
- Kokkos::Experimental::HPX().fence(
- "Kokkos::Impl::TaskQueueSpecialization<SimpleTask>::execute: fence "
- "after task execution");
- }
-
- // Must provide task queue execution function
- void execute_task() const {
- // See [note 1] in Kokkos_HPX.hpp for an explanation. The task queue does
- // not store an execution space instance, so we only need to reset the
- // parallel region count here.
- Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
- using task_base_type = typename scheduler_type::task_base_type;
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
-
- thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
- buffer.resize(num_worker_threads, 512);
-
- auto &queue = scheduler->queue();
-
- for_loop(par.with(static_chunk_size(1)), 0, num_worker_threads,
- [this, &queue, &buffer, num_worker_threads](int) {
- // NOTE: This implementation has been simplified based on the
- // assumption that team_size = 1. The HPX backend currently only
- // supports a team size of 1.
- std::size_t t =
- Kokkos::Experimental::HPX::impl_hardware_thread_id();
-
- buffer.get(t);
- HPXTeamMember member(
- TeamPolicyInternal<Kokkos::Experimental::HPX>(
- Kokkos::Experimental::HPX(), num_worker_threads, 1),
- 0, t, buffer.get(t), 512);
-
- member_type single_exec(*scheduler, member);
- member_type &team_exec = single_exec;
-
- auto &team_scheduler = team_exec.scheduler();
- auto current_task = OptionalRef<task_base_type>(nullptr);
-
- while (!queue.is_done()) {
- current_task =
- queue.pop_ready_task(team_scheduler.team_scheduler_info());
-
- if (current_task) {
- KOKKOS_ASSERT(current_task->is_single_runnable() ||
- current_task->is_team_runnable());
- current_task->as_runnable_task().run(single_exec);
- queue.complete((*std::move(current_task)).as_runnable_task(),
- team_scheduler.team_scheduler_info());
- }
- }
- });
- }
-
- static uint32_t get_max_team_count(execution_space const &espace) {
- return static_cast<uint32_t>(espace.concurrency());
- }
-
- template <typename TaskType>
- static void get_function_pointer(typename TaskType::function_type &ptr,
- typename TaskType::destroy_type &dtor) {
- ptr = TaskType::apply;
- dtor = TaskType::destroy;
- }
-
- private:
- const scheduler_type *scheduler;
-};
-
-template <class Scheduler>
-class TaskQueueSpecializationConstrained<
- Scheduler,
- std::enable_if_t<std::is_same<typename Scheduler::execution_space,
- Kokkos::Experimental::HPX>::value>> {
- public:
- using execution_space = Kokkos::Experimental::HPX;
- using scheduler_type = Scheduler;
- using member_type =
- TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
- using memory_space = Kokkos::HostSpace;
-
- static void iff_single_thread_recursive_execute(
- scheduler_type const &scheduler) {
- using task_base_type = typename scheduler_type::task_base;
- using queue_type = typename scheduler_type::queue_type;
-
- if (1 == Kokkos::Experimental::HPX::concurrency()) {
- task_base_type *const end = (task_base_type *)task_base_type::EndTag;
- task_base_type *task = end;
-
- HPXTeamMember member(TeamPolicyInternal<Kokkos::Experimental::HPX>(
- Kokkos::Experimental::HPX(), 1, 1),
- 0, 0, nullptr, 0);
- member_type single_exec(scheduler, member);
-
- do {
- task = end;
-
- // Loop by priority and then type
- for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
- for (int j = 0; j < 2 && end == task; ++j) {
- task =
- queue_type::pop_ready_task(&scheduler.m_queue->m_ready[i][j]);
- }
- }
-
- if (end == task) break;
-
- (*task->m_apply)(task, &single_exec);
-
- scheduler.m_queue->complete(task);
-
- } while (true);
- }
- }
-
- static void execute(scheduler_type const &scheduler) {
- // NOTE: We create an instance so that we can use dispatch_execute_task.
- // This is not necessarily the most efficient, but can be improved later.
- TaskQueueSpecializationConstrained<scheduler_type> task_queue;
- task_queue.scheduler = &scheduler;
- Kokkos::Impl::dispatch_execute_task(&task_queue,
- Kokkos::Experimental::HPX());
- Kokkos::Experimental::HPX().fence(
- "Kokkos::Impl::TaskQueueSpecialization<SimpleTask>::execute: fence "
- "after task execution");
- }
-
- // Must provide task queue execution function
- void execute_task() const {
- // See [note 1] in Kokkos_HPX.hpp for an explanation. The task queue does
- // not store an execution space instance, so we only need to reset the
- // parallel region count here.
- Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- using task_base_type = typename scheduler_type::task_base;
- using queue_type = typename scheduler_type::queue_type;
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
- static task_base_type *const end = (task_base_type *)task_base_type::EndTag;
- constexpr task_base_type *no_more_tasks_sentinel = nullptr;
-
- thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
- buffer.resize(num_worker_threads, 512);
-
- auto &queue = scheduler->queue();
- queue.initialize_team_queues(num_worker_threads);
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [this, &buffer, num_worker_threads](int t) {
- // NOTE: This implementation has been simplified based on the
- // assumption that team_size = 1. The HPX backend currently only
- // supports a team size of 1.
- buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id());
- HPXTeamMember member(
- TeamPolicyInternal<Kokkos::Experimental::HPX>(
- Kokkos::Experimental::HPX(), num_worker_threads, 1),
- 0, t, buffer.get(t), 512);
-
- member_type single_exec(*scheduler, member);
- member_type &team_exec = single_exec;
-
- auto &team_queue = team_exec.scheduler().queue();
- task_base_type *task = no_more_tasks_sentinel;
-
- do {
- if (task != no_more_tasks_sentinel && task != end) {
- team_queue.complete(task);
- }
-
- if (*((volatile int *)&team_queue.m_ready_count) > 0) {
- task = end;
- for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
- for (int j = 0; j < 2 && end == task; ++j) {
- task = queue_type::pop_ready_task(&team_queue.m_ready[i][j]);
- }
- }
- } else {
- task = team_queue.attempt_to_steal_task();
- }
-
- if (task != no_more_tasks_sentinel && task != end) {
- (*task->m_apply)(task, &single_exec);
- }
- } while (task != no_more_tasks_sentinel);
- });
- }
-
- template <typename TaskType>
- static void get_function_pointer(typename TaskType::function_type &ptr,
- typename TaskType::destroy_type &dtor) {
- ptr = TaskType::apply;
- dtor = TaskType::destroy;
- }
-
- private:
- const scheduler_type *scheduler;
-};
-
-extern template class TaskQueue<
- Kokkos::Experimental::HPX,
- typename Kokkos::Experimental::HPX::memory_space>;
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #if defined( KOKKOS_ENABLE_HPX ) && defined( KOKKOS_ENABLE_TASKDAG ) */
-#endif /* #ifndef KOKKOS_HPX_TASK_HPP */
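[Editor's sketch; not part of the original sources] The rough shape of the
TaskDAG user API these specializations serve, written as hedged pseudocode;
the scheduler-constructor arguments in particular are assumptions and may not
match this Kokkos version exactly.

  // using sched_t = Kokkos::TaskScheduler<Kokkos::Experimental::HPX>;
  // sched_t sched(sched_t::memory_space{}, /* pool bytes */ 1 << 20);
  // auto fut = Kokkos::host_spawn(Kokkos::TaskSingle(sched), MyFunctor{});
  // Kokkos::wait(sched);  // drives execute()/execute_task() above until done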
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-#ifndef KOKKOS_HPX_WORKGRAPHPOLICY_HPP
-#define KOKKOS_HPX_WORKGRAPHPOLICY_HPP
-
-#include <Kokkos_HPX.hpp>
-
-#include <hpx/local/algorithm.hpp>
-#include <hpx/local/execution.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::Experimental::HPX> {
- private:
- using Policy = Kokkos::WorkGraphPolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
-
- Policy m_policy;
- FunctorType m_functor;
-
- template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> execute_functor(
- const std::int32_t w) const noexcept {
- m_functor(w);
- }
-
- template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
- const std::int32_t w) const noexcept {
- const TagType t{};
- m_functor(t, w);
- }
-
- public:
- void execute() const {
- dispatch_execute_task(this, m_policy.space());
- m_policy.space().fence(
- "Kokkos::Experimental::Impl::HPX::ParallelFor<WorkGraphPolicy>: fence "
- "after kernel execution");
- }
-
- void execute_task() const {
- // See [note 1] in Kokkos_HPX.hpp for an explanation. The work graph policy
- // does not store an execution space instance, so we only need to reset the
- // parallel region count here.
- Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- for_loop(par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [this](int) {
- std::int32_t w = m_policy.pop_work();
- while (w != Policy::COMPLETED_TOKEN) {
- if (w != Policy::END_TOKEN) {
- execute_functor<WorkTag>(w);
- m_policy.completed_work(w);
- }
-
- w = m_policy.pop_work();
- }
- });
- }
-
- inline ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_policy(arg_policy), m_functor(arg_functor) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* KOKKOS_HPX_WORKGRAPHPOLICY_HPP */
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-#ifndef KOKKOS_DECLARE_HPP_
-#define KOKKOS_DECLARE_HPP_
-
-#include <decl/Kokkos_Declare_SERIAL.hpp>
-
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-#ifndef KOKKOS_FWD_HPP_
-#define KOKKOS_FWD_HPP_
-
-#include <fwd/Kokkos_Fwd_SERIAL.hpp>
-
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-#ifndef KOKKOS_POST_INCLUDE_HPP_
-#define KOKKOS_POST_INCLUDE_HPP_
-
-
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-#ifndef KOKKOS_SETUP_HPP_
-#define KOKKOS_SETUP_HPP_
-
-
-
-#endif
+++ /dev/null
-
-#if !defined(KOKKOS_MACROS_HPP) || defined(KOKKOS_CORE_CONFIG_H)
-#error \
- "Do not include KokkosCore_config.h directly; include Kokkos_Macros.hpp instead."
-#else
-#define KOKKOS_CORE_CONFIG_H
-#endif
-
-// KOKKOS_VERSION % 100 is the patch level
-// KOKKOS_VERSION / 100 % 100 is the minor version
-// KOKKOS_VERSION / 10000 is the major version
-#define KOKKOS_VERSION 30700
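[Editor's note] Decoding the value above with the scheme in the comment:
30700 / 10000 == 3 (major), 30700 / 100 % 100 == 7 (minor), and
30700 % 100 == 0 (patch), i.e. this bundled snapshot is Kokkos 3.7.0.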
-
-/* Execution Spaces */
-#define KOKKOS_ENABLE_SERIAL
-/* #undef KOKKOS_ENABLE_OPENMP */
-/* #undef KOKKOS_ENABLE_OPENACC */
-/* #undef KOKKOS_ENABLE_OPENMPTARGET */
-/* #undef KOKKOS_ENABLE_THREADS */
-/* #undef KOKKOS_ENABLE_CUDA */
-/* #undef KOKKOS_ENABLE_HIP */
-/* #undef KOKKOS_ENABLE_HPX */
-/* #undef KOKKOS_ENABLE_MEMKIND */
-/* #undef KOKKOS_ENABLE_LIBRT */
-/* #undef KOKKOS_ENABLE_SYCL */
-
-/* General Settings */
-#define KOKKOS_ENABLE_CXX14
-/* #undef KOKKOS_ENABLE_CXX17 */
-/* #undef KOKKOS_ENABLE_CXX20 */
-
-/* #undef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE */
-/* #undef KOKKOS_ENABLE_CUDA_UVM */
-/* #undef KOKKOS_ENABLE_CUDA_LAMBDA */
-/* #undef KOKKOS_ENABLE_CUDA_CONSTEXPR */
-/* #undef KOKKOS_ENABLE_CUDA_LDG_INTRINSIC */
-/* #undef KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC */
-/* #undef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE */
-/* #undef KOKKOS_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS */
-/* #undef KOKKOS_ENABLE_HPX_ASYNC_DISPATCH */
-/* #undef KOKKOS_ENABLE_DEBUG */
-/* #undef KOKKOS_ENABLE_DEBUG_DUALVIEW_MODIFY_CHECK */
-/* #undef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK */
-/* #undef KOKKOS_ENABLE_PROFILING_LOAD_PRINT */
-/* #undef KOKKOS_ENABLE_TUNING */
-#define KOKKOS_ENABLE_DEPRECATED_CODE_3
-#define KOKKOS_ENABLE_DEPRECATION_WARNINGS
-/* #undef KOKKOS_ENABLE_LARGE_MEM_TESTS */
-#define KOKKOS_ENABLE_COMPLEX_ALIGN
-#define KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-/* #undef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION */
-/* #undef KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION */
-
-/* TPL Settings */
-/* #undef KOKKOS_ENABLE_HWLOC */
-/* #undef KOKKOS_USE_LIBRT */
-/* #undef KOKKOS_ENABLE_HBWSPACE */
-/* #undef KOKKOS_ENABLE_LIBDL */
-/* #undef KOKKOS_ENABLE_LIBQUADMATH */
-/* #undef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND */
-
-/* #undef KOKKOS_COMPILER_CUDA_VERSION */
-
-/* #undef KOKKOS_ARCH_SSE42 */
-/* #undef KOKKOS_ARCH_ARMV80 */
-/* #undef KOKKOS_ARCH_ARMV8_THUNDERX */
-/* #undef KOKKOS_ARCH_ARMV81 */
-/* #undef KOKKOS_ARCH_ARMV8_THUNDERX2 */
-/* #undef KOKKOS_ARCH_AMD_AVX2 */
-/* #undef KOKKOS_ARCH_AVX */
-/* #undef KOKKOS_ARCH_AVX2 */
-/* #undef KOKKOS_ARCH_AVX512XEON */
-/* #undef KOKKOS_ARCH_KNC */
-/* #undef KOKKOS_ARCH_AVX512MIC */
-/* #undef KOKKOS_ARCH_POWER7 */
-/* #undef KOKKOS_ARCH_POWER8 */
-/* #undef KOKKOS_ARCH_POWER9 */
-/* #undef KOKKOS_ARCH_INTEL_GEN */
-/* #undef KOKKOS_ARCH_INTEL_DG1 */
-/* #undef KOKKOS_ARCH_INTEL_GEN9 */
-/* #undef KOKKOS_ARCH_INTEL_GEN11 */
-/* #undef KOKKOS_ARCH_INTEL_GEN12LP */
-/* #undef KOKKOS_ARCH_INTEL_XEHP */
-/* #undef KOKKOS_ARCH_INTEL_GPU */
-/* #undef KOKKOS_ARCH_KEPLER */
-/* #undef KOKKOS_ARCH_KEPLER30 */
-/* #undef KOKKOS_ARCH_KEPLER32 */
-/* #undef KOKKOS_ARCH_KEPLER35 */
-/* #undef KOKKOS_ARCH_KEPLER37 */
-/* #undef KOKKOS_ARCH_MAXWELL */
-/* #undef KOKKOS_ARCH_MAXWELL50 */
-/* #undef KOKKOS_ARCH_MAXWELL52 */
-/* #undef KOKKOS_ARCH_MAXWELL53 */
-/* #undef KOKKOS_ARCH_PASCAL */
-/* #undef KOKKOS_ARCH_PASCAL60 */
-/* #undef KOKKOS_ARCH_PASCAL61 */
-/* #undef KOKKOS_ARCH_VOLTA */
-/* #undef KOKKOS_ARCH_VOLTA70 */
-/* #undef KOKKOS_ARCH_VOLTA72 */
-/* #undef KOKKOS_ARCH_TURING75 */
-/* #undef KOKKOS_ARCH_AMPERE */
-/* #undef KOKKOS_ARCH_AMPERE80 */
-/* #undef KOKKOS_ARCH_AMPERE86 */
-/* #undef KOKKOS_ARCH_AMD_ZEN */
-/* #undef KOKKOS_ARCH_AMD_ZEN2 */
-/* #undef KOKKOS_ARCH_AMD_ZEN3 */
-/* #undef KOKKOS_ARCH_VEGA */
-/* #undef KOKKOS_ARCH_VEGA900 */
-/* #undef KOKKOS_ARCH_VEGA906 */
-/* #undef KOKKOS_ARCH_VEGA908 */
-/* #undef KOKKOS_ARCH_VEGA90A */
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
-#define KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
-
-#include <Kokkos_Core.hpp>
-#include <Kokkos_UniqueToken.hpp>
-namespace Kokkos {
-namespace Experimental {
-
-template <typename TeamPolicy>
-KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::AcquireTeamUniqueToken(
- AcquireTeamUniqueToken<TeamPolicy>::token_type t, team_member_type team)
- : my_token(t), my_team_acquired_val(team.team_scratch(0)), my_team(team) {
- Kokkos::single(Kokkos::PerTeam(my_team),
- [&]() { my_team_acquired_val() = my_token.acquire(); });
- my_team.team_barrier();
-
- my_acquired_val = my_team_acquired_val();
-}
-
-template <typename TeamPolicy>
-KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::~AcquireTeamUniqueToken() {
- my_team.team_barrier();
- Kokkos::single(Kokkos::PerTeam(my_team),
- [&]() { my_token.release(my_acquired_val); });
- my_team.team_barrier();
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif // KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
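[Editor's sketch; not part of the original sources] Intended usage of the RAII
type defined above, as hedged pseudocode (value() and the exact token setup
are assumptions): one thread per team acquires an id, the barrier makes it
visible team-wide, and the destructor releases it after the team is done.

  // Kokkos::Experimental::UniqueToken<ExecSpace> token;
  // Kokkos::parallel_for(team_policy, KOKKOS_LAMBDA(const member_type &team) {
  //   Kokkos::Experimental::AcquireTeamUniqueToken<policy_type> scoped(token,
  //                                                                    team);
  //   const auto id = scoped.value();  // same value on every team thread
  //   /* ... index a per-team resource with id ... */
  // });  // ~AcquireTeamUniqueToken: barrier, release, barrier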
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-
-/// \file Kokkos_Atomic.hpp
-/// \brief Atomic functions
-///
-/// This header file defines prototypes for the following atomic functions:
-/// - exchange
-/// - compare and exchange
-/// - add
-///
-/// Supported types include:
-/// - signed and unsigned 4 and 8 byte integers
-/// - float
-/// - double
-///
-/// They are implemented through GCC-compatible intrinsics, OpenMP
-/// directives, and native CUDA intrinsics.
-///
-/// Including this header file requires one of the following
-/// compilers:
-/// - NVCC (for CUDA device code only)
-/// - GCC (for host code only)
-/// - Intel (for host code only)
-/// - A compiler that supports OpenMP 3.1 (for host code only)
-
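[Editor's example; not part of the original sources] Typical use of the
functions this header declares: contended read-modify-write updates from
inside a parallel region. A sketch, assuming an initialized Kokkos runtime.

  #include <Kokkos_Core.hpp>

  // Histogram fill: many iterations may hit the same bin concurrently, so
  // the accumulation must be an atomic fetch-add.
  void histogram(Kokkos::View<double *> hist, int n, int nbins) {
    Kokkos::parallel_for("fill", n, KOKKOS_LAMBDA(int i) {
      Kokkos::atomic_add(&hist(i % nbins), 1.0);
    });
  }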
-#ifndef KOKKOS_ATOMIC_HPP
-#define KOKKOS_ATOMIC_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#include <Kokkos_Atomics_Desul_Wrapper.hpp>
-#include <Kokkos_Atomics_Desul_Volatile_Wrapper.hpp>
-#include <impl/Kokkos_Utilities.hpp>
-
-// Helper functions for places where we really should have called SeqCst
-// atomics anyway. These can go away when we call desul unconditionally.
-// Non-desul versions are below.
-namespace Kokkos {
-namespace Impl {
-using desul::MemoryOrderSeqCst;
-using desul::MemoryScopeDevice;
-
-template <class T>
-KOKKOS_INLINE_FUNCTION void desul_atomic_dec(T* dest, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return desul::atomic_dec(const_cast<T*>(dest), desul::MemoryOrderSeqCst(),
- desul::MemoryScopeDevice());
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION void desul_atomic_inc(T* dest, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return desul::atomic_inc(const_cast<T*>(dest), desul::MemoryOrderSeqCst(),
- desul::MemoryScopeDevice());
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION T
-desul_atomic_exchange(T* dest, const Kokkos::Impl::identity_t<T> val,
- MemoryOrderSeqCst, MemoryScopeDevice) {
- return desul::atomic_exchange(const_cast<T*>(dest), val,
- desul::MemoryOrderSeqCst(),
- desul::MemoryScopeDevice());
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION T desul_atomic_compare_exchange(
- T* dest, Kokkos::Impl::identity_t<const T> compare,
- Kokkos::Impl::identity_t<const T> val, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return desul::atomic_compare_exchange(dest, compare, val,
- desul::MemoryOrderSeqCst(),
- desul::MemoryScopeDevice());
-}
-
-} // namespace Impl
-} // namespace Kokkos
-#else
-
-#include <Kokkos_HostSpace.hpp>
-#include <impl/Kokkos_Traits.hpp>
-
-//----------------------------------------------------------------------------
-
-// Need to fix this for pure clang on windows
-#if defined(_WIN32)
-#define KOKKOS_ENABLE_WINDOWS_ATOMICS
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#define KOKKOS_ENABLE_CUDA_ATOMICS
-#if defined(KOKKOS_COMPILER_CLANG)
-#define KOKKOS_ENABLE_GNU_ATOMICS
-#endif
-#endif
-
-#else // _WIN32
-#if defined(KOKKOS_ENABLE_CUDA)
-
-// When compiling NVIDIA device code, CUDA atomics must be used:
-
-#define KOKKOS_ENABLE_CUDA_ATOMICS
-
-#elif defined(KOKKOS_ENABLE_HIP)
-
-#define KOKKOS_ENABLE_HIP_ATOMICS
-
-#endif
-
-#if !defined(KOKKOS_ENABLE_GNU_ATOMICS) && \
- !defined(KOKKOS_ENABLE_INTEL_ATOMICS) && \
- !defined(KOKKOS_ENABLE_OPENMP_ATOMICS) && \
- !defined(KOKKOS_ENABLE_STD_ATOMICS) && \
- !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-// A non-CUDA atomic implementation has not been pre-selected.
-// Choose the best implementation for the detected compiler.
-// Preference: GCC, INTEL, OMP31
-
-#if defined(KOKKOS_INTERNAL_NOT_PARALLEL)
-
-#define KOKKOS_ENABLE_SERIAL_ATOMICS
-
-#elif defined(KOKKOS_COMPILER_GNU) || defined(KOKKOS_COMPILER_CLANG) || \
- (defined(KOKKOS_COMPILER_NVCC) || defined(KOKKOS_COMPILER_IBM))
-
-#define KOKKOS_ENABLE_GNU_ATOMICS
-
-#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_CRAYC)
-
-#define KOKKOS_ENABLE_INTEL_ATOMICS
-
-#elif defined(_OPENMP) && (201107 <= _OPENMP)
-
-#define KOKKOS_ENABLE_OPENMP_ATOMICS
-
-#else
-
-#error "KOKKOS_ATOMICS_USE : Unsupported compiler"
-
-#endif
-
-#endif /* Not pre-selected atomic implementation */
-#endif
-
-#ifdef KOKKOS_ENABLE_CUDA
-#include <Cuda/Kokkos_Cuda_Locks.hpp>
-#endif
-
-namespace Kokkos {
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_add(volatile T* const dest, const T src);
-
-// Atomic increment
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_increment(volatile T* a);
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_decrement(volatile T* a);
-} // namespace Kokkos
-
-namespace Kokkos {
-
-inline const char* atomic_query_version() {
-#if defined(KOKKOS_ENABLE_CUDA_ATOMICS)
- return "KOKKOS_ENABLE_CUDA_ATOMICS";
-#elif defined(KOKKOS_ENABLE_GNU_ATOMICS)
- return "KOKKOS_ENABLE_GNU_ATOMICS";
-#elif defined(KOKKOS_ENABLE_INTEL_ATOMICS)
- return "KOKKOS_ENABLE_INTEL_ATOMICS";
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
- return "KOKKOS_ENABLE_OPENMP_ATOMICS";
-#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
- return "KOKKOS_ENABLE_WINDOWS_ATOMICS";
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- return "KOKKOS_ENABLE_SERIAL_ATOMICS";
-#else
-#error "No valid response for atomic_query_version!"
-#endif
-}
-
-} // namespace Kokkos
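[Editor's note] atomic_query_version() is handy for startup diagnostics; an
illustrative one-liner (assuming <cstdio> is included):

  std::printf("Kokkos atomics backend: %s\n", Kokkos::atomic_query_version());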
-
-//----------------------------------------------------------------------------
-// Atomic Memory Orders
-//
-// Implements Strongly-typed analogs of C++ standard memory orders
-#include "impl/Kokkos_Atomic_Memory_Order.hpp"
-
-#if defined(KOKKOS_ENABLE_HIP)
-#include <HIP/Kokkos_HIP_Atomic.hpp>
-#endif
-
-#if defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
-#include "impl/Kokkos_Atomic_Windows.hpp"
-#endif
-//----------------------------------------------------------------------------
-// Atomic Assembly
-//
-// Implements CAS128-bit in assembly
-
-#include "impl/Kokkos_Atomic_Assembly.hpp"
-
-//----------------------------------------------------------------------------
-// Memory fence
-//
-// All loads and stores from this thread will be globally consistent before
-// continuing
-//
-// void memory_fence() {...};
-#include "impl/Kokkos_Memory_Fence.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic exchange
-//
-// template< typename T >
-// T atomic_exchange( volatile T* const dest , const T val )
-// { T tmp = *dest ; *dest = val ; return tmp ; }
-
-#include "impl/Kokkos_Atomic_Exchange.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic compare-and-exchange
-//
-// template<class T>
-// bool atomic_compare_exchange_strong(volatile T* const dest, const T compare,
-// const T val) { bool equal = compare == *dest ; if ( equal ) { *dest = val ; }
-// return equal ; }
-
-#include "impl/Kokkos_Atomic_Compare_Exchange_Strong.hpp"
-
-#include "impl/Kokkos_Atomic_Generic.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic fetch and add
-//
-// template<class T>
-// T atomic_fetch_add(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest += val ; return tmp ; }
-
-#include "impl/Kokkos_Atomic_Fetch_Add.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic increment
-//
-// template<class T>
-// void atomic_increment(volatile T* const dest)
-// { ++(*dest); }
-
-#include "impl/Kokkos_Atomic_Increment.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic Decrement
-//
-// template<class T>
-// void atomic_decrement(volatile T* const dest)
-// { --(*dest); }
-
-#include "impl/Kokkos_Atomic_Decrement.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic fetch and sub
-//
-// template<class T>
-// T atomic_fetch_sub(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest -= val ; return tmp ; }
-
-#include "impl/Kokkos_Atomic_Fetch_Sub.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic fetch and or
-//
-// template<class T>
-// T atomic_fetch_or(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest = tmp | val ; return tmp ; }
-
-#include "impl/Kokkos_Atomic_Fetch_Or.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic fetch and and
-//
-// template<class T>
-// T atomic_fetch_and(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest = tmp & val ; return tmp ; }
-
-#include "impl/Kokkos_Atomic_Fetch_And.hpp"
-
-//----------------------------------------------------------------------------
-// Atomic MinMax
-//
-// template<class T>
-// T atomic_min(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest = min(*dest, val); return tmp ; }
-// template<class T>
-// T atomic_max(volatile T* const dest, const T val)
-// { T tmp = *dest ; *dest = max(*dest, val); return tmp ; }
-
-#include "impl/Kokkos_Atomic_MinMax.hpp"
-
-//----------------------------------------------------------------------------
-// Provide volatile_load and safe_load
-//
-// T volatile_load(T const volatile * const ptr);
-//
-// T const& safe_load(T const * const ptr);
-// XEON PHI:
-// T safe_load(T const * const ptr);
-
-#include "impl/Kokkos_Volatile_Load.hpp"
-
-//----------------------------------------------------------------------------
-// Provide atomic loads and stores with memory order semantics
-
-#include "impl/Kokkos_Atomic_Load.hpp"
-#include "impl/Kokkos_Atomic_Store.hpp"
-
-// Generic functions using the above defined functions
-#include "impl/Kokkos_Atomic_Generic_Secondary.hpp"
-//----------------------------------------------------------------------------
-// This atomic-style macro should be an inlined function, not a macro
-
-#if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
- !defined(__CUDA_ARCH__)
-
-#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) __builtin_prefetch(addr, 0, 0)
-#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) __builtin_prefetch(addr, 1, 0)
-
-#else
-
-#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
-#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
-
-#endif
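[Editor's sketch; not part of the original sources] The inline-function form
that the comment above asks for; prefetch_load_nt is a hypothetical name, not
a Kokkos symbol. Same behavior: a non-temporal prefetch where the builtin is
available, a no-op elsewhere.

  template <class T>
  inline void prefetch_load_nt(const T *addr) {
  #if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
      !defined(__CUDA_ARCH__)
    __builtin_prefetch(addr, 0, 0);
  #else
    (void)addr;  // keep the signature uniform across compilers
  #endif
  }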
-
-//----------------------------------------------------------------------------
-
-// Helper functions for places where we really should have called SeqCst
-// atomics anyway. These can go away when we call desul unconditionally.
-namespace Kokkos {
-namespace Impl {
-struct MemoryOrderSeqCst {};
-struct MemoryScopeDevice {};
-
-template <class T>
-KOKKOS_INLINE_FUNCTION void desul_atomic_dec(T* dest, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return Kokkos::atomic_decrement(dest);
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION void desul_atomic_inc(T* dest, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return Kokkos::atomic_increment(dest);
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION T
-desul_atomic_exchange(T* dest, Kokkos::Impl::identity_t<const T> val,
- MemoryOrderSeqCst, MemoryScopeDevice) {
- return Kokkos::atomic_exchange(dest, val);
-}
-
-template <class T>
-KOKKOS_INLINE_FUNCTION T desul_atomic_compare_exchange(
- T* dest, Kokkos::Impl::identity_t<const T> compare,
- Kokkos::Impl::identity_t<const T> val, MemoryOrderSeqCst,
- MemoryScopeDevice) {
- return Kokkos::atomic_compare_exchange(dest, compare, val);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* !KOKKOS_ENABLE_IMPL_DESUL_ATOMICS */
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
-#endif
-#endif /* KOKKOS_ATOMIC_HPP */
+++ /dev/null
-/* Kokkos v. 3.0 BSD 3-clause license header (NTESS, 2020); identical to the header reproduced earlier, boilerplate elided. */
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_ATOMICS_DESUL_CONFIG_HPP
-#define KOKKOS_ATOMICS_DESUL_CONFIG_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-#define DESUL_HAVE_OPENMP_ATOMICS
-#endif
-
-#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
- defined(KOKKOS_ARCH_PASCAL)
-#define DESUL_CUDA_ARCH_IS_PRE_VOLTA
-#endif
-
-#endif // KOKKOS_ATOMICS_DESUL_CONFIG_HPP
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_DESUL_ATOMICS_VOLATILE_WRAPPER_HPP_
-#define KOKKOS_DESUL_ATOMICS_VOLATILE_WRAPPER_HPP_
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#include <Kokkos_Atomics_Desul_Config.hpp>
-#include <desul/atomics.hpp>
-
-#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
-#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeCaller()
-#else
-#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeDevice()
-#endif
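[Editor's note] With this macro, every wrapper below expands in a parallel
build to, for example,

  // desul::atomic_fetch_add(const_cast<T*>(dest), val,
  //                         desul::MemoryOrderRelaxed(),
  //                         desul::MemoryScopeDevice());

while a serial-only build substitutes desul::MemoryScopeCaller(), which lets
desul skip cross-thread synchronization entirely.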
-
-// clang-format off
-namespace Kokkos {
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_load(volatile T* const dest) { return desul::atomic_load(const_cast<T*>(dest), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_store(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_store(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// atomic_fetch_op
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_add (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_add (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_USE_DOUBLE_ATOMICADD
-KOKKOS_INLINE_FUNCTION
-double atomic_fetch_add(volatile double* const dest, double val) {
- #ifdef __CUDA_ARCH__
- return atomicAdd(const_cast<double*>(dest),val);
- #else
- return desul::atomic_fetch_add (const_cast<double*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
- #endif
-}
-
-KOKKOS_INLINE_FUNCTION
-double atomic_fetch_sub(volatile double* const dest, double val) {
- #ifdef __CUDA_ARCH__
- return atomicAdd(const_cast<double*>(dest),-val);
- #else
- return desul::atomic_fetch_sub (const_cast<double*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
- #endif
-}
-#endif
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_sub (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_sub (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_max (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_max (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_min (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_min (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_mul (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mul (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_div (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_div (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_mod (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mod (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_and (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_and (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_or (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_or (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_xor (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_xor (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_nand(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_nand(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_lshift(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_lshift(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_rshift(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_rshift(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_inc(volatile T* const dest) { return desul::atomic_fetch_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_dec(volatile T* const dest) { return desul::atomic_fetch_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-
-// atomic_op_fetch
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_add_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_sub_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_max_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_min_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_mul_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_div_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_mod_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mod_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_and_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_and_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_or_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_or_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_xor_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_xor_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_nand_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_nand_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_lshift_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_lshift_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_rshift_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_rshift_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_inc_fetch(volatile T* const dest) { return desul::atomic_inc_fetch(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_dec_fetch(volatile T* const dest) { return desul::atomic_dec_fetch(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-
-// atomic_op
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_add(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_sub(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_mul(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_div(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_min(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_max(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// FIXME: Desul doesn't have atomic_and yet so call fetch_and
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_and(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_and (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// FIXME: Desul doesn't have atomic_or yet so call fetch_or
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_or (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_or (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_inc(volatile T* const dest) { return desul::atomic_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_dec(volatile T* const dest) { return desul::atomic_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_increment(volatile T* const dest) { return desul::atomic_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_decrement(volatile T* const dest) { return desul::atomic_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// Exchange
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_exchange(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_exchange(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-bool atomic_compare_exchange_strong(volatile T* const dest, T& expected, const T desired) {
- return desul::atomic_compare_exchange_strong(const_cast<T*>(dest),expected, desired,
- desul::MemoryOrderRelaxed(), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
-}
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_compare_exchange(volatile T* const dest, const T compare, const T desired) {
- return desul::atomic_compare_exchange(const_cast<T*>(dest),compare, desired,
- desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
-}
-
-}
-#undef KOKKOS_DESUL_MEM_SCOPE
-
-// clang-format on
-#endif // KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#endif
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
-#define KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
-#include <Kokkos_Macros.hpp>
-
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#include <Kokkos_Atomics_Desul_Config.hpp>
-#include <desul/atomics.hpp>
-
-#include <impl/Kokkos_Atomic_Memory_Order.hpp>
-#include <impl/Kokkos_Volatile_Load.hpp>
-
-// clang-format off
-namespace Kokkos {
-
-// FIXME: These functions are not exercised anywhere in the unit tests ...
-// ==========================================================
-inline const char* atomic_query_version() { return "KOKKOS_DESUL_ATOMICS"; }
-
-#if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
- !defined(__CUDA_ARCH__)
-
-#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) __builtin_prefetch(addr, 0, 0)
-#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) __builtin_prefetch(addr, 1, 0)
-
-#else
-
-#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
-#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
-
-#endif
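-
-// Hypothetical usage of the prefetch macros above (illustration only): they
-// emit a non-temporal prefetch hint on GCC-compatible hosts and compile to
-// nothing elsewhere, e.g.
-//
-//   void scale(double* x, int n) {
-//     for (int i = 0; i < n; ++i) {
-//       KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&x[i + 64]);  // hint; may be a no-op
-//       x[i] *= 2.0;
-//     }
-//   }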
-// ============================================================
-
-#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
-#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeCaller()
-#else
-#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeDevice()
-#endif
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_load(T* const dest) { return desul::atomic_load(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_store(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_store(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_assign(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { atomic_store(dest,val); }
-
-KOKKOS_INLINE_FUNCTION
-void memory_fence() {
- desul::atomic_thread_fence(desul::MemoryOrderSeqCst(), KOKKOS_DESUL_MEM_SCOPE);
-}
-
-KOKKOS_INLINE_FUNCTION
-void load_fence() { return desul::atomic_thread_fence(desul::MemoryOrderAcquire(), KOKKOS_DESUL_MEM_SCOPE); }
-
-KOKKOS_INLINE_FUNCTION
-void store_fence() { return desul::atomic_thread_fence(desul::MemoryOrderRelease(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// atomic_fetch_op
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_add (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_USE_DOUBLE_ATOMICADD
-KOKKOS_INLINE_FUNCTION
-double atomic_fetch_add(double* const dest, double val) {
- #ifdef __CUDA_ARCH__
- return atomicAdd(dest,val);
- #else
- return desul::atomic_fetch_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
- #endif
-}
-
-KOKKOS_INLINE_FUNCTION
-double atomic_fetch_sub(double* const dest, double val) {
- #ifdef __CUDA_ARCH__
- return atomicAdd(dest,-val);
- #else
- return desul::atomic_fetch_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
- #endif
-}
-#endif
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_sub (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_max (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_max (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_min (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_min (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_mul (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mul (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_div (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_div (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_mod (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mod (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_and (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_and (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_or (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_or (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_xor (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_xor (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_nand(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_nand(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_lshift(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_lshift(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_rshift(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_rshift(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_inc(T* const dest) { return desul::atomic_fetch_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_fetch_dec(T* const dest) { return desul::atomic_fetch_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-
-// atomic_op_fetch
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_add_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_sub_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_max_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_min_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_mul_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_div_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_mod_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mod_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_and_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_and_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_or_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_or_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_xor_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_xor_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_nand_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_nand_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_lshift_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_lshift_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_rshift_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_rshift_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_inc_fetch(T* const dest) { return desul::atomic_inc_fetch(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_dec_fetch(T* const dest) { return desul::atomic_dec_fetch(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-
-// atomic_op
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_add(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_sub(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_mul(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_div(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_min(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_max(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// FIXME: Desul doesn't have atomic_and yet so call fetch_and
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_and(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_and (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// FIXME: Desul doesn't have atomic_or yet so call fetch_or
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_or(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_or (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_inc(T* const dest) { return desul::atomic_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_dec(T* const dest) { return desul::atomic_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_increment(T* const dest) { return desul::atomic_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-void atomic_decrement(T* const dest) { return desul::atomic_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-// Exchange
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_exchange(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_exchange(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
-
-template<class T> KOKKOS_INLINE_FUNCTION
-bool atomic_compare_exchange_strong(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> expected, desul::Impl::dont_deduce_this_parameter_t<const T> desired) {
- T expected_ref = expected;
- return desul::atomic_compare_exchange_strong(dest, expected_ref, desired,
- desul::MemoryOrderRelaxed(), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
-}
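-
-// Note (sketch, not part of the original header): unlike std::atomic, this
-// overload takes 'expected' by value, so the caller's copy is not updated on
-// failure; a hypothetical retry loop re-reads the current value explicitly:
-//
-//   int old = Kokkos::atomic_load(&x);
-//   while (!Kokkos::atomic_compare_exchange_strong(&x, old, old + 1))
-//     old = Kokkos::atomic_load(&x);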
-
-template<class T> KOKKOS_INLINE_FUNCTION
-T atomic_compare_exchange(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> compare, desul::Impl::dont_deduce_this_parameter_t<const T> desired) {
- return desul::atomic_compare_exchange(dest, compare, desired,
- desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
-}
-
-namespace Impl {
-
- template<class MemoryOrder>
- struct KokkosToDesulMemoryOrder;
-
- template<>
- struct KokkosToDesulMemoryOrder<memory_order_seq_cst_t> {
- using type = desul::MemoryOrderSeqCst;
- };
- template<>
- struct KokkosToDesulMemoryOrder<memory_order_acquire_t> {
- using type = desul::MemoryOrderAcquire;
- };
- template<>
- struct KokkosToDesulMemoryOrder<memory_order_release_t> {
- using type = desul::MemoryOrderRelease;
- };
- template<>
- struct KokkosToDesulMemoryOrder<memory_order_acq_rel_t> {
- using type = desul::MemoryOrderAcqRel;
- };
- template<>
- struct KokkosToDesulMemoryOrder<memory_order_relaxed_t> {
- using type = desul::MemoryOrderRelaxed;
- };
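-
-  // The mapping above is one-to-one; for illustration:
-  //   static_assert(std::is_same<
-  //       KokkosToDesulMemoryOrder<memory_order_relaxed_t>::type,
-  //       desul::MemoryOrderRelaxed>::value, "");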
- template<class T, class MemOrderSuccess, class MemOrderFailure> KOKKOS_INLINE_FUNCTION
- bool atomic_compare_exchange_strong(T* const dest, T& expected, const T desired, MemOrderSuccess, MemOrderFailure) {
- return desul::atomic_compare_exchange_strong(dest, expected, desired,
- typename KokkosToDesulMemoryOrder<MemOrderSuccess>::type(),
- typename KokkosToDesulMemoryOrder<MemOrderFailure>::type(),
- KOKKOS_DESUL_MEM_SCOPE);
-
- }
- template<class T, class MemoryOrder>
- KOKKOS_INLINE_FUNCTION
- T atomic_load(const T* const src, MemoryOrder) {
- return desul::atomic_load(src, typename KokkosToDesulMemoryOrder<MemoryOrder>::type(), KOKKOS_DESUL_MEM_SCOPE);
- }
- template<class T, class MemoryOrder>
- KOKKOS_INLINE_FUNCTION
- void atomic_store(T* const dest, const T val, MemoryOrder) {
- return desul::atomic_store(dest, val, typename KokkosToDesulMemoryOrder<MemoryOrder>::type(), KOKKOS_DESUL_MEM_SCOPE);
- }
-}
-
-}
-
-#undef KOKKOS_DESUL_MEM_SCOPE
-
-// clang-format on
-#endif // KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_KOKKOS_EXTENTS_HPP
-#define KOKKOS_KOKKOS_EXTENTS_HPP
-
-#include <cstddef>
-#include <type_traits>
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-
-constexpr ptrdiff_t dynamic_extent = -1;
-
-template <ptrdiff_t... ExtentSpecs>
-struct Extents {
- /* TODO @enhancement flesh this out more */
-};
-
-template <class Exts, ptrdiff_t NewExtent>
-struct PrependExtent;
-
-template <ptrdiff_t... Exts, ptrdiff_t NewExtent>
-struct PrependExtent<Extents<Exts...>, NewExtent> {
- using type = Extents<NewExtent, Exts...>;
-};
-
-template <class Exts, ptrdiff_t NewExtent>
-struct AppendExtent;
-
-template <ptrdiff_t... Exts, ptrdiff_t NewExtent>
-struct AppendExtent<Extents<Exts...>, NewExtent> {
- using type = Extents<Exts..., NewExtent>;
-};
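-
-// Illustration (not in the original header): both helpers splice one extent
-// onto an Extents pack, e.g.
-//   using E = Extents<2, dynamic_extent>;
-//   static_assert(std::is_same<typename PrependExtent<E, 5>::type,
-//                              Extents<5, 2, dynamic_extent>>::value, "");
-//   static_assert(std::is_same<typename AppendExtent<E, 5>::type,
-//                              Extents<2, dynamic_extent, 5>>::value, "");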
-
-} // end namespace Experimental
-
-namespace Impl {
-
-namespace _parse_view_extents_impl {
-
-template <class T>
-struct _all_remaining_extents_dynamic : std::true_type {};
-
-template <class T>
-struct _all_remaining_extents_dynamic<T*> : _all_remaining_extents_dynamic<T> {
-};
-
-template <class T, unsigned N>
-struct _all_remaining_extents_dynamic<T[N]> : std::false_type {};
-
-template <class T, class Result, class = void>
-struct _parse_impl {
- using type = Result;
-};
-
-// We have to treat the case of int**[x] specially, since it *doesn't* go
-// backwards
-template <class T, ptrdiff_t... ExtentSpec>
-struct _parse_impl<T*, Kokkos::Experimental::Extents<ExtentSpec...>,
- std::enable_if_t<_all_remaining_extents_dynamic<T>::value>>
- : _parse_impl<T, Kokkos::Experimental::Extents<
- Kokkos::Experimental::dynamic_extent, ExtentSpec...>> {
-};
-
-// int*(*[x])[y] should also still work (meaning int[][x][][y])
-template <class T, ptrdiff_t... ExtentSpec>
-struct _parse_impl<
- T*, Kokkos::Experimental::Extents<ExtentSpec...>,
- std::enable_if_t<!_all_remaining_extents_dynamic<T>::value>> {
- using _next = Kokkos::Experimental::AppendExtent<
- typename _parse_impl<T, Kokkos::Experimental::Extents<ExtentSpec...>,
- void>::type,
- Kokkos::Experimental::dynamic_extent>;
- using type = typename _next::type;
-};
-
-template <class T, ptrdiff_t... ExtentSpec, unsigned N>
-struct _parse_impl<T[N], Kokkos::Experimental::Extents<ExtentSpec...>, void>
- : _parse_impl<
- T, Kokkos::Experimental::Extents<ExtentSpec...,
- ptrdiff_t(N)> // TODO @pedantic: this could be a narrowing cast
- > {};
-
-} // end namespace _parse_view_extents_impl
-
-template <class DataType>
-struct ParseViewExtents {
- using type = typename _parse_view_extents_impl ::_parse_impl<
- DataType, Kokkos::Experimental::Extents<>>::type;
-};
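-
-// Illustration (assumption, not in the original source): a View-style data
-// type such as double*[3] parses to one dynamic extent followed by a static
-// extent of 3:
-//   static_assert(
-//       std::is_same<ParseViewExtents<double* [3]>::type,
-//                    Kokkos::Experimental::Extents<
-//                        Kokkos::Experimental::dynamic_extent, 3>>::value,
-//       "");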
-
-template <class ValueType, ptrdiff_t Ext>
-struct ApplyExtent {
- using type = ValueType[Ext];
-};
-
-template <class ValueType>
-struct ApplyExtent<ValueType, Kokkos::Experimental::dynamic_extent> {
- using type = ValueType*;
-};
-
-template <class ValueType, unsigned N, ptrdiff_t Ext>
-struct ApplyExtent<ValueType[N], Ext> {
- using type = typename ApplyExtent<ValueType, Ext>::type[N];
-};
-
-template <class ValueType, ptrdiff_t Ext>
-struct ApplyExtent<ValueType*, Ext> {
- using type = ValueType * [Ext];
-};
-
-template <class ValueType>
-struct ApplyExtent<ValueType*, Kokkos::Experimental::dynamic_extent> {
- using type =
- typename ApplyExtent<ValueType,
- Kokkos::Experimental::dynamic_extent>::type*;
-};
-
-template <class ValueType, unsigned N>
-struct ApplyExtent<ValueType[N], Kokkos::Experimental::dynamic_extent> {
- using type =
- typename ApplyExtent<ValueType,
- Kokkos::Experimental::dynamic_extent>::type[N];
-};
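-
-// Illustration (not part of the header): ApplyExtent rebuilds a data type
-// from a value type and one extent, e.g.
-//   static_assert(std::is_same<ApplyExtent<int, 4>::type, int[4]>::value, "");
-//   static_assert(std::is_same<
-//       ApplyExtent<int, Kokkos::Experimental::dynamic_extent>::type,
-//       int*>::value, "");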
-
-} // end namespace Impl
-
-} // end namespace Kokkos
-
-#endif // KOKKOS_KOKKOS_EXTENTS_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_KOKKOS_GRAPH_FWD_HPP
-#define KOKKOS_KOKKOS_GRAPH_FWD_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-
-struct TypeErasedTag {};
-
-template <class ExecutionSpace>
-struct Graph;
-
-template <class ExecutionSpace, class Kernel = TypeErasedTag,
- class Predecessor = TypeErasedTag>
-class GraphNodeRef;
-
-} // end namespace Experimental
-} // end namespace Kokkos
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
-#endif
-#endif // KOKKOS_KOKKOS_GRAPH_FWD_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_HBWSPACE_HPP
-#define KOKKOS_HBWSPACE_HPP
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_HBWSPACE
-
-#include <Kokkos_HostSpace.hpp>
-
-namespace Kokkos {
-
-namespace Experimental {
-
-namespace Impl {
-
-/// \brief Initialize lock array for arbitrary size atomics.
-///
-/// Arbitrary atomics are implemented using a hash table of locks
-/// where the hash value is derived from the address of the
-/// object for which an atomic operation is performed.
-/// This function initializes the locks to zero (unset).
-void init_lock_array_hbw_space();
-
-/// \brief Acquire a lock for the address
-///
-/// This function tries to acquire the lock for the hash value derived
-/// from the provided ptr. If the lock is successfully acquired the
-/// function returns true. Otherwise it returns false.
-bool lock_address_hbw_space(void* ptr);
-
-/// \brief Release lock for the address
-///
-/// This function releases the lock for the hash value derived
-/// from the provided ptr. This function should only be called
-/// after previously successfully acquiring a lock with
-/// lock_address.
-void unlock_address_hbw_space(void* ptr);
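-
-// A hypothetical caller-side sketch (illustration only): an arbitrary-size
-// atomic update spins until the per-address lock is acquired, performs the
-// update, then releases the lock:
-//
-//   while (!lock_address_hbw_space(ptr)) { /* spin */ }
-//   *static_cast<BigValue*>(ptr) = updated;  // protected read-modify-write
-//   unlock_address_hbw_space(ptr);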
-
-} // namespace Impl
-
-} // namespace Experimental
-
-} // namespace Kokkos
-
-namespace Kokkos {
-
-namespace Experimental {
-
-/// \class HBWSpace
-/// \brief Memory management for host memory.
-///
-/// HBWSpace is a memory space that governs host memory. "Host"
-/// memory means the usual CPU-accessible memory.
-class HBWSpace {
- public:
- //! Tag this class as a kokkos memory space
- using memory_space = HBWSpace;
- using size_type = size_t;
-
- /// \typedef execution_space
- /// \brief Default execution space for this memory space.
- ///
- /// Every memory space has a default execution space. This is
- /// useful for things like initializing a View (which happens in
- /// parallel using the View's default execution space).
- using execution_space = Kokkos::DefaultHostExecutionSpace;
-
- //! This memory space preferred device_type
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- /**\brief Default memory space instance */
- HBWSpace();
- HBWSpace(const HBWSpace& rhs) = default;
- HBWSpace& operator=(const HBWSpace&) = default;
- ~HBWSpace() = default;
-
- /**\brief Non-default memory space instance to choose allocation mechanism,
- * if available */
-
- enum AllocationMechanism {
- STD_MALLOC,
- POSIX_MEMALIGN,
- POSIX_MMAP,
- INTEL_MM_ALLOC
- };
-
- explicit HBWSpace(const AllocationMechanism&);
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
-
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- public:
- /**\brief Return Name of the MemorySpace */
- static constexpr const char* name() { return "HBW"; }
-
- private:
- AllocationMechanism m_alloc_mech;
- friend class Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::HBWSpace, void>;
-};
-
-} // namespace Experimental
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>
- : public SharedAllocationRecord<void, void> {
- private:
- friend Kokkos::Experimental::HBWSpace;
-
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- static void deallocate(RecordBase*);
-
-#ifdef KOKKOS_ENABLE_DEBUG
- /**\brief Root record for tracked allocations from this HBWSpace instance */
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::Experimental::HBWSpace m_space;
-
- protected:
- ~SharedAllocationRecord()
-#if defined( \
- KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
- noexcept
-#endif
- ;
- SharedAllocationRecord() = default;
-
- SharedAllocationRecord(
- const Kokkos::Experimental::HBWSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate);
-
- public:
- inline std::string get_label() const {
- return std::string(RecordBase::head()->m_label);
- }
-
- KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
- const Kokkos::Experimental::HBWSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size) {
- KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
- arg_alloc_size);))
- KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
- return nullptr;))
- }
-
- /**\brief Allocate tracked memory in the space */
- static void* allocate_tracked(const Kokkos::Experimental::HBWSpace& arg_space,
- const std::string& arg_label,
- const size_t arg_alloc_size);
-
- /**\brief Reallocate tracked memory in the space */
- static void* reallocate_tracked(void* const arg_alloc_ptr,
- const size_t arg_alloc_size);
-
- /**\brief Deallocate tracked memory in the space */
- static void deallocate_tracked(void* const arg_alloc_ptr);
-
- static SharedAllocationRecord* get_record(void* arg_alloc_ptr);
-
- static void print_records(std::ostream&,
- const Kokkos::Experimental::HBWSpace&,
- bool detail = false);
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-static_assert(
- Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::HBWSpace,
- Kokkos::Experimental::HBWSpace>::assignable,
- "");
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::Experimental::HBWSpace> {
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HBWSpace, Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
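-
-// In words: a HostSpace handle may be assigned from HBW memory (both are
-// host-accessible), while the reverse assignment is disallowed even though
-// access works. A hypothetical compile-time check:
-//   static_assert(Kokkos::Impl::MemorySpaceAccess<
-//       Kokkos::HostSpace, Kokkos::Experimental::HBWSpace>::assignable, "");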
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <>
-struct DeepCopy<Kokkos::Experimental::HBWSpace, Kokkos::Experimental::HBWSpace,
- DefaultHostExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- hostspace_parallel_deepcopy(exec, dst, src, n);
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<Kokkos::Experimental::HBWSpace, Kokkos::Experimental::HBWSpace,
- ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace, "
- "Kokkos::Experimental::HBWSpace,ExecutionSpace::DeepCopy: fence "
- "before copy");
- hostspace_parallel_deepcopy_async(dst, src, n);
- }
-};
-
-template <>
-struct DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace,
- DefaultHostExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- hostspace_parallel_deepcopy(exec, dst, src, n);
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace, "
- "ExecutionSpace>::DeepCopy: fence before copy");
- hostspace_parallel_deepcopy_async(dst, src, n);
- }
-};
-
-template <>
-struct DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace,
- DefaultHostExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- hostspace_parallel_deepcopy(exec, dst, src, n);
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace, "
- "ExecutionSpace>::DeepCopy: fence before copy");
- hostspace_parallel_deepcopy_async(dst, src, n);
- }
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-#endif
-#endif // KOKKOS_HBWSPACE_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_HIP_HPP
-#define KOKKOS_HIP_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-
-#if defined(KOKKOS_ENABLE_HIP)
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#include <Kokkos_HIP_Space.hpp>
-#include <Kokkos_Parallel.hpp>
-
-#include <HIP/Kokkos_HIP_Half_Impl_Type.hpp>
-#include <HIP/Kokkos_HIP_Half_Conversion.hpp>
-#include <HIP/Kokkos_HIP_Instance.hpp>
-#include <HIP/Kokkos_HIP_MDRangePolicy.hpp>
-#include <HIP/Kokkos_HIP_Parallel_Range.hpp>
-#include <HIP/Kokkos_HIP_Parallel_MDRange.hpp>
-#include <HIP/Kokkos_HIP_Parallel_Team.hpp>
-#include <HIP/Kokkos_HIP_UniqueToken.hpp>
-
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_HIPSPACE_HPP
-#define KOKKOS_HIPSPACE_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-
-#if defined(KOKKOS_ENABLE_HIP)
-
-#include <iosfwd>
-#include <typeinfo>
-#include <string>
-#include <cstddef>
-
-#include <Kokkos_HostSpace.hpp>
-#include <Kokkos_Layout.hpp>
-#include <Kokkos_ScratchSpace.hpp>
-#include <HIP/Kokkos_HIP_Error.hpp> // HIP_SAFE_CALL
-
-#include <impl/Kokkos_Profiling_Interface.hpp>
-#include <impl/Kokkos_HostSharedPtr.hpp>
-#include <impl/Kokkos_InitializationSettings.hpp>
-
-#include <hip/hip_runtime_api.h>
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Impl {
-
-template <typename T>
-struct is_hip_type_space : public std::false_type {};
-
-} // namespace Impl
-
-namespace Experimental {
-/** \brief HIP on-device memory management */
-
-class HIPSpace {
- public:
- //! Tag this class as a kokkos memory space
- using memory_space = HIPSpace;
- using execution_space = Kokkos::Experimental::HIP;
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- using size_type = unsigned int;
-
- /*--------------------------------*/
-
- HIPSpace();
- HIPSpace(HIPSpace&& rhs) = default;
- HIPSpace(const HIPSpace& rhs) = default;
- HIPSpace& operator=(HIPSpace&& rhs) = default;
- HIPSpace& operator=(const HIPSpace& rhs) = default;
- ~HIPSpace() = default;
-
- /**\brief Allocate untracked memory in the hip space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the hip space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- public:
- /**\brief Return Name of the MemorySpace */
- static constexpr const char* name() { return "HIP"; }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- /*--------------------------------*/
- /** \brief Error reporting for a HostSpace attempt to access HIPSpace */
- KOKKOS_DEPRECATED static void access_error();
- KOKKOS_DEPRECATED static void access_error(const void* const);
-#endif
-
- private:
- int m_device; ///< Which HIP device
-
- friend class Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::HIPSpace, void>;
-};
-
-} // namespace Experimental
-
-template <>
-struct Impl::is_hip_type_space<Experimental::HIPSpace> : public std::true_type {
-};
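-
-// Illustration (not part of the header): the trait lets generic code detect
-// HIP memory spaces at compile time, e.g.
-//   static_assert(Kokkos::Impl::is_hip_type_space<
-//                     Kokkos::Experimental::HIPSpace>::value, "");
-//   static_assert(!Kokkos::Impl::is_hip_type_space<Kokkos::HostSpace>::value,
-//                 "");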
-
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-/** \brief Host memory that is accessible to HIP execution space
- * through HIP's host-pinned memory allocation.
- */
-class HIPHostPinnedSpace {
- public:
- //! Tag this class as a kokkos memory space
- /** \brief Memory is in HostSpace so use the HostSpace::execution_space */
- using execution_space = HostSpace::execution_space;
- using memory_space = HIPHostPinnedSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using size_type = unsigned int;
-
- /*--------------------------------*/
-
- HIPHostPinnedSpace();
- HIPHostPinnedSpace(HIPHostPinnedSpace&& rhs) = default;
- HIPHostPinnedSpace(const HIPHostPinnedSpace& rhs) = default;
- HIPHostPinnedSpace& operator=(HIPHostPinnedSpace&& rhs) = default;
- HIPHostPinnedSpace& operator=(const HIPHostPinnedSpace& rhs) = default;
- ~HIPHostPinnedSpace() = default;
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- public:
- /**\brief Return Name of the MemorySpace */
- static constexpr const char* name() { return "HIPHostPinned"; }
-
- /*--------------------------------*/
-};
-} // namespace Experimental
-
-template <>
-struct Impl::is_hip_type_space<Experimental::HIPHostPinnedSpace>
- : public std::true_type {};
-
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-/** \brief Memory that is accessible to HIP execution space
- * and host through HIP's memory page migration.
- */
-class HIPManagedSpace {
- public:
- //! Tag this class as a kokkos memory space
- /** \brief Memory is unified to both device and host via page migration
- * and therefore able to be used by HostSpace::execution_space and
- * DeviceSpace::execution_space.
- */
- using memory_space = HIPManagedSpace;
- using execution_space = Kokkos::Experimental::HIP;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using size_type = unsigned int;
-
- /*--------------------------------*/
-
- HIPManagedSpace();
- HIPManagedSpace(HIPManagedSpace&& rhs) = default;
- HIPManagedSpace(const HIPManagedSpace& rhs) = default;
- HIPManagedSpace& operator=(HIPManagedSpace&& rhs) = default;
- HIPManagedSpace& operator=(const HIPManagedSpace& rhs) = default;
- ~HIPManagedSpace() = default;
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- int m_device; ///< Which HIP device
- template <class, class, class, class>
- friend class LogicalMemorySpace;
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- public:
- /**\brief Return Name of the MemorySpace */
- static constexpr const char* name() { return "HIPManaged"; }
-
- /*--------------------------------*/
-};
-} // namespace Experimental
-
-template <>
-struct Impl::is_hip_type_space<Experimental::HIPManagedSpace>
- : public std::true_type {};
-
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Impl {
-
-static_assert(
- Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- Kokkos::Experimental::HIPSpace>::assignable,
- "");
-
-//----------------------------------------
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::Experimental::HIPSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::HIPHostPinnedSpace> {
- // HostSpace::execution_space == HIPHostPinnedSpace::execution_space
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::HIPManagedSpace> {
- // HostSpace::execution_space != HIPManagedSpace::execution_space
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace, Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- Kokkos::Experimental::HIPHostPinnedSpace> {
- // HIPSpace::execution_space != HIPHostPinnedSpace::execution_space
- enum : bool { assignable = false };
- enum : bool { accessible = true }; // HIPSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- Kokkos::Experimental::HIPManagedSpace> {
- // HIPSpace::execution_space == HIPManagedSpace::execution_space
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-// HIPHostPinnedSpace::execution_space == HostSpace::execution_space
-// HIPHostPinnedSpace accessible to both HIP and Host
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false }; // Cannot access from HIP
- enum : bool { accessible = true }; // HIPHostPinnedSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
- Kokkos::Experimental::HIPSpace> {
- enum : bool { assignable = false }; // Cannot access from Host
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
- Kokkos::Experimental::HIPManagedSpace> {
- enum : bool { assignable = false }; // different exec_space
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-// HIPManagedSpace::execution_space != HostSpace::execution_space
-// HIPManagedSpace accessible to both HIP and Host
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false };
-  enum : bool { accessible = false }; // HIPManagedSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
- Kokkos::Experimental::HIPSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
- Kokkos::Experimental::HIPHostPinnedSpace> {
- enum : bool { assignable = false }; // different exec_space
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
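-
-// These traits are compile-time constants; for instance, the specializations
-// above guarantee (illustrative):
-//
-//   static_assert(Kokkos::Impl::MemorySpaceAccess<
-//                     Kokkos::HostSpace,
-//                     Kokkos::Experimental::HIPHostPinnedSpace>::accessible,
-//                 "");
-//   static_assert(!Kokkos::Impl::MemorySpaceAccess<
-//                     Kokkos::HostSpace,
-//                     Kokkos::Experimental::HIPSpace>::accessible,
-//                 "");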
-
-} // namespace Impl
-//----------------------------------------
-
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Impl {
-
-void DeepCopyHIP(void* dst, const void* src, size_t n);
-void DeepCopyAsyncHIP(const Kokkos::Experimental::HIP& instance, void* dst,
- const void* src, size_t n);
-void DeepCopyAsyncHIP(void* dst, const void* src, size_t n);
-
-template <class MemSpace>
-struct DeepCopy<MemSpace, HostSpace, Kokkos::Experimental::HIP,
- std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncHIP(instance, dst, src, n);
- }
-};
-
-template <class MemSpace>
-struct DeepCopy<HostSpace, MemSpace, Kokkos::Experimental::HIP,
- std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncHIP(instance, dst, src, n);
- }
-};
-
-template <class MemSpace1, class MemSpace2>
-struct DeepCopy<MemSpace1, MemSpace2, Kokkos::Experimental::HIP,
- std::enable_if_t<is_hip_type_space<MemSpace1>::value &&
- is_hip_type_space<MemSpace2>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncHIP(instance, dst, src, n);
- }
-};
-
-template <class MemSpace1, class MemSpace2, class ExecutionSpace>
-struct DeepCopy<
- MemSpace1, MemSpace2, ExecutionSpace,
- std::enable_if_t<
- is_hip_type_space<MemSpace1>::value &&
- is_hip_type_space<MemSpace2>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopyHIP(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncHIP(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
- MemSpace2::name() +
- "Space, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
-
-template <class MemSpace, class ExecutionSpace>
-struct DeepCopy<
- MemSpace, HostSpace, ExecutionSpace,
- std::enable_if_t<
- is_hip_type_space<MemSpace>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopyHIP(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncHIP(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
- "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
-
-template <class MemSpace, class ExecutionSpace>
-struct DeepCopy<
- HostSpace, MemSpace, ExecutionSpace,
- std::enable_if_t<
- is_hip_type_space<MemSpace>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopyHIP(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncHIP(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
- "Space, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
-} // namespace Impl
-} // namespace Kokkos
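-
-// In user code these specializations sit behind Kokkos::deep_copy, e.g.
-// (illustrative):
-//
-//   Kokkos::View<double*, Kokkos::Experimental::HIPSpace> dev("dev", n);
-//   auto host = Kokkos::create_mirror_view(dev);  // HostSpace mirror
-//   Kokkos::deep_copy(host, dev);  // synchronous device-to-host copy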
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>
- : public HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::HIPSpace> {
- private:
- friend class SharedAllocationRecordCommon<Kokkos::Experimental::HIPSpace>;
- friend class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::HIPSpace>;
- using base_t = HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::HIPSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
-#ifdef KOKKOS_ENABLE_DEBUG
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::Experimental::HIPSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec*/,
- const Kokkos::Experimental::HIPSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::HIP& exec_space,
- const Kokkos::Experimental::HIPSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace, void>
- : public SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPHostPinnedSpace> {
- private:
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPHostPinnedSpace>;
- using base_t =
- SharedAllocationRecordCommon<Kokkos::Experimental::HIPHostPinnedSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
-#ifdef KOKKOS_ENABLE_DEBUG
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::Experimental::HIPHostPinnedSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace, void>
- : public SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPManagedSpace> {
- private:
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::HIPManagedSpace>;
- using base_t =
- SharedAllocationRecordCommon<Kokkos::Experimental::HIPManagedSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
-#ifdef KOKKOS_ENABLE_DEBUG
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::Experimental::HIPManagedSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::HIPManagedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::HIPManagedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-class HIPInternal;
-}
-/// \class HIP
-/// \brief Kokkos execution space that uses HIP to run on AMD GPUs.
-class HIP {
- public:
- //------------------------------------
- //! \name Type declarations that all Kokkos devices must provide.
- //@{
-
- //! Tag this class as a kokkos execution space
- using execution_space = HIP;
- using memory_space = HIPSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- using array_layout = LayoutLeft;
- using size_type = HIPSpace::size_type;
-
- using scratch_memory_space = ScratchMemorySpace<HIP>;
-
- HIP();
- HIP(hipStream_t stream, bool manage_stream = false);
-
- //@}
- //------------------------------------
- //! \name Functions that all Kokkos devices must implement.
- //@{
-
- KOKKOS_INLINE_FUNCTION static int in_parallel() {
-#if defined(__HIP_DEVICE_COMPILE__)
- return true;
-#else
- return false;
-#endif
- }
-
- /** \brief Wait until all dispatched functors complete.
- *
- * The parallel_for or parallel_reduce dispatch of a functor may return
- * asynchronously, before the functor completes. This method does not return
- * until all dispatched functors on this device have completed.
- */
- static void impl_static_fence(const std::string& name);
-
- void fence(const std::string& name =
- "Kokkos::HIP::fence(): Unnamed Instance Fence") const;
-
- hipStream_t hip_stream() const;
-
- /// \brief Print configuration information to the given output stream.
- void print_configuration(std::ostream& os, bool verbose = false) const;
-
- /// \brief Free any resources being consumed by the device.
- static void impl_finalize();
-
-  /** \brief Return the id of the HIP device used by this execution space. */
- int hip_device() const;
- static hipDeviceProp_t const& hip_device_prop();
-
- static void impl_initialize(InitializationSettings const&);
-
- static int impl_is_initialized();
-
- // static size_type device_arch();
-
- static size_type detect_device_count();
-
- static int concurrency();
- static const char* name();
-
- inline Impl::HIPInternal* impl_internal_space_instance() const {
- return m_space_instance.get();
- }
-
- uint32_t impl_instance_id() const noexcept;
-
- private:
- Kokkos::Impl::HostSharedPtr<Impl::HIPInternal> m_space_instance;
-};
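-
-// Sketch: wrapping an existing HIP stream in an execution space instance
-// (illustrative; with manage_stream = false the caller keeps ownership of the
-// stream, and n and functor stand in for user code):
-//
-//   hipStream_t stream;
-//   hipStreamCreate(&stream);
-//   Kokkos::Experimental::HIP hip(stream);
-//   Kokkos::parallel_for(
-//       Kokkos::RangePolicy<Kokkos::Experimental::HIP>(hip, 0, n), functor);
-//   hip.fence();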
-} // namespace Experimental
-namespace Tools {
-namespace Experimental {
-template <>
-struct DeviceTypeTraits<Kokkos::Experimental::HIP> {
- static constexpr DeviceType id = DeviceType::HIP;
- static int device_id(const Kokkos::Experimental::HIP& exec) {
- return exec.hip_device();
- }
-};
-} // namespace Experimental
-} // namespace Tools
-
-namespace Impl {
-template <class DT, class... DP>
-struct ZeroMemset<Kokkos::Experimental::HIP, DT, DP...> {
- ZeroMemset(const Kokkos::Experimental::HIP& exec_space,
- const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- KOKKOS_IMPL_HIP_SAFE_CALL(hipMemsetAsync(
- dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type),
- exec_space.hip_stream()));
- }
-
- ZeroMemset(const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- KOKKOS_IMPL_HIP_SAFE_CALL(
- hipMemset(dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type)));
- }
-};
-} // namespace Impl
-} // namespace Kokkos
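-
-// ZeroMemset backs Kokkos::deep_copy of the zero scalar into a contiguous
-// view, e.g. (illustrative):
-//
-//   Kokkos::View<int*, Kokkos::Experimental::HIPSpace> v("v", n);
-//   Kokkos::deep_copy(v, 0);  // lowered to hipMemset / hipMemsetAsync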
-
-namespace Kokkos {
-namespace Impl {
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
- Kokkos::Experimental::HIP::scratch_memory_space> {
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = false };
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* #if defined( KOKKOS_ENABLE_HIP ) */
-#endif /* #define KOKKOS_HIPSPACE_HPP */
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_HPX_HPP
-#define KOKKOS_HPX_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_HPX)
-
-#include <Kokkos_Core_fwd.hpp>
-
-#include <cstddef>
-#include <iosfwd>
-
-#ifdef KOKKOS_ENABLE_HBWSPACE
-#include <Kokkos_HBWSpace.hpp>
-#endif
-
-#include <Kokkos_HostSpace.hpp>
-#include <Kokkos_Layout.hpp>
-#include <Kokkos_MemoryTraits.hpp>
-#include <Kokkos_Parallel.hpp>
-#include <Kokkos_ScratchSpace.hpp>
-#include <Kokkos_TaskScheduler.hpp>
-#include <impl/Kokkos_ConcurrentBitset.hpp>
-#include <impl/Kokkos_FunctorAnalysis.hpp>
-#include <impl/Kokkos_Tools.hpp>
-#include <impl/Kokkos_TaskQueue.hpp>
-#include <impl/Kokkos_InitializationSettings.hpp>
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-#include <hpx/local/algorithm.hpp>
-#include <hpx/local/barrier.hpp>
-#include <hpx/local/condition_variable.hpp>
-#include <hpx/local/execution.hpp>
-#include <hpx/local/future.hpp>
-#include <hpx/local/init.hpp>
-#include <hpx/local/mutex.hpp>
-#include <hpx/local/runtime.hpp>
-#include <hpx/local/thread.hpp>
-
-#include <Kokkos_UniqueToken.hpp>
-
-#include <functional>
-#include <iostream>
-#include <memory>
-#include <sstream>
-#include <type_traits>
-#include <vector>
-
-// There are currently two different implementations for the parallel dispatch
-// functions:
-//
-// - 0: The HPX way. Unfortunately, this currently comes with unnecessary
-//      overheads, so there is also
-// - 1: The manual way. This uses for_loop, but only spawns one task per worker
-//      thread. This is significantly faster in most cases.
-//
-// In the long run 0 should be the preferred implementation, but until HPX is
-// improved 1 will be the default.
-#ifndef KOKKOS_HPX_IMPLEMENTATION
-#define KOKKOS_HPX_IMPLEMENTATION 1
-#endif
-
-#if (KOKKOS_HPX_IMPLEMENTATION < 0) || (KOKKOS_HPX_IMPLEMENTATION > 1)
-#error "You have chosen an invalid value for KOKKOS_HPX_IMPLEMENTATION"
-#endif
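-
-// To opt in to the HPX-native implementation instead (illustrative), define
-// the macro before any Kokkos header is included, e.g. on the compile line:
-//
-//   -DKOKKOS_HPX_IMPLEMENTATION=0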
-
-// [note 1]
-//
-// When using the asynchronous backend and independent instances, we explicitly
-// reset the shared data at the end of a parallel task (execute_task). We do
-// this to avoid circular references with shared pointers that would otherwise
-// never be released.
-//
-// The HPX instance holds shared data for the instance in a shared_ptr. One of
-// the pieces of shared data is the future that we use to sequence parallel
-// dispatches. When a parallel task is launched, a copy of the closure
-// (ParallelFor, ParallelReduce, etc.) is captured in the task. The closure
-// also holds the policy, the policy holds the HPX instance, the instance holds
-// the shared data (for use of buffers in the parallel task). When attaching a
-// continuation to a future, the continuation is stored in the future (shared
-// state). This means that there is a cycle future -> continuation -> closure
-// -> policy -> HPX -> shared data -> future. We break this by releasing the
-// shared data early, as (the pointer to) the shared data will not be used
-// anymore by the closure at the end of execute_task.
-//
-// We also mark the shared instance data as mutable so that we can reset it
-// from the const execute_task member function.
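-//
-// The same pattern in miniature (plain C++, illustrative only):
-//
-//   struct node { std::shared_ptr<node> next; };
-//   auto a = std::make_shared<node>();
-//   a->next = a;       // reference cycle: a's use_count never reaches zero
-//   a->next.reset();   // breaking one edge lets the chain be released, which
-//                      // is what execute_task does with the shared data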
-
-namespace Kokkos {
-namespace Impl {
-class thread_buffer {
- static constexpr std::size_t m_cache_line_size = 64;
-
- std::size_t m_num_threads;
- std::size_t m_size_per_thread;
- std::size_t m_size_total;
- char *m_data;
-
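-  // Rounds size up to the next multiple of the cache line size; e.g. a
-  // requested 100 bytes becomes 128 (= 2 * 64), so consecutive per-thread
-  // slots never share a cache line (avoids false sharing).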
- void pad_to_cache_line(std::size_t &size) {
- size = ((size + m_cache_line_size - 1) / m_cache_line_size) *
- m_cache_line_size;
- }
-
- public:
- thread_buffer()
- : m_num_threads(0),
- m_size_per_thread(0),
- m_size_total(0),
- m_data(nullptr) {}
-  thread_buffer(const std::size_t num_threads,
-                const std::size_t size_per_thread)
-      : m_num_threads(0), m_size_per_thread(0), m_size_total(0),
-        m_data(nullptr) {
-    // members are zero-initialized above so that resize() reads a valid
-    // m_size_total
-    resize(num_threads, size_per_thread);
-  }
- ~thread_buffer() { delete[] m_data; }
-
- thread_buffer(const thread_buffer &) = delete;
- thread_buffer(thread_buffer &&) = delete;
- thread_buffer &operator=(const thread_buffer &) = delete;
-  thread_buffer &operator=(thread_buffer &&) = delete;
-
- void resize(const std::size_t num_threads,
- const std::size_t size_per_thread) {
- m_num_threads = num_threads;
- m_size_per_thread = size_per_thread;
-
- pad_to_cache_line(m_size_per_thread);
-
- std::size_t size_total_new = m_num_threads * m_size_per_thread;
-
- if (m_size_total < size_total_new) {
- delete[] m_data;
- m_data = new char[size_total_new];
- m_size_total = size_total_new;
- }
- }
-
- char *get(std::size_t thread_num) {
- assert(thread_num < m_num_threads);
- if (m_data == nullptr) {
- return nullptr;
- }
- return &m_data[thread_num * m_size_per_thread];
- }
-
- std::size_t size_per_thread() const noexcept { return m_size_per_thread; }
- std::size_t size_total() const noexcept { return m_size_total; }
-};
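-
-// Minimal usage sketch (illustrative):
-//
-//   Kokkos::Impl::thread_buffer buf;
-//   buf.resize(/*num_threads=*/4, /*size_per_thread=*/100);  // padded to 128
-//   char *slot0 = buf.get(0);  // start of thread 0's 128-byte slot
-//   char *slot1 = buf.get(1);  // begins exactly 128 bytes after slot0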
-} // namespace Impl
-
-namespace Experimental {
-class HPX {
- public:
- static constexpr uint32_t impl_default_instance_id() { return 1; }
-
- private:
- static bool m_hpx_initialized;
- uint32_t m_instance_id = impl_default_instance_id();
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- static std::atomic<uint32_t> m_next_instance_id;
-
- public:
- enum class instance_mode { default_, independent };
-
- private:
- static uint32_t m_active_parallel_region_count;
- static hpx::spinlock m_active_parallel_region_count_mutex;
- static hpx::condition_variable_any m_active_parallel_region_count_cond;
-
- struct instance_data {
- instance_data() = default;
- instance_data(hpx::shared_future<void> future) : m_future(future) {}
- Kokkos::Impl::thread_buffer m_buffer;
- hpx::shared_future<void> m_future = hpx::make_ready_future<void>();
- hpx::spinlock m_future_mutex;
- };
-
- mutable std::shared_ptr<instance_data> m_independent_instance_data;
- static instance_data m_default_instance_data;
-
- std::reference_wrapper<Kokkos::Impl::thread_buffer> m_buffer;
- std::reference_wrapper<hpx::shared_future<void>> m_future;
- std::reference_wrapper<hpx::spinlock> m_future_mutex;
-#else
- static Kokkos::Impl::thread_buffer m_default_buffer;
-#endif
-
- public:
- using execution_space = HPX;
- using memory_space = HostSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using array_layout = LayoutRight;
- using size_type = memory_space::size_type;
- using scratch_memory_space = ScratchMemorySpace<HPX>;
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
-  HPX() noexcept
-      : m_instance_id(impl_default_instance_id()),
- m_buffer(m_default_instance_data.m_buffer),
- m_future(m_default_instance_data.m_future),
- m_future_mutex(m_default_instance_data.m_future_mutex) {}
-
- HPX(instance_mode mode)
- : m_instance_id(mode == instance_mode::independent
- ? m_next_instance_id++
- : impl_default_instance_id()),
- m_independent_instance_data(mode == instance_mode::independent
- ? (new instance_data())
- : nullptr),
- m_buffer(mode == instance_mode::independent
- ? m_independent_instance_data->m_buffer
- : m_default_instance_data.m_buffer),
- m_future(mode == instance_mode::independent
- ? m_independent_instance_data->m_future
- : m_default_instance_data.m_future),
- m_future_mutex(mode == instance_mode::independent
- ? m_independent_instance_data->m_future_mutex
- : m_default_instance_data.m_future_mutex) {}
-
-  HPX(hpx::shared_future<void> future)
-      : m_instance_id(m_next_instance_id++),
-        m_independent_instance_data(new instance_data(future)),
- m_buffer(m_independent_instance_data->m_buffer),
- m_future(m_independent_instance_data->m_future),
- m_future_mutex(m_independent_instance_data->m_future_mutex) {}
-
- HPX(HPX &&other) = default;
- HPX &operator=(HPX &&other) = default;
- HPX(const HPX &other) = default;
- HPX &operator=(const HPX &other) = default;
-#else
- HPX() noexcept {}
-#endif
-
- void print_configuration(std::ostream &os, bool /*verbose*/ = false) const {
- os << "HPX backend\n";
- os << "HPX Execution Space:\n";
- os << " KOKKOS_ENABLE_HPX: yes\n";
- os << "\nHPX Runtime Configuration:\n";
- }
- uint32_t impl_instance_id() const noexcept { return m_instance_id; }
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- static bool in_parallel(HPX const &instance = HPX()) noexcept {
- return !instance.impl_get_future().is_ready();
- }
-#else
- static bool in_parallel(HPX const & = HPX()) noexcept { return false; }
-#endif
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- static void impl_decrement_active_parallel_region_count() {
- std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
- if (--m_active_parallel_region_count == 0) {
- l.unlock();
- m_active_parallel_region_count_cond.notify_all();
-    }
- }
-
- static void impl_increment_active_parallel_region_count() {
- std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
- ++m_active_parallel_region_count;
- }
-#endif
-
- void fence(
- const std::string &name =
- "Kokkos::Experimental::HPX::fence: Unnamed Instance Fence") const {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HPX>(
- name,
- Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
- impl_instance_id()},
- [&]() {
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- impl_get_future().wait();
- // Reset the future to free variables that may have been captured in
- // parallel regions.
- impl_get_future() = hpx::make_ready_future<void>();
-#endif
- });
- }
-
- static void impl_static_fence(const std::string &name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::HPX>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- [&]() {
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- std::unique_lock<hpx::spinlock> l(
- m_active_parallel_region_count_mutex);
- m_active_parallel_region_count_cond.wait(
- l, [&]() { return m_active_parallel_region_count == 0; });
- // Reset the future to free variables that may have been captured in
- // parallel regions (however, we don't have access to futures from
- // instances other than the default instances, they will only be
- // released by fence).
- HPX().impl_get_future() = hpx::make_ready_future<void>();
-#endif
- });
- }
-
- static hpx::execution::parallel_executor impl_get_executor() {
- return hpx::execution::parallel_executor();
- }
-
- static bool is_asynchronous(HPX const & = HPX()) noexcept {
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- return true;
-#else
- return false;
-#endif
- }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- static std::vector<HPX> partition(...) {
- Kokkos::abort(
- "Kokkos::Experimental::HPX::partition_master: can't partition an HPX "
- "instance\n");
- return std::vector<HPX>();
- }
-
- template <typename F>
- KOKKOS_DEPRECATED static void partition_master(
- F const &, int requested_num_partitions = 0, int = 0) {
- if (requested_num_partitions > 1) {
- Kokkos::abort(
- "Kokkos::Experimental::HPX::partition_master: can't partition an "
- "HPX instance\n");
- }
- }
-#endif
-
- static int concurrency();
- static void impl_initialize(InitializationSettings const &);
- static bool impl_is_initialized() noexcept;
- static void impl_finalize();
-
- static int impl_thread_pool_size() noexcept {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- if (rt == nullptr) {
- return 0;
- } else {
- if (hpx::threads::get_self_ptr() == nullptr) {
- return hpx::resource::get_thread_pool(0).get_os_thread_count();
- } else {
- return hpx::this_thread::get_pool()->get_os_thread_count();
- }
- }
- }
-
- static int impl_thread_pool_rank() noexcept {
- hpx::runtime *rt = hpx::get_runtime_ptr();
- if (rt == nullptr) {
- return 0;
- } else {
- if (hpx::threads::get_self_ptr() == nullptr) {
- return 0;
- } else {
- return hpx::this_thread::get_pool()->get_pool_index();
- }
- }
- }
-
- static int impl_thread_pool_size(int depth) {
- if (depth == 0) {
- return impl_thread_pool_size();
- } else {
- return 1;
- }
- }
-
- static int impl_max_hardware_threads() noexcept {
- return hpx::threads::hardware_concurrency();
- }
-
- static int impl_hardware_thread_id() noexcept {
- return hpx::get_worker_thread_num();
- }
-
- Kokkos::Impl::thread_buffer &impl_get_buffer() const noexcept {
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- return m_buffer.get();
-#else
- return m_default_buffer;
-#endif
- }
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- hpx::shared_future<void> &impl_get_future() const noexcept {
- return m_future;
- }
-
- hpx::spinlock &impl_get_future_mutex() const noexcept {
- return m_future_mutex;
- }
-#endif
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
- struct KOKKOS_ATTRIBUTE_NODISCARD reset_on_exit_parallel {
- HPX const &m_space;
- reset_on_exit_parallel(HPX const &space) : m_space(space) {}
- ~reset_on_exit_parallel() {
- // See [note 1] for an explanation. m_independent_instance_data is
- // marked mutable.
- m_space.m_independent_instance_data.reset();
-
- HPX::impl_decrement_active_parallel_region_count();
- }
- };
-
- // This struct is identical to the above except it does not reset the shared
- // data. It does, however, still decrement the parallel region count. It is
- // meant for use in parallel regions which do not capture the execution space
- // instance.
- struct KOKKOS_ATTRIBUTE_NODISCARD reset_count_on_exit_parallel {
- reset_count_on_exit_parallel() {}
- ~reset_count_on_exit_parallel() {
- HPX::impl_decrement_active_parallel_region_count();
- }
- };
-#else
- struct KOKKOS_ATTRIBUTE_NODISCARD reset_on_exit_parallel {
- reset_on_exit_parallel(HPX const &) {}
- ~reset_on_exit_parallel() {}
- };
-
- struct KOKKOS_ATTRIBUTE_NODISCARD reset_count_on_exit_parallel {
- reset_count_on_exit_parallel() {}
- ~reset_count_on_exit_parallel() {}
- };
-#endif
-
- static constexpr const char *name() noexcept { return "HPX"; }
-};
-} // namespace Experimental
-
-namespace Tools {
-namespace Experimental {
-template <>
-struct DeviceTypeTraits<Kokkos::Experimental::HPX> {
- static constexpr DeviceType id = DeviceType::HPX;
- static int device_id(const Kokkos::Experimental::HPX &) { return 0; }
-};
-} // namespace Experimental
-} // namespace Tools
-
-namespace Impl {
-
-#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
-template <typename Closure>
-inline void dispatch_execute_task(Closure *closure,
- Kokkos::Experimental::HPX const &instance,
- bool force_synchronous = false) {
- Kokkos::Experimental::HPX::impl_increment_active_parallel_region_count();
-
- Closure closure_copy = *closure;
-
- {
- std::unique_lock<hpx::spinlock> l(instance.impl_get_future_mutex());
- hpx::util::ignore_lock(&instance.impl_get_future_mutex());
- hpx::shared_future<void> &fut = instance.impl_get_future();
-
- fut = fut.then(hpx::execution::parallel_executor(
- hpx::threads::thread_schedule_hint(0)),
- [closure_copy](hpx::shared_future<void> &&) {
- return closure_copy.execute_task();
- });
- }
-
- if (force_synchronous) {
- instance.fence(
- "Kokkos::Experimental::Impl::HPX::dispatch_execute_task: fence due to "
- "forced syncronizations");
- }
-}
-#else
-template <typename Closure>
-inline void dispatch_execute_task(Closure *closure,
- Kokkos::Experimental::HPX const &,
- bool = false) {
- closure->execute_task();
-}
-#endif
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::HPX::memory_space,
- Kokkos::Experimental::HPX::scratch_memory_space> {
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = false };
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Experimental {
-template <>
-class UniqueToken<HPX, UniqueTokenScope::Instance> {
- private:
- using buffer_type = Kokkos::View<uint32_t *, Kokkos::HostSpace>;
- int m_count;
- buffer_type m_buffer_view;
- uint32_t volatile *m_buffer;
-
- public:
- using execution_space = HPX;
- using size_type = int;
-
-  /// \brief Create an object sized for the concurrency of the given instance.
-  ///
-  /// This object should not be shared between instances.
- UniqueToken(execution_space const & = execution_space()) noexcept
- : m_count(execution_space::impl_max_hardware_threads()),
- m_buffer_view(buffer_type()),
- m_buffer(nullptr) {}
-
- UniqueToken(size_type max_size, execution_space const & = execution_space())
- : m_count(max_size > execution_space::impl_max_hardware_threads()
- ? execution_space::impl_max_hardware_threads()
- : max_size),
- m_buffer_view(
- max_size > execution_space::impl_max_hardware_threads()
- ? buffer_type()
- : buffer_type("UniqueToken::m_buffer_view",
- ::Kokkos::Impl::concurrent_bitset::buffer_bound(
- m_count))),
- m_buffer(m_buffer_view.data()) {}
-
- /// \brief upper bound for acquired values, i.e. 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int size() const noexcept { return m_count; }
-
- /// \brief acquire value such that 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int acquire() const noexcept {
- KOKKOS_IF_ON_HOST((
- if (m_buffer == nullptr) {
- return execution_space::impl_hardware_thread_id();
- } else {
- const ::Kokkos::pair<int, int> result =
- ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
- m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
-
- if (result.first < 0) {
- ::Kokkos::abort(
- "UniqueToken<HPX> failure to acquire tokens, no tokens "
- "available");
- }
- return result.first;
- }))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
- /// \brief release a value acquired by generate
- KOKKOS_INLINE_FUNCTION
- void release(int i) const noexcept {
- KOKKOS_IF_ON_HOST((if (m_buffer != nullptr) {
- ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
- }))
-
- KOKKOS_IF_ON_DEVICE(((void)i;))
- }
-};
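-
-// Typical use of the instance-scoped token (illustrative; n stands in for a
-// user-chosen iteration count):
-//
-//   Kokkos::Experimental::UniqueToken<HPX> token;
-//   Kokkos::parallel_for(
-//       Kokkos::RangePolicy<HPX>(0, n), KOKKOS_LAMBDA(const int i) {
-//         const int id = token.acquire();  // 0 <= id < token.size()
-//         // ... index per-thread resources with id ...
-//         token.release(id);
-//       });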
-
-template <>
-class UniqueToken<HPX, UniqueTokenScope::Global> {
- public:
- using execution_space = HPX;
- using size_type = int;
- UniqueToken(execution_space const & = execution_space()) noexcept {}
-
- // NOTE: Currently this assumes that there is no oversubscription.
- // hpx::get_num_worker_threads can't be used directly because it may yield
-  // its task (problematic if called after hpx::get_worker_thread_num).
- int size() const noexcept { return HPX::impl_max_hardware_threads(); }
- int acquire() const noexcept { return HPX::impl_hardware_thread_id(); }
- void release(int) const noexcept {}
-};
-} // namespace Experimental
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-struct HPXTeamMember {
- public:
- using execution_space = Kokkos::Experimental::HPX;
- using scratch_memory_space =
- Kokkos::ScratchMemorySpace<Kokkos::Experimental::HPX>;
-
- private:
- scratch_memory_space m_team_shared;
-
- int m_league_size;
- int m_league_rank;
- int m_team_size;
- int m_team_rank;
-
- public:
- KOKKOS_INLINE_FUNCTION
- const scratch_memory_space &team_shmem() const {
- return m_team_shared.set_team_thread_mode(0, 1, 0);
- }
-
- KOKKOS_INLINE_FUNCTION
- const execution_space::scratch_memory_space &team_scratch(const int) const {
- return m_team_shared.set_team_thread_mode(0, 1, 0);
- }
-
- KOKKOS_INLINE_FUNCTION
- const execution_space::scratch_memory_space &thread_scratch(const int) const {
- return m_team_shared.set_team_thread_mode(0, team_size(), team_rank());
- }
-
- KOKKOS_INLINE_FUNCTION int league_rank() const noexcept {
- return m_league_rank;
- }
-
- KOKKOS_INLINE_FUNCTION int league_size() const noexcept {
- return m_league_size;
- }
-
- KOKKOS_INLINE_FUNCTION int team_rank() const noexcept { return m_team_rank; }
- KOKKOS_INLINE_FUNCTION int team_size() const noexcept { return m_team_size; }
-
- template <class... Properties>
- constexpr KOKKOS_INLINE_FUNCTION HPXTeamMember(
- const TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
- &policy,
- const int team_rank, const int league_rank, void *scratch,
- size_t scratch_size) noexcept
- : m_team_shared(scratch, scratch_size, scratch, scratch_size),
- m_league_size(policy.league_size()),
- m_league_rank(league_rank),
- m_team_size(policy.team_size()),
- m_team_rank(team_rank) {}
-
- KOKKOS_INLINE_FUNCTION
- void team_barrier() const {}
-
- template <class ValueType>
- KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType &, const int &) const {}
-
- template <class Closure, class ValueType>
- KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure &closure,
- ValueType &value,
- const int &) const {
- closure(value);
- }
-
- template <class ValueType, class JoinOp>
- KOKKOS_INLINE_FUNCTION ValueType team_reduce(const ValueType &value,
- const JoinOp &) const {
- return value;
- }
-
- template <class ReducerType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
- team_reduce(const ReducerType &) const {}
-
- template <typename Type>
- KOKKOS_INLINE_FUNCTION Type
- team_scan(const Type &value, Type *const global_accum = nullptr) const {
- if (global_accum) {
- Kokkos::atomic_fetch_add(global_accum, value);
- }
-
- return 0;
- }
-};
-
-template <class... Properties>
-class TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
- : public PolicyTraits<Properties...> {
- int m_league_size;
- int m_team_size;
- std::size_t m_team_scratch_size[2];
- std::size_t m_thread_scratch_size[2];
- int m_chunk_size;
-
- public:
- using traits = PolicyTraits<Properties...>;
-
- //! Tag this class as a kokkos execution policy
- using execution_policy = TeamPolicyInternal;
-
- using member_type = HPXTeamMember;
-
- //! Execution space of this execution policy:
- using execution_space = Kokkos::Experimental::HPX;
-
- // NOTE: Max size is 1 for simplicity. In most cases more than 1 is not
- // necessary on CPU. Implement later if there is a need.
- template <class FunctorType>
- inline static int team_size_max(const FunctorType &) {
- return 1;
- }
-
- template <class FunctorType>
- inline static int team_size_recommended(const FunctorType &) {
- return 1;
- }
-
- template <class FunctorType>
- inline static int team_size_recommended(const FunctorType &, const int &) {
- return 1;
- }
-
- template <class FunctorType>
- int team_size_max(const FunctorType &, const ParallelForTag &) const {
- return 1;
- }
-
- template <class FunctorType>
- int team_size_max(const FunctorType &, const ParallelReduceTag &) const {
- return 1;
- }
-
- template <class FunctorType, class ReducerType>
- int team_size_max(const FunctorType &, const ReducerType &,
- const ParallelReduceTag &) const {
- return 1;
- }
-
- template <class FunctorType>
- int team_size_recommended(const FunctorType &, const ParallelForTag &) const {
- return 1;
- }
-
- template <class FunctorType>
- int team_size_recommended(const FunctorType &,
- const ParallelReduceTag &) const {
- return 1;
- }
-
- template <class FunctorType, class ReducerType>
- int team_size_recommended(const FunctorType &, const ReducerType &,
- const ParallelReduceTag &) const {
- return 1;
- }
-
- static int vector_length_max() { return 1; }
-
- inline int impl_vector_length() noexcept { return 1; }
- inline bool impl_auto_team_size() noexcept { return false; }
- inline bool impl_auto_vector_length() noexcept { return false; }
- inline void impl_set_vector_length(int) noexcept {}
- inline void impl_set_team_size(int) noexcept {}
-
- private:
- inline void init(const int league_size_request, const int team_size_request) {
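-    // Chunk-size heuristic, worked example (illustrative): with concurrency =
-    // 8 and league_size = 1000, the first loop below stops at 32 (since
-    // 32 * 4 * 8 >= 1000); because 32 < 128, the second loop recomputes with
-    // a tighter target and yields a chunk size of 128.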
- m_league_size = league_size_request;
- const int max_team_size = 1; // TODO: Can't use team_size_max(...) because
- // it requires a functor as argument.
- m_team_size =
- team_size_request > max_team_size ? max_team_size : team_size_request;
-
- if (m_chunk_size > 0) {
- if (!Impl::is_integral_power_of_two(m_chunk_size))
- Kokkos::abort("TeamPolicy blocking granularity must be power of two");
- } else {
- int new_chunk_size = 1;
- while (new_chunk_size * 4 * Kokkos::Experimental::HPX::concurrency() <
- m_league_size) {
- new_chunk_size *= 2;
- }
-
- if (new_chunk_size < 128) {
- new_chunk_size = 1;
- while ((new_chunk_size * Kokkos::Experimental::HPX::concurrency() <
- m_league_size) &&
- (new_chunk_size < 128))
- new_chunk_size *= 2;
- }
-
- m_chunk_size = new_chunk_size;
- }
- }
-
- public:
- inline int team_size() const { return m_team_size; }
- inline int league_size() const { return m_league_size; }
-
- size_t scratch_size(const int &level, int team_size_ = -1) const {
- if (team_size_ < 0) {
- team_size_ = m_team_size;
- }
- return m_team_scratch_size[level] +
- team_size_ * m_thread_scratch_size[level];
- }
-
- inline static int scratch_size_max(int level) {
- return (level == 0 ? 1024 * 32 : // Roughly L1 size
- 20 * 1024 * 1024); // Limit to keep compatibility with CUDA
- }
-
- public:
- template <class ExecSpace, class... OtherProperties>
- friend class TeamPolicyInternal;
-
- const typename traits::execution_space &space() const {
- static typename traits::execution_space m_space;
- return m_space;
- }
-
- template <class... OtherProperties>
- TeamPolicyInternal(const TeamPolicyInternal<Kokkos::Experimental::HPX,
- OtherProperties...> &p) {
- m_league_size = p.m_league_size;
- m_team_size = p.m_team_size;
- m_team_scratch_size[0] = p.m_team_scratch_size[0];
- m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
- m_team_scratch_size[1] = p.m_team_scratch_size[1];
- m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
- m_chunk_size = p.m_chunk_size;
- }
-
- TeamPolicyInternal(const typename traits::execution_space &,
- int league_size_request, int team_size_request,
- int /* vector_length_request */ = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, team_size_request);
- }
-
- TeamPolicyInternal(const typename traits::execution_space &,
- int league_size_request, const Kokkos::AUTO_t &,
- int /* vector_length_request */ = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, 1);
- }
-
- TeamPolicyInternal(const typename traits::execution_space &,
- int league_size_request,
- const Kokkos::AUTO_t &, /* team_size_request */
- const Kokkos::AUTO_t & /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, 1);
- }
-
- TeamPolicyInternal(const typename traits::execution_space &,
- int league_size_request, int team_size_request,
- const Kokkos::AUTO_t & /* vector_length_request */
- )
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, team_size_request);
- }
-
- TeamPolicyInternal(int league_size_request,
- const Kokkos::AUTO_t &, /* team_size_request */
- const Kokkos::AUTO_t & /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, 1);
- }
-
- TeamPolicyInternal(int league_size_request, int team_size_request,
- const Kokkos::AUTO_t & /* vector_length_request */
- )
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, team_size_request);
- }
-
- TeamPolicyInternal(int league_size_request, int team_size_request,
- int /* vector_length_request */ = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, team_size_request);
- }
-
- TeamPolicyInternal(int league_size_request, const Kokkos::AUTO_t &,
- int /* vector_length_request */ = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(0) {
- init(league_size_request, 1);
- }
-
- inline int chunk_size() const { return m_chunk_size; }
-
- inline TeamPolicyInternal &set_chunk_size(
- typename traits::index_type chunk_size_) {
- m_chunk_size = chunk_size_;
- return *this;
- }
-
- inline TeamPolicyInternal &set_scratch_size(const int &level,
- const PerTeamValue &per_team) {
- m_team_scratch_size[level] = per_team.value;
- return *this;
- }
-
- inline TeamPolicyInternal &set_scratch_size(
- const int &level, const PerThreadValue &per_thread) {
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- inline TeamPolicyInternal &set_scratch_size(
- const int &level, const PerTeamValue &per_team,
- const PerThreadValue &per_thread) {
- m_team_scratch_size[level] = per_team.value;
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-};
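-
-// Reached through the public API as, e.g. (illustrative; league_size is a
-// user-chosen value):
-//
-//   using policy_type = Kokkos::TeamPolicy<Kokkos::Experimental::HPX>;
-//   policy_type policy(league_size, Kokkos::AUTO);
-//   policy.set_scratch_size(0, Kokkos::PerTeam(1024));  // level 0: 32 KiB max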
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-template <typename Policy>
-typename Policy::member_type get_hpx_adjusted_chunk_size(Policy const &policy) {
- const int concurrency = Kokkos::Experimental::HPX::concurrency();
- const typename Policy::member_type n = policy.end() - policy.begin();
- typename Policy::member_type new_chunk_size = policy.chunk_size();
-
- while (n >= 4 * concurrency * new_chunk_size) {
- new_chunk_size *= 2;
- }
-
- return new_chunk_size;
-}
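-
-// Worked example for get_hpx_adjusted_chunk_size (illustrative): with
-// concurrency = 8, policy.chunk_size() = 1 and n = 1000 iterations, the chunk
-// size doubles 1 -> 2 -> 4 -> 8 -> 16 -> 32 and stops there, since
-// 4 * 8 * 32 = 1024 > 1000; each worker then receives work in chunks of
-// roughly n / (4 * concurrency) iterations.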
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::HPX> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Member i) {
- functor(i);
- }
-
- template <class TagType>
- static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Member i) {
- const TagType t{};
- functor(t, i);
- }
-
- template <class TagType>
- static std::enable_if_t<std::is_void<TagType>::value> execute_functor_range(
- const FunctorType &functor, const Member i_begin, const Member i_end) {
- for (Member i = i_begin; i < i_end; ++i) {
- functor(i);
- }
- }
-
- template <class TagType>
- static std::enable_if_t<!std::is_void<TagType>::value> execute_functor_range(
- const FunctorType &functor, const Member i_begin, const Member i_end) {
- const TagType t{};
- for (Member i = i_begin; i < i_end; ++i) {
- functor(t, i);
- }
- }
-
- public:
- void execute() const {
- Kokkos::Impl::dispatch_execute_task(this, m_policy.space());
- }
-
- void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
-#if KOKKOS_HPX_IMPLEMENTATION == 0
- using hpx::for_loop;
-
- for_loop(par.on(exec).with(static_chunk_size(m_policy.chunk_size())),
- m_policy.begin(), m_policy.end(), [this](const Member i) {
- execute_functor<WorkTag>(m_functor, i);
- });
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
- const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
-
- for_loop_strided(
- par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
- [this, chunk_size](const Member i_begin) {
- const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
- execute_functor_range<WorkTag>(m_functor, i_begin, i_end);
- });
-#endif
- }
-
- inline ParallelFor(const FunctorType &arg_functor, Policy arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
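-
-// This specialization ultimately services a user-side dispatch such as
-// (illustrative; x, y, a and n stand in for user data):
-//
-//   Kokkos::parallel_for(
-//       "axpy", Kokkos::RangePolicy<Kokkos::Experimental::HPX>(0, n),
-//       KOKKOS_LAMBDA(const int i) { y(i) += a * x(i); });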
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Experimental::HPX> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
- using WorkTag = typename MDRangePolicy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using iterate_type =
- typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
- WorkTag, void>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy;
-
- public:
- void execute() const { dispatch_execute_task(this, m_mdr_policy.space()); }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_mdr_policy.space());
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
-#if KOKKOS_HPX_IMPLEMENTATION == 0
- using hpx::for_loop;
-
- for_loop(par.on(exec).with(
- static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
- m_policy.begin(), m_policy.end(), [this](const Member i) {
- iterate_type(m_mdr_policy, m_functor)(i);
- });
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
- const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
-
- for_loop_strided(par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
- [this, chunk_size](const Member i_begin) {
- const Member i_end =
- (std::min)(i_begin + chunk_size, m_policy.end());
- for (Member i = i_begin; i < i_end; ++i) {
- iterate_type(m_mdr_policy, m_functor)(i);
- }
- });
-#endif
- }
-
- inline ParallelFor(const FunctorType &arg_functor, MDRangePolicy arg_policy)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)) {}
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy &, const Functor &) {
- /**
- * 1024 here is just our guess for a reasonable max tile size,
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::HPX> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
- using value_type = typename Analysis::value_type;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- bool m_force_synchronous;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Member i, reference_type update) {
- functor(i, update);
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Member i, reference_type update) {
- const TagType t{};
- functor(t, i, update);
- }
-
- template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> execute_functor_range(
- reference_type update, const Member i_begin, const Member i_end) const {
- for (Member i = i_begin; i < i_end; ++i) {
- m_functor(i, update);
- }
- }
-
- template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> execute_functor_range(
- reference_type update, const Member i_begin, const Member i_end) const {
- const TagType t{};
-
- for (Member i = i_begin; i < i_end; ++i) {
- m_functor(t, i, update);
- }
- }
-
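-  // Type-erased holder for a single reduction value whose size is known
-  // only at run time. The KOKKOS_HPX_IMPLEMENTATION == 0 reduction below
-  // needs a regular, deep-copyable value type, which this wrapper provides
-  // on top of a raw char buffer.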
- class value_type_wrapper {
- private:
- std::size_t m_value_size;
- char *m_value_buffer;
-
- public:
- value_type_wrapper() : m_value_size(0), m_value_buffer(nullptr) {}
-
- value_type_wrapper(const std::size_t value_size)
- : m_value_size(value_size), m_value_buffer(new char[m_value_size]) {}
-
-    value_type_wrapper(const value_type_wrapper &other)
-        : m_value_size(other.m_value_size),
-          m_value_buffer(new char[other.m_value_size]) {
-      std::copy(other.m_value_buffer, other.m_value_buffer + m_value_size,
-                m_value_buffer);
-    }
-
- ~value_type_wrapper() { delete[] m_value_buffer; }
-
-    value_type_wrapper(value_type_wrapper &&other)
-        : m_value_size(other.m_value_size),
-          m_value_buffer(other.m_value_buffer) {
-      other.m_value_buffer = nullptr;
-      other.m_value_size = 0;
-    }
-
- value_type_wrapper &operator=(const value_type_wrapper &other) {
- if (this != &other) {
- delete[] m_value_buffer;
- m_value_buffer = new char[other.m_value_size];
- m_value_size = other.m_value_size;
-
- std::copy(other.m_value_buffer, other.m_value_buffer + m_value_size,
- m_value_buffer);
- }
-
- return *this;
- }
-
- value_type_wrapper &operator=(value_type_wrapper &&other) {
- if (this != &other) {
- delete[] m_value_buffer;
- m_value_buffer = other.m_value_buffer;
- m_value_size = other.m_value_size;
-
- other.m_value_buffer = nullptr;
- other.m_value_size = 0;
- }
-
- return *this;
- }
-
- pointer_type pointer() const {
- return reinterpret_cast<pointer_type>(m_value_buffer);
- }
-
- reference_type reference() const {
- return Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(m_value_buffer));
- }
- };
-
- public:
- void execute() const {
- if (m_policy.end() <= m_policy.begin()) {
- if (m_result_ptr) {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- return;
- }
- dispatch_execute_task(this, m_policy.space(), m_force_synchronous);
- }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const std::size_t value_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
-#if KOKKOS_HPX_IMPLEMENTATION == 0
-  // NOTE: This version makes the most use of HPX functionality, but
-  // requires the value_type_wrapper class to handle arbitrary
-  // reference_types. It is also significantly slower than the
-  // implementation below because it cannot reuse the scratch buffer
-  // shared with the other parallel constructs.
- using hpx::parallel::reduction;
-
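-  // With hpx::parallel::reduction, each task accumulates into a private
-  // copy of the reduction value seeded from `identity`; the lambda joins
-  // two partial values, and the combined result lands in `final_value`.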
- value_type_wrapper final_value(value_size);
- value_type_wrapper identity(value_size);
-
- final_reducer.init(final_value.pointer());
- final_reducer.init(identity.pointer());
-
- for_loop(par.on(exec).with(
- static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
- m_policy.begin(), m_policy.end(),
- reduction(final_value, identity,
- [final_reducer](
- value_type_wrapper &a,
- value_type_wrapper &b) -> value_type_wrapper & {
- final_reducer.join(a.pointer(), b.pointer());
- return a;
- }),
- [this](Member i, value_type_wrapper &update) {
- execute_functor<WorkTag>(m_functor, i, update.reference());
- });
-
- pointer_type final_value_ptr = final_value.pointer();
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
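-  // Per-thread reduction scheme: each worker thread owns a value_size slot
-  // in the shared thread_buffer. The slots are initialized below, each
-  // thread accumulates into its own slot during the strided loop, and the
-  // slots are finally joined serially into slot 0.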
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
-
- thread_buffer &buffer = m_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, value_size);
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
-      [&buffer, final_reducer](const int t) noexcept {
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
- });
-
- const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
-
- for_loop_strided(
- par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
- [this, &buffer, chunk_size](const Member i_begin) {
- reference_type update = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(
- Kokkos::Experimental::HPX::impl_hardware_thread_id())));
- const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
- execute_functor_range<WorkTag>(update, i_begin, i_end);
- });
-
- for (int i = 1; i < num_worker_threads; ++i) {
- final_reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
- reinterpret_cast<pointer_type>(buffer.get(i)));
- }
-
- pointer_type final_value_ptr =
- reinterpret_cast<pointer_type>(buffer.get(0));
-#endif
-
- final_reducer.final(final_value_ptr);
-
- if (m_result_ptr != nullptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = final_value_ptr[j];
- }
- }
- }
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType &arg_functor, Policy arg_policy,
- const ViewType &arg_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_view.data()),
- m_force_synchronous(!arg_view.impl_track().has_record()) {}
-
- inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
- const ReducerType &reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_force_synchronous(!reducer.view().impl_track().has_record()) {}
-};
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::HPX> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
- using WorkTag = typename MDRangePolicy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
- MDRangePolicy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
- using iterate_type =
- typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
- WorkTag, reference_type>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- bool m_force_synchronous;
-
- public:
- void execute() const {
- dispatch_execute_task(this, m_mdr_policy.space(), m_force_synchronous);
- }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_mdr_policy.space());
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
- const std::size_t value_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
- thread_buffer &buffer = m_mdr_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, value_size);
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
-#if KOKKOS_HPX_IMPLEMENTATION == 0
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [&buffer, final_reducer](std::size_t t) {
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
- });
-
- for_loop(par.on(exec).with(
- static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
- m_policy.begin(), m_policy.end(), [this, &buffer](const Member i) {
- reference_type update = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(
- Kokkos::Experimental::HPX::impl_hardware_thread_id())));
- iterate_type(m_mdr_policy, m_functor, update)(i);
- });
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), std::size_t(0),
- num_worker_threads, [&buffer, final_reducer](const std::size_t t) {
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
- });
-
- const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
-
- for_loop_strided(
- par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
- [this, &buffer, chunk_size](const Member i_begin) {
- reference_type update = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(
- Kokkos::Experimental::HPX::impl_hardware_thread_id())));
- const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
-
- for (Member i = i_begin; i < i_end; ++i) {
- iterate_type(m_mdr_policy, m_functor, update)(i);
- }
- });
-#endif
-
- for (int i = 1; i < num_worker_threads; ++i) {
- final_reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
- reinterpret_cast<pointer_type>(buffer.get(i)));
- }
-
- final_reducer.final(reinterpret_cast<pointer_type>(buffer.get(0)));
-
- if (m_result_ptr != nullptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = reinterpret_cast<pointer_type>(buffer.get(0))[j];
- }
- }
- }
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType &arg_functor, MDRangePolicy arg_policy,
- const ViewType &arg_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(InvalidType()),
- m_result_ptr(arg_view.data()),
- m_force_synchronous(!arg_view.impl_track().has_record()) {}
-
- inline ParallelReduce(const FunctorType &arg_functor,
- MDRangePolicy arg_policy, const ReducerType &reducer)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_force_synchronous(!reducer.view().impl_track().has_record()) {}
-
-  template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy &, const Functor &) {
- /**
- * 1024 here is just our guess for a reasonable max tile size,
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::HPX> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Member i_begin,
- const Member i_end, reference_type update,
- const bool final) {
- for (Member i = i_begin; i < i_end; ++i) {
- functor(i, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Member i_begin,
- const Member i_end, reference_type update,
- const bool final) {
- const TagType t{};
- for (Member i = i_begin; i < i_end; ++i) {
- functor(t, i, update, final);
- }
- }
-
- public:
- void execute() const { dispatch_execute_task(this, m_policy.space()); }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
- const int value_count = Analysis::value_count(m_functor);
- const std::size_t value_size = Analysis::value_size(m_functor);
-
- thread_buffer &buffer = m_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, 2 * value_size);
-
- using hpx::barrier;
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- barrier<> bar(num_worker_threads);
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- typename Analysis::Reducer final_reducer(&m_functor);
-
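-    // Barrier-synchronized scan in three phases, run as one for_loop with
-    // one iteration per worker thread:
-    //   1. each thread scans its own subrange, leaving the partial sum in
-    //      the first half of its buffer slot;
-    //   2. thread 0 converts the partial sums into exclusive offsets stored
-    //      in the second half of every slot;
-    //   3. each thread rescans its subrange with final == true, starting
-    //      from its offset.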
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [this, &bar, &buffer, num_worker_threads, value_count, value_size,
- final_reducer](int t) {
- reference_type update_sum =
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
-
- const WorkRange range(m_policy, t, num_worker_threads);
- execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
- update_sum, false);
-
- bar.arrive_and_wait();
-
- if (t == 0) {
- final_reducer.init(
- reinterpret_cast<pointer_type>(buffer.get(0) + value_size));
-
- for (int i = 1; i < num_worker_threads; ++i) {
- pointer_type ptr_1_prev =
- reinterpret_cast<pointer_type>(buffer.get(i - 1));
- pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
- buffer.get(i - 1) + value_size);
- pointer_type ptr_2 =
- reinterpret_cast<pointer_type>(buffer.get(i) + value_size);
-
- for (int j = 0; j < value_count; ++j) {
- ptr_2[j] = ptr_2_prev[j];
- }
-
- final_reducer.join(ptr_2, ptr_1_prev);
- }
- }
-
- bar.arrive_and_wait();
-
- reference_type update_base = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(t) + value_size));
-
- execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
- update_base, true);
- });
- }
-
- inline ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
-
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::Experimental::HPX> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- ReturnType &m_returnvalue;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Member i_begin,
- const Member i_end, reference_type update,
- const bool final) {
- for (Member i = i_begin; i < i_end; ++i) {
- functor(i, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Member i_begin,
- const Member i_end, reference_type update,
- const bool final) {
- const TagType t{};
- for (Member i = i_begin; i < i_end; ++i) {
- functor(t, i, update, final);
- }
- }
-
- public:
- void execute() const { dispatch_execute_task(this, m_policy.space()); }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
- const int value_count = Analysis::value_count(m_functor);
- const std::size_t value_size = Analysis::value_size(m_functor);
-
- thread_buffer &buffer = m_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, 2 * value_size);
-
- using hpx::barrier;
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- barrier<> bar(num_worker_threads);
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- typename Analysis::Reducer final_reducer(&m_functor);
-
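-    // Same three-phase, barrier-synchronized scan as in ParallelScan above,
-    // except that the last thread additionally stores its inclusive result
-    // in m_returnvalue as the grand total.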
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [this, &bar, &buffer, num_worker_threads, value_count, value_size,
- final_reducer](int t) {
- reference_type update_sum =
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
-
- const WorkRange range(m_policy, t, num_worker_threads);
- execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
- update_sum, false);
-
- bar.arrive_and_wait();
-
- if (t == 0) {
- final_reducer.init(
- reinterpret_cast<pointer_type>(buffer.get(0) + value_size));
-
- for (int i = 1; i < num_worker_threads; ++i) {
- pointer_type ptr_1_prev =
- reinterpret_cast<pointer_type>(buffer.get(i - 1));
- pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
- buffer.get(i - 1) + value_size);
- pointer_type ptr_2 =
- reinterpret_cast<pointer_type>(buffer.get(i) + value_size);
-
- for (int j = 0; j < value_count; ++j) {
- ptr_2[j] = ptr_2_prev[j];
- }
-
- final_reducer.join(ptr_2, ptr_1_prev);
- }
- }
-
- bar.arrive_and_wait();
-
- reference_type update_base = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(t) + value_size));
-
- execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
- update_base, true);
-
- if (t == num_worker_threads - 1) {
- m_returnvalue = update_base;
- }
- });
- }
-
- inline ParallelScanWithTotal(const FunctorType &arg_functor,
- const Policy &arg_policy,
- ReturnType &arg_returnvalue)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_returnvalue(arg_returnvalue) {}
-};
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-template <class FunctorType, class... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Experimental::HPX> {
- private:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using memory_space = Kokkos::HostSpace;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const int m_league;
- const std::size_t m_shared;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Policy &policy, const int league_rank,
- char *local_buffer, const std::size_t local_buffer_size) {
- functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size));
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Policy &policy, const int league_rank,
- char *local_buffer, const std::size_t local_buffer_size) {
- const TagType t{};
- functor(t, Member(policy, 0, league_rank, local_buffer, local_buffer_size));
- }
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Policy &policy,
- const int league_rank_begin, const int league_rank_end,
- char *local_buffer,
- const std::size_t local_buffer_size) {
- for (int league_rank = league_rank_begin; league_rank < league_rank_end;
- ++league_rank) {
- functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size));
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Policy &policy,
- const int league_rank_begin, const int league_rank_end,
- char *local_buffer,
- const std::size_t local_buffer_size) {
- const TagType t{};
- for (int league_rank = league_rank_begin; league_rank < league_rank_end;
- ++league_rank) {
- functor(t,
- Member(policy, 0, league_rank, local_buffer, local_buffer_size));
- }
- }
-
- public:
- void execute() const { dispatch_execute_task(this, m_policy.space()); }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
-
- thread_buffer &buffer = m_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, m_shared);
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
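-    // Each league rank (team) is executed in full by a single HPX worker
-    // thread; the team's scratch memory is that worker's slot in the
-    // shared thread_buffer.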
-#if KOKKOS_HPX_IMPLEMENTATION == 0
- using hpx::for_loop;
-
- for_loop(
- par.on(exec).with(static_chunk_size(m_policy.chunk_size())), 0,
- m_policy.league_size(), [this, &buffer](const int league_rank) {
- execute_functor<WorkTag>(
- m_functor, m_policy, league_rank,
- buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id()),
- m_shared);
- });
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
- for_loop_strided(
- par.on(exec), 0, m_policy.league_size(), m_policy.chunk_size(),
- [this, &buffer](const int league_rank_begin) {
- const int league_rank_end =
- (std::min)(league_rank_begin + m_policy.chunk_size(),
- m_policy.league_size());
- execute_functor_range<WorkTag>(
- m_functor, m_policy, league_rank_begin, league_rank_end,
- buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id()),
- m_shared);
- });
-#endif
- }
-
- ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_league(arg_policy.league_size()),
- m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {}
-};
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Experimental::HPX> {
- private:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
- using Member = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
-
- const FunctorType m_functor;
- const int m_league;
- const Policy m_policy;
- const ReducerType m_reducer;
- pointer_type m_result_ptr;
- const std::size_t m_shared;
-
- bool m_force_synchronous;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Policy &policy, const int league_rank,
- char *local_buffer, const std::size_t local_buffer_size,
- reference_type update) {
- functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size),
- update);
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
- const FunctorType &functor, const Policy &policy, const int league_rank,
- char *local_buffer, const std::size_t local_buffer_size,
- reference_type update) {
- const TagType t{};
- functor(t, Member(policy, 0, league_rank, local_buffer, local_buffer_size),
- update);
- }
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Policy &policy,
- const int league_rank_begin, const int league_rank_end,
- char *local_buffer, const std::size_t local_buffer_size,
- reference_type update) {
- for (int league_rank = league_rank_begin; league_rank < league_rank_end;
- ++league_rank) {
- functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size),
- update);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value>
- execute_functor_range(const FunctorType &functor, const Policy &policy,
- const int league_rank_begin, const int league_rank_end,
- char *local_buffer, const std::size_t local_buffer_size,
- reference_type update) {
- const TagType t{};
- for (int league_rank = league_rank_begin; league_rank < league_rank_end;
- ++league_rank) {
- functor(t,
- Member(policy, 0, league_rank, local_buffer, local_buffer_size),
- update);
- }
- }
-
- public:
- void execute() const {
- if (m_policy.league_size() * m_policy.team_size() == 0) {
- if (m_result_ptr) {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- return;
- }
- dispatch_execute_task(this, m_policy.space());
- }
-
- inline void execute_task() const {
- // See [note 1] for an explanation.
- Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
- m_policy.space());
-
- const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
- const std::size_t value_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
- thread_buffer &buffer = m_policy.space().impl_get_buffer();
- buffer.resize(num_worker_threads, value_size + m_shared);
-
- auto exec = Kokkos::Experimental::HPX::impl_get_executor();
-
- using hpx::for_loop;
- using hpx::execution::par;
- using hpx::execution::static_chunk_size;
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
-#if KOKKOS_HPX_IMPLEMENTATION == 0
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [&buffer, final_reducer](const std::size_t t) {
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
- });
-
- for_loop(par.on(exec).with(static_chunk_size(m_policy.chunk_size())), 0,
- m_policy.league_size(),
- [this, &buffer, value_size](const int league_rank) {
- std::size_t t =
- Kokkos::Experimental::HPX::impl_hardware_thread_id();
- reference_type update = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(t)));
-
- execute_functor<WorkTag>(m_functor, m_policy, league_rank,
- buffer.get(t) + value_size, m_shared,
- update);
- });
-
-#elif KOKKOS_HPX_IMPLEMENTATION == 1
- using hpx::for_loop_strided;
-
- for_loop(
- par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
- [&buffer, final_reducer](std::size_t const t) {
- final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
- });
-
- for_loop_strided(
- par.on(exec), 0, m_policy.league_size(), m_policy.chunk_size(),
- [this, &buffer, value_size](int const league_rank_begin) {
- std::size_t t = Kokkos::Experimental::HPX::impl_hardware_thread_id();
- reference_type update = Analysis::Reducer::reference(
- reinterpret_cast<pointer_type>(buffer.get(t)));
- const int league_rank_end =
- (std::min)(league_rank_begin + m_policy.chunk_size(),
- m_policy.league_size());
- execute_functor_range<WorkTag>(
- m_functor, m_policy, league_rank_begin, league_rank_end,
- buffer.get(t) + value_size, m_shared, update);
- });
-#endif
-
- const pointer_type ptr = reinterpret_cast<pointer_type>(buffer.get(0));
- for (int t = 1; t < num_worker_threads; ++t) {
- final_reducer.join(ptr, reinterpret_cast<pointer_type>(buffer.get(t)));
- }
-
- final_reducer.final(ptr);
-
- if (m_result_ptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = ptr[j];
- }
- }
- }
-
- template <class ViewType>
- ParallelReduce(const FunctorType &arg_functor, const Policy &arg_policy,
- const ViewType &arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_league(arg_policy.league_size()),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- m_functor, arg_policy.team_size())),
- m_force_synchronous(!arg_result.impl_track().has_record()) {}
-
- inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
- const ReducerType &reducer)
- : m_functor(arg_functor),
- m_league(arg_policy.league_size()),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())),
- m_force_synchronous(!reducer.view().impl_track().has_record()) {}
-};
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION
- Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- TeamThreadRange(const Impl::HPXTeamMember &thread, const iType &count) {
- return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
-TeamThreadRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
- const iType2 &i_end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, iType(i_begin), iType(i_end));
-}
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION
- Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- TeamVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
- return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
-TeamVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
- const iType2 &i_end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, iType(i_begin), iType(i_end));
-}
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION
- Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
- return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
-ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
- const iType2 &i_end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
- thread, iType(i_begin), iType(i_end));
-}
-
-KOKKOS_INLINE_FUNCTION
-Impl::ThreadSingleStruct<Impl::HPXTeamMember> PerTeam(
- const Impl::HPXTeamMember &thread) {
- return Impl::ThreadSingleStruct<Impl::HPXTeamMember>(thread);
-}
-
-KOKKOS_INLINE_FUNCTION
-Impl::VectorSingleStruct<Impl::HPXTeamMember> PerThread(
- const Impl::HPXTeamMember &thread) {
- return Impl::VectorSingleStruct<Impl::HPXTeamMember>(thread);
-}
-
-/** \brief Inter-thread parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda) {
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment)
- lambda(i);
-}
-
-/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team,
- * and a summation of val is performed and put into result.
- */
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda, ValueType &result) {
- result = ValueType();
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, result);
- }
-}
-
-/** \brief Intra-thread vector parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda) {
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i);
- }
-}
-
-/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling thread
- * and a summation of val is performed and put into result.
- */
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda, ValueType &result) {
- result = ValueType();
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, result);
- }
-}
-
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda, const ReducerType &reducer) {
- reducer.init(reducer.reference());
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, reducer.reference());
- }
-}
-
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const Lambda &lambda, const ReducerType &reducer) {
- reducer.init(reducer.reference());
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, reducer.reference());
- }
-}
-
-template <typename iType, class FunctorType>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember> const
- &loop_boundaries,
- const FunctorType &lambda) {
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void,
- FunctorType>::value_type;
-
- value_type scan_val = value_type();
-
- // Intra-member scan
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, scan_val, false);
- }
-
- // 'scan_val' output is the exclusive prefix sum
- scan_val = loop_boundaries.thread.team_scan(scan_val);
-
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, scan_val, true);
- }
-}
-
-/** \brief Intra-thread vector parallel exclusive prefix sum. Executes
- * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
- * operation is performed. Depending on the target execution space the operator
- * might be called twice: once with final=false and once with final=true. When
- * final==true val contains the prefix sum value. The contribution of this "i"
- * needs to be added to val no matter whether final==true or not. In a serial
- * execution (i.e. team_size==1) the operator is only called once with
- * final==true. Scan_val will be set to the final sum value over all vector
- * lanes.
- */
-template <typename iType, class FunctorType>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const FunctorType &lambda) {
- using value_type =
- typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- TeamPolicy<Experimental::HPX>,
- FunctorType>::value_type;
-
- value_type scan_val = value_type();
-
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, scan_val, true);
- }
-}
-
-/** \brief Intra-thread vector parallel scan with reducer. */
-template <typename iType, class FunctorType, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_scan(
- const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
- &loop_boundaries,
- const FunctorType &lambda, const ReducerType &reducer) {
- typename ReducerType::value_type scan_val;
- reducer.init(scan_val);
-
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, scan_val, true);
- }
-}
-
-template <class FunctorType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
- const FunctorType &lambda) {
- lambda();
-}
-
-template <class FunctorType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
- const FunctorType &lambda) {
- lambda();
-}
-
-template <class FunctorType, class ValueType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
- const FunctorType &lambda, ValueType &val) {
- lambda(val);
-}
-
-template <class FunctorType, class ValueType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
- const FunctorType &lambda, ValueType &val) {
- lambda(val);
-}
-
-} // namespace Kokkos
-
-#include <HPX/Kokkos_HPX_Task.hpp>
-
-#endif /* #if defined( KOKKOS_ENABLE_HPX ) */
-#endif /* #ifndef KOKKOS_HPX_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_HOSTSPACE_HPP
-#define KOKKOS_HOSTSPACE_HPP
-
-#include <cstring>
-#include <string>
-#include <iosfwd>
-#include <typeinfo>
-
-#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_Concepts.hpp>
-#include <Kokkos_MemoryTraits.hpp>
-
-#include <impl/Kokkos_Traits.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
-#include <impl/Kokkos_Tools.hpp>
-
-#include "impl/Kokkos_HostSpace_deepcopy.hpp"
-#include <impl/Kokkos_MemorySpace.hpp>
-
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-
-namespace Impl {
-
-/// \brief Initialize lock array for arbitrary size atomics.
-///
-/// Arbitrary atomics are implemented using a hash table of locks
-/// where the hash value is derived from the address of the
-/// object for which an atomic operation is performed.
-/// This function initializes the locks to zero (unset).
-void init_lock_array_host_space();
-
-/// \brief Acquire a lock for the address
-///
-/// This function tries to acquire the lock for the hash value derived
-/// from the provided ptr. If the lock is successfully acquired the
-/// function returns true. Otherwise it returns false.
-bool lock_address_host_space(void* ptr);
-
-/// \brief Release lock for the address
-///
-/// This function releases the lock for the hash value derived
-/// from the provided ptr. This function should only be called
-/// after previously successfully acquiring a lock with
-/// lock_address.
-void unlock_address_host_space(void* ptr);
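-
-// Illustrative usage sketch (not itself part of this header): a caller
-// typically spins until the lock for `ptr` is acquired, performs the
-// non-atomic update, and releases:
-//
-//   while (!Kokkos::Impl::lock_address_host_space(ptr)) {
-//   }
-//   /* ...read-modify-write on the object at ptr... */
-//   Kokkos::Impl::unlock_address_host_space(ptr);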
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-namespace Kokkos {
-/// \class HostSpace
-/// \brief Memory management for host memory.
-///
-/// HostSpace is a memory space that governs host memory. "Host"
-/// memory means the usual CPU-accessible memory.
-class HostSpace {
- public:
- //! Tag this class as a kokkos memory space
- using memory_space = HostSpace;
- using size_type = size_t;
-
- /// \typedef execution_space
- /// \brief Default execution space for this memory space.
- ///
- /// Every memory space has a default execution space. This is
- /// useful for things like initializing a View (which happens in
- /// parallel using the View's default execution space).
- using execution_space = DefaultHostExecutionSpace;
-
- //! This memory space preferred device_type
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- /**\brief Default memory space instance */
- HostSpace();
- HostSpace(HostSpace&& rhs) = default;
- HostSpace(const HostSpace& rhs) = default;
- HostSpace& operator=(HostSpace&&) = default;
- HostSpace& operator=(const HostSpace&) = default;
- ~HostSpace() = default;
-
-  /**\brief Non-default memory space instance to choose allocation mechanism,
- * if available */
-
- enum AllocationMechanism {
- STD_MALLOC,
- POSIX_MEMALIGN,
- POSIX_MMAP,
- INTEL_MM_ALLOC
- };
-
- explicit HostSpace(const AllocationMechanism&);
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class Kokkos::Experimental::LogicalMemorySpace;
-
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- public:
- /**\brief Return Name of the MemorySpace */
- static constexpr const char* name() { return m_name; }
-
- private:
- AllocationMechanism m_alloc_mech;
- static constexpr const char* m_name = "Host";
- friend class Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
-};
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::HostSpace>::assignable,
- "");
-
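-// HostMirror<S>::Space computes the host-accessible analogue of a space S:
-// keep S itself when both its execution and memory space are usable from
-// the host, keep only the memory space (paired with the host execution
-// space) when just the memory is host-accessible, and fall back to
-// HostSpace otherwise.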
-template <typename S>
-struct HostMirror {
- private:
- // If input execution space can access HostSpace then keep it.
- // Example: Kokkos::OpenMP can access, Kokkos::Cuda cannot
- enum {
- keep_exe = Kokkos::Impl::MemorySpaceAccess<
- typename S::execution_space::memory_space,
- Kokkos::HostSpace>::accessible
- };
-
- // If HostSpace can access memory space then keep it.
- // Example: Cannot access Kokkos::CudaSpace, can access Kokkos::CudaUVMSpace
- enum {
- keep_mem =
- Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
- typename S::memory_space>::accessible
- };
-
- public:
- using Space = std::conditional_t<
- keep_exe && keep_mem, S,
- std::conditional_t<keep_mem,
- Kokkos::Device<Kokkos::HostSpace::execution_space,
- typename S::memory_space>,
- Kokkos::HostSpace>>;
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::HostSpace, void>
- : public SharedAllocationRecordCommon<Kokkos::HostSpace> {
- private:
- friend Kokkos::HostSpace;
- friend class SharedAllocationRecordCommon<Kokkos::HostSpace>;
-
- using base_t = SharedAllocationRecordCommon<Kokkos::HostSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
-#ifdef KOKKOS_ENABLE_DEBUG
- /**\brief Root record for tracked allocations from this HostSpace instance */
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::HostSpace m_space;
-
- protected:
- ~SharedAllocationRecord()
-#if defined( \
- KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
- noexcept
-#endif
- ;
- SharedAllocationRecord() = default;
-
-  // This constructor does not forward to the one without the exec_space
-  // argument, in order to work around
-  // https://github.com/kokkos/kokkos/issues/5258.
-  // Because it is templated, it cannot be defined in the .cpp file like
-  // the other constructor.
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /* exec_space*/, const Kokkos::HostSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate)
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
- arg_label);
- }
-
- SharedAllocationRecord(
- const Kokkos::HostSpace& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate);
-
- public:
- KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
- const Kokkos::HostSpace& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size) {
- KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
- arg_alloc_size);))
- KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
- return nullptr;))
- }
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <class DT, class... DP>
-struct ZeroMemset<typename HostSpace::execution_space, DT, DP...> {
- ZeroMemset(const typename HostSpace::execution_space& exec,
- const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
-    // Host execution spaces other than HPX are synchronous, but for HPX we
-    // must fence, since a std::memset cannot properly be enqueued otherwise.
-    // We can't call exec.fence() directly because we don't have a full
-    // definition of HostSpace here.
- hostspace_fence(exec);
- using ValueType = typename View<DT, DP...>::value_type;
- std::memset(dst.data(), 0, sizeof(ValueType) * dst.size());
- }
-
- ZeroMemset(const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- using ValueType = typename View<DT, DP...>::value_type;
- std::memset(dst.data(), 0, sizeof(ValueType) * dst.size());
- }
-};
-
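-// DeepCopy between two HostSpace allocations: the (dst, src, n) constructors
-// copy synchronously, while the overloads taking an execution space instance
-// are the asynchronous variants. The generic ExecutionSpace variant fences
-// `exec` first because the copy itself runs on the default host execution
-// space rather than on `exec`.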
-template <>
-struct DeepCopy<HostSpace, HostSpace, DefaultHostExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- hostspace_parallel_deepcopy_async(exec, dst, src, n);
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<HostSpace, HostSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- hostspace_parallel_deepcopy(dst, src, n);
- }
-
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<HostSpace, HostSpace, "
- "ExecutionSpace>::DeepCopy: fence before copy");
- hostspace_parallel_deepcopy_async(dst, src, n);
- }
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-#endif  // KOKKOS_HOSTSPACE_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_LOGICALSPACES_HPP
-#define KOKKOS_LOGICALSPACES_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_ScratchSpace.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-#include <cstring>
-#include <utility>
-namespace Kokkos {
-namespace Experimental {
-struct DefaultMemorySpaceNamer {
- static constexpr const char* get_name() {
- return "DefaultLogicalMemorySpaceName";
- }
-};
-
-struct LogicalSpaceSharesAccess {
- struct shared_access {};
- struct no_shared_access {};
-};
-
-/// \class LogicalMemorySpace
-/// \brief A memory space identical in behavior to another space, but
-/// distinguishable from it by name and template arguments.
-///
-/// LogicalMemorySpace wraps a BaseSpace and forwards all allocation and
-/// deallocation to it; only the reported name (via Namer) differs.
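-///
-/// A minimal usage sketch (illustrative only; `ScratchNamer` is a
-/// hypothetical namer type):
-///
-///   struct ScratchNamer {
-///     static constexpr const char* get_name() { return "ScratchSpace"; }
-///   };
-///   using ScratchSpace =
-///       Kokkos::Experimental::LogicalMemorySpace<Kokkos::HostSpace, void,
-///                                                ScratchNamer>;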
-template <class BaseSpace, class DefaultBaseExecutionSpace = void,
- class Namer = DefaultMemorySpaceNamer,
- class SharesAccessWithBase = LogicalSpaceSharesAccess::shared_access>
-class LogicalMemorySpace {
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-  // [DZP] For reasons not yet understood, using LogicalMemorySpaces in an
-  // OpenMPTarget build causes errors in the SharedAllocationRecords of
-  // other types. The always-false static_assert below therefore fails any
-  // build that instantiates a LogicalMemorySpace under OpenMPTarget.
- static_assert(!std::is_same<BaseSpace, BaseSpace>::value,
- "Can't use LogicalMemorySpaces in an OpenMPTarget build, we're "
- "debugging memory issues");
-#endif
- public:
- //! Tag this class as a kokkos memory space
- using memory_space = LogicalMemorySpace<BaseSpace, DefaultBaseExecutionSpace,
- Namer, SharesAccessWithBase>;
- using size_type = typename BaseSpace::size_type;
-
- /// \typedef execution_space
- /// \brief Default execution space for this memory space.
- ///
- /// Every memory space has a default execution space. This is
- /// useful for things like initializing a View (which happens in
- /// parallel using the View's default execution space).
-
- using execution_space =
- std::conditional_t<std::is_void<DefaultBaseExecutionSpace>::value,
- typename BaseSpace::execution_space,
- DefaultBaseExecutionSpace>;
-
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- LogicalMemorySpace() = default;
-
-  template <typename... Args>
-  LogicalMemorySpace(Args&&... args)
-      : underlying_space(std::forward<Args>(args)...) {}
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
- }
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
- }
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
- }
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
- }
-
- /**\brief Return Name of the MemorySpace */
- constexpr static const char* name() { return Namer::get_name(); }
-
- private:
- BaseSpace underlying_space;
- template <class, class, class, class>
- friend class LogicalMemorySpace;
- friend class Kokkos::Impl::SharedAllocationRecord<memory_space, void>;
-
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- Kokkos::Tools::SpaceHandle arg_handle =
- Kokkos::Tools::make_space_handle(name())) const {
- return underlying_space.impl_allocate(arg_label, arg_alloc_size,
- arg_logical_size, arg_handle);
- }
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle arg_handle =
- Kokkos::Tools::make_space_handle(name())) const {
- underlying_space.impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size,
- arg_logical_size, arg_handle);
- }
-};
-} // namespace Experimental
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer,
- typename OtherSpace>
-struct MemorySpaceAccess<
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>,
- OtherSpace> {
- enum { assignable = MemorySpaceAccess<BaseSpace, OtherSpace>::assignable };
- enum { accessible = MemorySpaceAccess<BaseSpace, OtherSpace>::accessible };
- enum { deepcopy = MemorySpaceAccess<BaseSpace, OtherSpace>::deepcopy };
-};
-
-template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer,
- typename OtherSpace>
-struct MemorySpaceAccess<
- OtherSpace,
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>> {
- enum { assignable = MemorySpaceAccess<OtherSpace, BaseSpace>::assignable };
- enum { accessible = MemorySpaceAccess<OtherSpace, BaseSpace>::accessible };
- enum { deepcopy = MemorySpaceAccess<OtherSpace, BaseSpace>::deepcopy };
-};
-
-template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer>
-struct MemorySpaceAccess<
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>,
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>> {
- enum { assignable = true };
- enum { accessible = true };
- enum { deepcopy = true };
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-template <class BaseSpace, class DefaultBaseExecutionSpace, class Namer,
- class SharesAccessSemanticsWithBase>
-class SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- SharesAccessSemanticsWithBase>,
- void> : public SharedAllocationRecord<void, void> {
- private:
- using SpaceType =
- Kokkos::Experimental::LogicalMemorySpace<BaseSpace,
- DefaultBaseExecutionSpace, Namer,
- SharesAccessSemanticsWithBase>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- static void deallocate(RecordBase* arg_rec) {
- delete static_cast<SharedAllocationRecord*>(arg_rec);
- }
-
-#ifdef KOKKOS_ENABLE_DEBUG
- /**\brief Root record for tracked allocations from this
- * LogicalMemorySpace instance */
- static RecordBase s_root_record;
-#endif
-
- const SpaceType m_space;
-
- protected:
- ~SharedAllocationRecord() {
- m_space.deallocate(RecordBase::m_alloc_ptr->m_label,
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
- }
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/, const SpaceType& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const SpaceType& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate)
- : SharedAllocationRecord<void, void>(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<SpaceType, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- // Fill in the Header information
- RecordBase::m_alloc_ptr->m_record =
- static_cast<SharedAllocationRecord<void, void>*>(this);
-
- strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
- SharedAllocationHeader::maximum_label_length - 1);
-    // Set the last element to zero in case the label is too long
- RecordBase::m_alloc_ptr
- ->m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
- }
-
- public:
- inline std::string get_label() const {
- return std::string(RecordBase::head()->m_label);
- }
- KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
- const SpaceType& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size) {
- KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
- arg_alloc_size);))
- KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
- return nullptr;))
- }
-
- /**\brief Allocate tracked memory in the space */
- static void* allocate_tracked(const SpaceType& arg_space,
- const std::string& arg_label,
- const size_t arg_alloc_size) {
-    if (!arg_alloc_size) return nullptr;
-
- SharedAllocationRecord* const r =
- allocate(arg_space, arg_label, arg_alloc_size);
-
- RecordBase::increment(r);
-
- return r->data();
- }
-
- /**\brief Reallocate tracked memory in the space */
- static void* reallocate_tracked(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) {
- SharedAllocationRecord* const r_old = get_record(arg_alloc_ptr);
- SharedAllocationRecord* const r_new =
- allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
-
- Kokkos::Impl::DeepCopy<SpaceType, SpaceType>(
- r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
- Kokkos::fence(
- "SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace, "
- "void>::reallocate_tracked: fence after copying data");
-
- RecordBase::increment(r_new);
- RecordBase::decrement(r_old);
-
- return r_new->data();
- }
- /**\brief Deallocate tracked memory in the space */
- static void deallocate_tracked(void* const arg_alloc_ptr) {
- if (arg_alloc_ptr != nullptr) {
- SharedAllocationRecord* const r = get_record(arg_alloc_ptr);
-
- RecordBase::decrement(r);
- }
- }
-
- static SharedAllocationRecord* get_record(void* alloc_ptr) {
- using Header = SharedAllocationHeader;
- using RecordHost = SharedAllocationRecord<SpaceType, void>;
-
-    SharedAllocationHeader const* const head =
-        alloc_ptr ? Header::get_header(alloc_ptr) : nullptr;
-    RecordHost* const record =
-        head ? static_cast<RecordHost*>(head->m_record) : nullptr;
-
- if (!alloc_ptr || record->m_alloc_ptr != head) {
- Kokkos::Impl::throw_runtime_exception(std::string(
- "Kokkos::Impl::SharedAllocationRecord< LogicalMemorySpace<> , "
- "void >::get_record ERROR"));
- }
-
- return record;
- }
-#ifdef KOKKOS_ENABLE_DEBUG
- static void print_records(std::ostream& s, const SpaceType&,
- bool detail = false) {
- SharedAllocationRecord<void, void>::print_host_accessible_records(
- s, "HostSpace", &s_root_record, detail);
- }
-#else
- static void print_records(std::ostream&, const SpaceType&,
- bool detail = false) {
- (void)detail;
-    throw_runtime_exception(
-        "SharedAllocationRecord<LogicalMemorySpace>::print_records only works "
-        "with KOKKOS_ENABLE_DEBUG enabled");
- }
-#endif
-};
-#ifdef KOKKOS_ENABLE_DEBUG
-/**\brief Root record for tracked allocations from this LogicalSpace
- * instance */
-template <class BaseSpace, class DefaultBaseExecutionSpace, class Namer,
- class SharesAccessSemanticsWithBase>
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer,
- SharesAccessSemanticsWithBase>,
- void>::s_root_record;
-#endif
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
- class SharesAccess, class ExecutionSpace>
-struct DeepCopy<Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
- ExecutionSpace> {
- DeepCopy(void* dst, void* src, size_t n) {
- DeepCopy<BaseSpace, BaseSpace, ExecutionSpace>(dst, src, n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
- DeepCopy<BaseSpace, BaseSpace, ExecutionSpace>(exec, dst, src, n);
- }
-};
-
-template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
- class SharesAccess, class ExecutionSpace, class SourceSpace>
-struct DeepCopy<SourceSpace,
- Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
- ExecutionSpace> {
- DeepCopy(void* dst, void* src, size_t n) {
- DeepCopy<SourceSpace, BaseSpace, ExecutionSpace>(dst, src, n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
- DeepCopy<SourceSpace, BaseSpace, ExecutionSpace>(exec, dst, src, n);
- }
-};
-
-template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
- class SharesAccess, class ExecutionSpace, class DestinationSpace>
-struct DeepCopy<Kokkos::Experimental::LogicalMemorySpace<
- BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
- DestinationSpace, ExecutionSpace> {
- DeepCopy(void* dst, void* src, size_t n) {
- DeepCopy<BaseSpace, DestinationSpace, ExecutionSpace>(dst, src, n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
- DeepCopy<BaseSpace, DestinationSpace, ExecutionSpace>(exec, dst, src, n);
- }
-};
-} // namespace Impl
-
-} // namespace Kokkos
-#endif // KOKKOS_LOGICALSPACES_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_MASTER_LOCK_HPP
-#define KOKKOS_MASTER_LOCK_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-
-namespace Kokkos {
-namespace Experimental {
-
-// may be used to coordinate work between master instances
-// SHOULD NOT be used within a parallel algorithm
-//
-// This lock should be used with a scoped lock guard,
-// i.e. std::unique_lock<Lock> or std::lock_guard
-//
-// it cannot be copied or moved and
-// has the following functions available
-//
-// Lock()
-// ~Lock()
-//
-// void lock()
-// void unlock()
-// bool try_lock()
-//
-template <typename ExecutionSpace>
-class MasterLock;
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
-
-#endif // KOKKOS_MASTER_LOCK_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-#ifndef KOKKOS_MATHEMATICAL_CONSTANTS_HPP
-#define KOKKOS_MATHEMATICAL_CONSTANTS_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
-#endif
-
-#include <Kokkos_Macros.hpp>
-#include <type_traits>
-
-namespace Kokkos {
-namespace Experimental {
-
-#if defined(KOKKOS_ENABLE_CXX17)
-#define KOKKOS_IMPL_MATH_CONSTANT(TRAIT, VALUE) \
- template <class T> \
- inline constexpr auto TRAIT##_v = \
- std::enable_if_t<std::is_floating_point_v<T>, T>(VALUE)
-#else
-#define KOKKOS_IMPL_MATH_CONSTANT(TRAIT, VALUE) \
- template <class T> \
- constexpr auto TRAIT##_v = \
- std::enable_if_t<std::is_floating_point<T>::value, T>(VALUE)
-#endif
-
-// clang-format off
-KOKKOS_IMPL_MATH_CONSTANT(e, 2.718281828459045235360287471352662498L);
-KOKKOS_IMPL_MATH_CONSTANT(log2e, 1.442695040888963407359924681001892137L);
-KOKKOS_IMPL_MATH_CONSTANT(log10e, 0.434294481903251827651128918916605082L);
-KOKKOS_IMPL_MATH_CONSTANT(pi, 3.141592653589793238462643383279502884L);
-KOKKOS_IMPL_MATH_CONSTANT(inv_pi, 0.318309886183790671537767526745028724L);
-KOKKOS_IMPL_MATH_CONSTANT(inv_sqrtpi, 0.564189583547756286948079451560772586L);
-KOKKOS_IMPL_MATH_CONSTANT(ln2, 0.693147180559945309417232121458176568L);
-KOKKOS_IMPL_MATH_CONSTANT(ln10, 2.302585092994045684017991454684364208L);
-KOKKOS_IMPL_MATH_CONSTANT(sqrt2, 1.414213562373095048801688724209698079L);
-KOKKOS_IMPL_MATH_CONSTANT(sqrt3, 1.732050807568877293527446341505872367L);
-KOKKOS_IMPL_MATH_CONSTANT(inv_sqrt3, 0.577350269189625764509148780501957456L);
-KOKKOS_IMPL_MATH_CONSTANT(egamma, 0.577215664901532860606512090082402431L);
-KOKKOS_IMPL_MATH_CONSTANT(phi, 1.618033988749894848204586834365638118L);
-// clang-format on
-
-#undef KOKKOS_IMPL_MATH_CONSTANT
-
-} // namespace Experimental
-} // namespace Kokkos
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_NUMERIC_TRAITS_HPP
-#define KOKKOS_NUMERIC_TRAITS_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
-#endif
-
-#include <Kokkos_Macros.hpp>
-#include <cfloat>
-#include <climits>
-#include <cmath>
-#include <cstdint>
-#include <type_traits>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-// clang-format off
-template <class> struct infinity_helper {};
-template <> struct infinity_helper<float> { static constexpr float value = HUGE_VALF; };
-template <> struct infinity_helper<double> { static constexpr double value = HUGE_VAL; };
-template <> struct infinity_helper<long double> { static constexpr long double value = HUGE_VALL; };
-template <class> struct finite_min_helper {};
-template <> struct finite_min_helper<bool> { static constexpr bool value = false; };
-template <> struct finite_min_helper<char> { static constexpr char value = CHAR_MIN; };
-template <> struct finite_min_helper<signed char> { static constexpr signed char value = SCHAR_MIN; };
-template <> struct finite_min_helper<unsigned char> { static constexpr unsigned char value = 0; };
-template <> struct finite_min_helper<short> { static constexpr short value = SHRT_MIN; };
-template <> struct finite_min_helper<unsigned short> { static constexpr unsigned short value = 0; };
-template <> struct finite_min_helper<int> { static constexpr int value = INT_MIN; };
-template <> struct finite_min_helper<unsigned int> { static constexpr unsigned int value = 0; };
-template <> struct finite_min_helper<long int> { static constexpr long int value = LONG_MIN; };
-template <> struct finite_min_helper<unsigned long int> { static constexpr unsigned long int value = 0; };
-template <> struct finite_min_helper<long long int> { static constexpr long long int value = LLONG_MIN; };
-template <> struct finite_min_helper<unsigned long long int> { static constexpr unsigned long long int value = 0; };
-template <> struct finite_min_helper<float> { static constexpr float value = -FLT_MAX; };
-template <> struct finite_min_helper<double> { static constexpr double value = -DBL_MAX; };
-template <> struct finite_min_helper<long double> { static constexpr long double value = -LDBL_MAX; };
-template <class> struct finite_max_helper {};
-template <> struct finite_max_helper<bool> { static constexpr bool value = true; };
-template <> struct finite_max_helper<char> { static constexpr char value = CHAR_MAX; };
-template <> struct finite_max_helper<signed char> { static constexpr signed char value = SCHAR_MAX; };
-template <> struct finite_max_helper<unsigned char> { static constexpr unsigned char value = UCHAR_MAX; };
-template <> struct finite_max_helper<short> { static constexpr short value = SHRT_MAX; };
-template <> struct finite_max_helper<unsigned short> { static constexpr unsigned short value = USHRT_MAX; };
-template <> struct finite_max_helper<int> { static constexpr int value = INT_MAX; };
-template <> struct finite_max_helper<unsigned int> { static constexpr unsigned int value = UINT_MAX; };
-template <> struct finite_max_helper<long int> { static constexpr long int value = LONG_MAX; };
-template <> struct finite_max_helper<unsigned long int> { static constexpr unsigned long int value = ULONG_MAX; };
-template <> struct finite_max_helper<long long int> { static constexpr long long int value = LLONG_MAX; };
-template <> struct finite_max_helper<unsigned long long int> { static constexpr unsigned long long int value = ULLONG_MAX; };
-template <> struct finite_max_helper<float> { static constexpr float value = FLT_MAX; };
-template <> struct finite_max_helper<double> { static constexpr double value = DBL_MAX; };
-template <> struct finite_max_helper<long double> { static constexpr long double value = LDBL_MAX; };
-template <class> struct epsilon_helper {};
-namespace {
-  // FIXME workaround for LDBL_EPSILON being broken with the XL compiler:
-  // compute the machine epsilon directly, i.e. the smallest power of two
-  // eps for which 1 + eps still compares greater than 1.
-  template<typename T>
-  constexpr T machineeps() {
-    T epsilon = 1, prev = 1, expression = 1;
-    do {
-      prev = epsilon;          // remember the last eps with 1 + eps > 1
-      epsilon /= 2;            // halve until 1 + eps rounds to 1
-      expression = 1 + epsilon;
-    } while (expression > 1);
-    return prev;
-  }
-}  // namespace
-template <> struct epsilon_helper<float> { static constexpr float value = FLT_EPSILON; };
-template <> struct epsilon_helper<double> { static constexpr double value = DBL_EPSILON; };
-template <> struct epsilon_helper<long double> {
-#ifdef KOKKOS_COMPILER_IBM
- static constexpr long double value = machineeps<long double>();
-#else
- static constexpr long double value = LDBL_EPSILON;
-#endif
-};
-template <class> struct round_error_helper {};
-template <> struct round_error_helper<float> { static constexpr float value = 0.5F; };
-template <> struct round_error_helper<double> { static constexpr double value = 0.5; };
-template <> struct round_error_helper<long double> { static constexpr long double value = 0.5L; };
-template <class> struct norm_min_helper {};
-template <> struct norm_min_helper<float> { static constexpr float value = FLT_MIN; };
-template <> struct norm_min_helper<double> { static constexpr double value = DBL_MIN; };
-template <> struct norm_min_helper<long double> { static constexpr long double value = LDBL_MIN; };
-template <class> struct denorm_min_helper {};
-// Workaround for GCC <9.2, Clang <9, Intel
-// vvvvvvvvvvvvvvvvvvvvvvvvv
-#if (defined(KOKKOS_ENABLE_CXX17) && defined(FLT_TRUE_MIN)) || defined(_MSC_VER)
-template <> struct denorm_min_helper<float> { static constexpr float value = FLT_TRUE_MIN; };
-template <> struct denorm_min_helper<double> { static constexpr double value = DBL_TRUE_MIN; };
-template <> struct denorm_min_helper<long double> { static constexpr long double value = LDBL_TRUE_MIN; };
-#else
-template <> struct denorm_min_helper<float> { static constexpr float value = __FLT_DENORM_MIN__; };
-template <> struct denorm_min_helper<double> { static constexpr double value = __DBL_DENORM_MIN__; };
-template <> struct denorm_min_helper<long double> { static constexpr long double value = __LDBL_DENORM_MIN__; };
-#endif
-// GCC <10.3 is not able to evaluate T(1) / finite_max_v<T> at compile time when passing -frounding-math
-// https://godbolt.org/z/zj9svb1T7
-// A similar issue was reported on IBM Power even without that compiler option
-#define KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
-#ifndef KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
-// NOTE see the ?lamch routine from LAPACK, which determines machine parameters for floating-point arithmetic
-template <class T>
-constexpr T safe_minimum(T /*ignored*/) {
- constexpr auto one = static_cast<T>(1);
- constexpr auto eps = epsilon_helper<T>::value;
- constexpr auto tiny = norm_min_helper<T>::value;
- constexpr auto huge = finite_max_helper<T>::value;
- constexpr auto small = one / huge; // error: is not a constant expression
- return small >= tiny ? small * (one + eps) : tiny;
-}
-template <class> struct reciprocal_overflow_threshold_helper {};
-template <> struct reciprocal_overflow_threshold_helper<float> { static constexpr float value = safe_minimum(0.f); };
-template <> struct reciprocal_overflow_threshold_helper<double> { static constexpr double value = safe_minimum(0.); };
-template <> struct reciprocal_overflow_threshold_helper<long double> { static constexpr long double value = safe_minimum(0.l); };
-#else
-template <class> struct reciprocal_overflow_threshold_helper {};
-template <> struct reciprocal_overflow_threshold_helper<float> { static constexpr float value = norm_min_helper<float>::value; }; // OK for IEEE-754 floating-point numbers
-template <> struct reciprocal_overflow_threshold_helper<double> { static constexpr double value = norm_min_helper<double>::value; };
-template <> struct reciprocal_overflow_threshold_helper<long double> { static constexpr long double value = norm_min_helper<long double>::value; };
-#endif
-#undef KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
-template <class> struct quiet_NaN_helper {};
-template <> struct quiet_NaN_helper<float> { static constexpr float value = __builtin_nanf(""); };
-template <> struct quiet_NaN_helper<double> { static constexpr double value = __builtin_nan(""); };
-#if defined(_MSC_VER)
-template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nan(""); };
-#else
-template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nanl(""); };
-#endif
-template <class> struct signaling_NaN_helper {};
-template <> struct signaling_NaN_helper<float> { static constexpr float value = __builtin_nansf(""); };
-template <> struct signaling_NaN_helper<double> { static constexpr double value = __builtin_nans(""); };
-#if defined(_MSC_VER)
-template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nans(""); };
-#else
-template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nansl(""); };
-#endif
-template <class> struct digits_helper {};
-template <> struct digits_helper<bool> { static constexpr int value = 1; };
-template <> struct digits_helper<char> { static constexpr int value = CHAR_BIT - std::is_signed<char>::value; };
-template <> struct digits_helper<signed char> { static constexpr int value = CHAR_BIT - 1; };
-template <> struct digits_helper<unsigned char> { static constexpr int value = CHAR_BIT; };
-template <> struct digits_helper<short> { static constexpr int value = CHAR_BIT*sizeof(short)-1; };
-template <> struct digits_helper<unsigned short> { static constexpr int value = CHAR_BIT*sizeof(short); };
-template <> struct digits_helper<int> { static constexpr int value = CHAR_BIT*sizeof(int)-1; };
-template <> struct digits_helper<unsigned int> { static constexpr int value = CHAR_BIT*sizeof(int); };
-template <> struct digits_helper<long int> { static constexpr int value = CHAR_BIT*sizeof(long int)-1; };
-template <> struct digits_helper<unsigned long int> { static constexpr int value = CHAR_BIT*sizeof(long int); };
-template <> struct digits_helper<long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int)-1; };
-template <> struct digits_helper<unsigned long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int); };
-template <> struct digits_helper<float> { static constexpr int value = FLT_MANT_DIG; };
-template <> struct digits_helper<double> { static constexpr int value = DBL_MANT_DIG; };
-template <> struct digits_helper<long double> { static constexpr int value = LDBL_MANT_DIG; };
-template <class> struct digits10_helper {};
-template <> struct digits10_helper<bool> { static constexpr int value = 0; };
-// The fraction 643/2136 approximates log10(2) to 7 significant digits.
-// This works around a GCC bug with -frounding-math that prevented the
-// floating-point expression from being evaluated at compile time.
-#define DIGITS10_HELPER_INTEGRAL(TYPE) \
-template <> struct digits10_helper<TYPE> { static constexpr int value = digits_helper<TYPE>::value * 643L / 2136; };
-DIGITS10_HELPER_INTEGRAL(char)
-DIGITS10_HELPER_INTEGRAL(signed char)
-DIGITS10_HELPER_INTEGRAL(unsigned char)
-DIGITS10_HELPER_INTEGRAL(short)
-DIGITS10_HELPER_INTEGRAL(unsigned short)
-DIGITS10_HELPER_INTEGRAL(int)
-DIGITS10_HELPER_INTEGRAL(unsigned int)
-DIGITS10_HELPER_INTEGRAL(long int)
-DIGITS10_HELPER_INTEGRAL(unsigned long int)
-DIGITS10_HELPER_INTEGRAL(long long int)
-DIGITS10_HELPER_INTEGRAL(unsigned long long int)
-#undef DIGITS10_HELPER_INTEGRAL
-template <> struct digits10_helper<float> { static constexpr int value = FLT_DIG; };
-template <> struct digits10_helper<double> { static constexpr int value = DBL_DIG; };
-template <> struct digits10_helper<long double> { static constexpr int value = LDBL_DIG; };
-template <class> struct max_digits10_helper {};
-// Approximate ceil(digits<T>::value * log10(2) + 1)
-#define MAX_DIGITS10_HELPER(TYPE) \
-template <> struct max_digits10_helper<TYPE> { static constexpr int value = (digits_helper<TYPE>::value * 643L + 2135) / 2136 + 1; };
-#ifdef FLT_DECIMAL_DIG
-template <> struct max_digits10_helper<float> { static constexpr int value = FLT_DECIMAL_DIG; };
-#else
-MAX_DIGITS10_HELPER(float)
-#endif
-#ifdef DBL_DECIMAL_DIG
-template <> struct max_digits10_helper<double> { static constexpr int value = DBL_DECIMAL_DIG; };
-#else
-MAX_DIGITS10_HELPER(double)
-#endif
-#ifdef DECIMAL_DIG
-template <> struct max_digits10_helper<long double> { static constexpr int value = DECIMAL_DIG; };
-#elif defined(LDBL_DECIMAL_DIG)
-template <> struct max_digits10_helper<long double> { static constexpr int value = LDBL_DECIMAL_DIG; };
-#else
-MAX_DIGITS10_HELPER(long double)
-#endif
-#undef MAX_DIGITS10_HELPER
-template <class> struct radix_helper {};
-template <> struct radix_helper<bool> { static constexpr int value = 2; };
-template <> struct radix_helper<char> { static constexpr int value = 2; };
-template <> struct radix_helper<signed char> { static constexpr int value = 2; };
-template <> struct radix_helper<unsigned char> { static constexpr int value = 2; };
-template <> struct radix_helper<short> { static constexpr int value = 2; };
-template <> struct radix_helper<unsigned short> { static constexpr int value = 2; };
-template <> struct radix_helper<int> { static constexpr int value = 2; };
-template <> struct radix_helper<unsigned int> { static constexpr int value = 2; };
-template <> struct radix_helper<long int> { static constexpr int value = 2; };
-template <> struct radix_helper<unsigned long int> { static constexpr int value = 2; };
-template <> struct radix_helper<long long int> { static constexpr int value = 2; };
-template <> struct radix_helper<unsigned long long int> { static constexpr int value = 2; };
-template <> struct radix_helper<float> { static constexpr int value = FLT_RADIX; };
-template <> struct radix_helper<double> { static constexpr int value = FLT_RADIX; };
-template <> struct radix_helper<long double> { static constexpr int value = FLT_RADIX; };
-template <class> struct min_exponent_helper {};
-template <> struct min_exponent_helper<float> { static constexpr int value = FLT_MIN_EXP; };
-template <> struct min_exponent_helper<double> { static constexpr int value = DBL_MIN_EXP; };
-template <> struct min_exponent_helper<long double> { static constexpr int value = LDBL_MIN_EXP; };
-template <class> struct min_exponent10_helper {};
-template <> struct min_exponent10_helper<float> { static constexpr int value = FLT_MIN_10_EXP; };
-template <> struct min_exponent10_helper<double> { static constexpr int value = DBL_MIN_10_EXP; };
-template <> struct min_exponent10_helper<long double> { static constexpr int value = LDBL_MIN_10_EXP; };
-template <class> struct max_exponent_helper {};
-template <> struct max_exponent_helper<float> { static constexpr int value = FLT_MAX_EXP; };
-template <> struct max_exponent_helper<double> { static constexpr int value = DBL_MAX_EXP; };
-template <> struct max_exponent_helper<long double> { static constexpr int value = LDBL_MAX_EXP; };
-template <class> struct max_exponent10_helper {};
-template <> struct max_exponent10_helper<float> { static constexpr int value = FLT_MAX_10_EXP; };
-template <> struct max_exponent10_helper<double> { static constexpr int value = DBL_MAX_10_EXP; };
-template <> struct max_exponent10_helper<long double> { static constexpr int value = LDBL_MAX_10_EXP; };
-// clang-format on
-} // namespace Impl
-
-#if defined(KOKKOS_ENABLE_CXX17)
-#define KOKKOS_IMPL_DEFINE_TRAIT(TRAIT) \
- template <class T> \
- struct TRAIT : Impl::TRAIT##_helper<std::remove_cv_t<T>> {}; \
- template <class T> \
- inline constexpr auto TRAIT##_v = TRAIT<T>::value;
-#else
-#define KOKKOS_IMPL_DEFINE_TRAIT(TRAIT) \
- template <class T> \
- struct TRAIT : Impl::TRAIT##_helper<std::remove_cv_t<T>> {};
-#endif
-
-// Numeric distinguished value traits
-KOKKOS_IMPL_DEFINE_TRAIT(infinity)
-KOKKOS_IMPL_DEFINE_TRAIT(finite_min)
-KOKKOS_IMPL_DEFINE_TRAIT(finite_max)
-KOKKOS_IMPL_DEFINE_TRAIT(epsilon)
-KOKKOS_IMPL_DEFINE_TRAIT(round_error)
-KOKKOS_IMPL_DEFINE_TRAIT(norm_min)
-KOKKOS_IMPL_DEFINE_TRAIT(denorm_min)
-KOKKOS_IMPL_DEFINE_TRAIT(reciprocal_overflow_threshold)
-KOKKOS_IMPL_DEFINE_TRAIT(quiet_NaN)
-KOKKOS_IMPL_DEFINE_TRAIT(signaling_NaN)
-
-// Numeric characteristics traits
-KOKKOS_IMPL_DEFINE_TRAIT(digits)
-KOKKOS_IMPL_DEFINE_TRAIT(digits10)
-KOKKOS_IMPL_DEFINE_TRAIT(max_digits10)
-KOKKOS_IMPL_DEFINE_TRAIT(radix)
-KOKKOS_IMPL_DEFINE_TRAIT(min_exponent)
-KOKKOS_IMPL_DEFINE_TRAIT(min_exponent10)
-KOKKOS_IMPL_DEFINE_TRAIT(max_exponent)
-KOKKOS_IMPL_DEFINE_TRAIT(max_exponent10)
-
-#undef KOKKOS_IMPL_DEFINE_TRAIT
-
-} // namespace Experimental
-
-template <class T>
-struct reduction_identity; /*{
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T sum() { return T(); }  // 0
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T prod() {  // 1
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom prod reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T max() {  // minimum value
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom max reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T min() {  // maximum value
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom min reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T bor() {  // 0, integers only
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom bor reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T band() {  // !0, integers only
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom band reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T lor() {  // 0, integers only
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom lor reduction type");
-    return T();
-  }
-  KOKKOS_FORCEINLINE_FUNCTION constexpr static T land() {  // !0, integers only
-    static_assert(false, "Missing specialization of Kokkos::reduction_identity"
-                         " for custom land reduction type");
-    return T();
-  }
-};*/
-
-template <>
-struct reduction_identity<signed char> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char sum() {
- return static_cast<signed char>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char prod() {
- return static_cast<signed char>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char max() {
- return SCHAR_MIN;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char min() {
- return SCHAR_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char bor() {
- return static_cast<signed char>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char band() {
- return ~static_cast<signed char>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char lor() {
- return static_cast<signed char>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char land() {
- return static_cast<signed char>(1);
- }
-};
-
-template <>
-struct reduction_identity<bool> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static bool lor() {
- return static_cast<bool>(false);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static bool land() {
- return static_cast<bool>(true);
- }
-};
-
-template <>
-struct reduction_identity<short> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short sum() {
- return static_cast<short>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short prod() {
- return static_cast<short>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short max() { return SHRT_MIN; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short min() { return SHRT_MAX; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short bor() {
- return static_cast<short>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short band() {
- return ~static_cast<short>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short lor() {
- return static_cast<short>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static short land() {
- return static_cast<short>(1);
- }
-};
-
-template <>
-struct reduction_identity<int> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int sum() {
- return static_cast<int>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int prod() {
- return static_cast<int>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int max() { return INT_MIN; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int min() { return INT_MAX; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int bor() {
- return static_cast<int>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int band() {
- return ~static_cast<int>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int lor() {
- return static_cast<int>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static int land() {
- return static_cast<int>(1);
- }
-};
-
-template <>
-struct reduction_identity<long> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long sum() {
- return static_cast<long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long prod() {
- return static_cast<long>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long max() { return LONG_MIN; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long min() { return LONG_MAX; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long bor() {
- return static_cast<long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long band() {
- return ~static_cast<long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long lor() {
- return static_cast<long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long land() {
- return static_cast<long>(1);
- }
-};
-
-template <>
-struct reduction_identity<long long> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long sum() {
- return static_cast<long long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long prod() {
- return static_cast<long long>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long max() {
- return LLONG_MIN;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long min() {
- return LLONG_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long bor() {
- return static_cast<long long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long band() {
- return ~static_cast<long long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long lor() {
- return static_cast<long long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static long long land() {
- return static_cast<long long>(1);
- }
-};
-
-template <>
-struct reduction_identity<unsigned char> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char sum() {
- return static_cast<unsigned char>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char prod() {
- return static_cast<unsigned char>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char max() {
- return static_cast<unsigned char>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char min() {
- return UCHAR_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char bor() {
- return static_cast<unsigned char>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char band() {
- return ~static_cast<unsigned char>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char lor() {
- return static_cast<unsigned char>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char land() {
- return static_cast<unsigned char>(1);
- }
-};
-
-template <>
-struct reduction_identity<unsigned short> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short sum() {
- return static_cast<unsigned short>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short prod() {
- return static_cast<unsigned short>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short max() {
- return static_cast<unsigned short>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short min() {
- return USHRT_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short bor() {
- return static_cast<unsigned short>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short band() {
- return ~static_cast<unsigned short>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short lor() {
- return static_cast<unsigned short>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short land() {
- return static_cast<unsigned short>(1);
- }
-};
-
-template <>
-struct reduction_identity<unsigned int> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int sum() {
- return static_cast<unsigned int>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int prod() {
- return static_cast<unsigned int>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int max() {
- return static_cast<unsigned int>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int min() {
- return UINT_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int bor() {
- return static_cast<unsigned int>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int band() {
- return ~static_cast<unsigned int>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int lor() {
- return static_cast<unsigned int>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int land() {
- return static_cast<unsigned int>(1);
- }
-};
-
-template <>
-struct reduction_identity<unsigned long> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long sum() {
- return static_cast<unsigned long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long prod() {
- return static_cast<unsigned long>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long max() {
- return static_cast<unsigned long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long min() {
- return ULONG_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long bor() {
- return static_cast<unsigned long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long band() {
- return ~static_cast<unsigned long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long lor() {
- return static_cast<unsigned long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long land() {
- return static_cast<unsigned long>(1);
- }
-};
-
-template <>
-struct reduction_identity<unsigned long long> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long sum() {
- return static_cast<unsigned long long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long prod() {
- return static_cast<unsigned long long>(1);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long max() {
- return static_cast<unsigned long long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long min() {
- return ULLONG_MAX;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long bor() {
- return static_cast<unsigned long long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long band() {
- return ~static_cast<unsigned long long>(0x0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long lor() {
- return static_cast<unsigned long long>(0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long land() {
- return static_cast<unsigned long long>(1);
- }
-};
-
-template <>
-struct reduction_identity<float> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() {
- return static_cast<float>(0.0f);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() {
- return static_cast<float>(1.0f);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() { return -FLT_MAX; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() { return FLT_MAX; }
-};
-
-template <>
-struct reduction_identity<double> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static double sum() {
- return static_cast<double>(0.0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static double prod() {
- return static_cast<double>(1.0);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static double max() { return -DBL_MAX; }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static double min() { return DBL_MAX; }
-};
-
-// No __host__ __device__ annotation because long double is treated as double
-// in device code. May be revisited later if that is no longer true.
-template <>
-struct reduction_identity<long double> {
- constexpr static long double sum() { return static_cast<long double>(0.0); }
- constexpr static long double prod() { return static_cast<long double>(1.0); }
- constexpr static long double max() { return -LDBL_MAX; }
- constexpr static long double min() { return LDBL_MAX; }
-};
-
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_OPENMPTARGETSPACE_HPP
-#define KOKKOS_OPENMPTARGETSPACE_HPP
-
-#include <cstring>
-#include <string>
-#include <iosfwd>
-#include <typeinfo>
-
-#include <Kokkos_Core_fwd.hpp>
-
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp>
-#include <Kokkos_HostSpace.hpp>
-#include <omp.h>
-
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Impl {
-
-/// \brief Initialize lock array for arbitrary size atomics.
-///
-/// Arbitrary atomics are implemented using a hash table of locks
-/// where the hash value is derived from the address of the
-/// object for which an atomic operation is performed.
-/// This function initializes the locks to zero (unset).
-// void init_lock_array_host_space();
-
-/// \brief Acquire a lock for the address
-///
-/// This function tries to acquire the lock for the hash value derived
-/// from the provided ptr. If the lock is successfully acquired the
-/// function returns true. Otherwise it returns false.
-// bool lock_address_host_space(void* ptr);
-
-/// \brief Release lock for the address
-///
-/// This function releases the lock for the hash value derived
-/// from the provided ptr. This function should only be called
-/// after previously successfully acquiring a lock with
-/// lock_address.
-// void unlock_address_host_space(void* ptr);
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-//----------------------------------------
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::OpenMPTargetSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Experimental {
-
-/// \class OpenMPTargetSpace
-/// \brief Memory management for OpenMP target device memory.
-///
-/// OpenMPTargetSpace is a memory space that governs memory on the
-/// OpenMP target device. This memory is in general not directly
-/// accessible from the host (see the MemorySpaceAccess specializations
-/// above).
-class OpenMPTargetSpace {
- public:
- //! Tag this class as a kokkos memory space
- using memory_space = OpenMPTargetSpace;
- using size_type = unsigned;
-
- /// \typedef execution_space
- /// \brief Default execution space for this memory space.
- ///
- /// Every memory space has a default execution space. This is
- /// useful for things like initializing a View (which happens in
- /// parallel using the View's default execution space).
- using execution_space = Kokkos::Experimental::OpenMPTarget;
-
- //! This memory space preferred device_type
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- /*--------------------------------*/
-
- /**\brief Default memory space instance */
- OpenMPTargetSpace();
- OpenMPTargetSpace(OpenMPTargetSpace&& rhs) = default;
- OpenMPTargetSpace(const OpenMPTargetSpace& rhs) = default;
- OpenMPTargetSpace& operator=(OpenMPTargetSpace&&) = default;
- OpenMPTargetSpace& operator=(const OpenMPTargetSpace&) = default;
- ~OpenMPTargetSpace() = default;
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr,
- const std::size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- static constexpr const char* name() { return "OpenMPTargetSpace"; }
-
- private:
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-
- friend class Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::OpenMPTargetSpace, void>;
-};
-} // namespace Experimental
-} // namespace Kokkos
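-
-// Usage sketch (illustrative only; `n` is a hypothetical byte count): raw,
-// untracked allocation and deallocation through the space. The label is
-// used solely for profiling.
-//
-//   Kokkos::Experimental::OpenMPTargetSpace space;
-//   void* p = space.allocate("example_buffer", n);
-//   space.deallocate("example_buffer", p, n);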
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, void>
- : public HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace> {
- private:
- friend class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace>;
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace>;
- friend Kokkos::Experimental::OpenMPTargetSpace;
-
- using base_t = HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- /**\brief Root record for tracked allocations from this OpenMPTargetSpace
- * instance */
- static RecordBase s_root_record;
-
- const Kokkos::Experimental::OpenMPTargetSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate);
-
- public:
- KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
- const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc) {
- KOKKOS_IF_ON_HOST(
- (return new SharedAllocationRecord(arg_space, arg_label, arg_alloc);))
- KOKKOS_IF_ON_DEVICE(
- ((void)arg_space; (void)arg_label; (void)arg_alloc; return nullptr;))
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-// TODO: implement all possible deep_copies
-template <class ExecutionSpace>
-struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace,
- Kokkos::Experimental::OpenMPTargetSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- // omp_target_memcpy errors out on a zero-sized copy in the Release and
- // RelWithDebInfo builds, so the call is guarded with n > 0. It returns
- // zero on success.
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_default_device(),
- omp_get_default_device()));
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, OpenMPTargetSpace>: fence "
- "before "
- "copy");
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_default_device(),
- omp_get_default_device()));
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace, HostSpace,
- ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_default_device(),
- omp_get_initial_device()));
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, HostSpace>: fence before "
- "copy");
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_default_device(),
- omp_get_initial_device()));
- }
-};
-
-template <class ExecutionSpace>
-struct DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace,
- ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_initial_device(),
- omp_get_default_device()));
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence(
- "Kokkos::Impl::DeepCopy<HostSpace, OpenMPTargetSpace>: fence before "
- "copy");
- if (n > 0)
- OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
- omp_get_initial_device(),
- omp_get_default_device()));
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
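-
-// Illustrative use through the public API (`n` is hypothetical): deep_copy
-// between a device view and its host mirror dispatches to one of the
-// DeepCopy specializations above.
-//
-//   Kokkos::View<double*, Kokkos::Experimental::OpenMPTargetSpace> d("d", n);
-//   auto h = Kokkos::create_mirror_view(d);  // HostSpace mirror
-//   Kokkos::deep_copy(d, h);  // DeepCopy<OpenMPTargetSpace, HostSpace>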
-
-#endif
-#endif /* #ifndef KOKKOS_OPENMPTARGETSPACE_HPP */
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-// Experimental unified task-data parallel manycore LDRD
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP
-#define KOKKOS_IMPL_POINTEROWNERSHIP_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <Kokkos_Core_fwd.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-/// Trivial wrapper for raw pointers that express ownership.
-template <class T>
-using OwningRawPtr = T*;
-
-/// Trivial wrapper for raw pointers that do not express ownership.
-template <class T>
-using ObservingRawPtr = T*;
-
-} // end namespace Kokkos
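-
-// Both aliases are plain T* and exist purely to document intent at API
-// boundaries. Illustrative (both functions are hypothetical):
-//
-//   void adopt(Kokkos::OwningRawPtr<int> p) { delete p; }  // takes ownership
-//   void peek(Kokkos::ObservingRawPtr<const int> p);       // borrows only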
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP */
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#ifndef KOKKOSP_PROFILE_SECTION_HPP
-#define KOKKOSP_PROFILE_SECTION_HPP
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_PROFILING_PROFILESECTION
-#endif
-
-#include <Kokkos_Macros.hpp>
-#include <impl/Kokkos_Profiling_Interface.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-
-#include <string>
-
-namespace Kokkos {
-namespace Profiling {
-
-class ProfilingSection {
- public:
- ProfilingSection(ProfilingSection const&) = delete;
- ProfilingSection& operator=(ProfilingSection const&) = delete;
-
- ProfilingSection(const std::string& sectionName)
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- : secName(sectionName)
-#endif
- {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::createProfileSection(sectionName, &secID);
- }
- }
-
- void start() {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::startSection(secID);
- }
- }
-
- void stop() {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::stopSection(secID);
- }
- }
-
- ~ProfilingSection() {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::destroyProfileSection(secID);
- }
- }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED std::string getName() { return secName; }
-
- KOKKOS_DEPRECATED uint32_t getSectionID() { return secID; }
-#endif
-
- protected:
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- const std::string secName;
-#endif
- uint32_t secID;
-};
-
-} // namespace Profiling
-} // namespace Kokkos
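-
-// Usage sketch (illustrative): construct a named section once, bracket each
-// timed region with start()/stop(), and let the destructor tear the section
-// down. Every call is a no-op unless a profiling tool is loaded.
-//
-//   Kokkos::Profiling::ProfilingSection section("solver_inner_loop");
-//   section.start();
-//   // ... timed work ...
-//   section.stop();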
-
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_PROFILING_PROFILESECTION
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_PROFILING_PROFILESECTION
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_KOKKOS_RANK_HPP
-#define KOKKOS_KOKKOS_RANK_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <Kokkos_Layout.hpp> // Iterate
-
-namespace Kokkos {
-
-// Iteration Pattern
-template <unsigned N, Iterate OuterDir = Iterate::Default,
- Iterate InnerDir = Iterate::Default>
-struct Rank {
- static_assert(N != 0u, "Kokkos Error: rank 0 undefined");
- static_assert(N != 1u,
- "Kokkos Error: rank 1 is not a multi-dimensional range");
- static_assert(N < 7u, "Kokkos Error: Unsupported rank...");
-
- using iteration_pattern = Rank<N, OuterDir, InnerDir>;
-
- static constexpr int rank = N;
- static constexpr Iterate outer_direction = OuterDir;
- static constexpr Iterate inner_direction = InnerDir;
-};
-
-} // end namespace Kokkos
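-
-// Usage sketch (illustrative; `M`, `N`, and the view `a` are hypothetical):
-// Rank is consumed by MDRangePolicy to select the iteration pattern of a
-// multi-dimensional kernel.
-//
-//   Kokkos::MDRangePolicy<Kokkos::Rank<2, Kokkos::Iterate::Left,
-//                                      Kokkos::Iterate::Right>>
-//       policy({0, 0}, {M, N});
-//   Kokkos::parallel_for(
-//       "fill", policy, KOKKOS_LAMBDA(int i, int j) { a(i, j) = 1.0; });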
-
-#endif // KOKKOS_KOKKOS_RANK_HPP
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_SYCLSPACE_HPP
-#define KOKKOS_SYCLSPACE_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-
-#ifdef KOKKOS_ENABLE_SYCL
-#include <Kokkos_Concepts.hpp>
-#include <Kokkos_HostSpace.hpp>
-#include <Kokkos_ScratchSpace.hpp>
-#include <SYCL/Kokkos_SYCL_Instance.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
-#include <impl/Kokkos_Tools.hpp>
-
-namespace Kokkos {
-
-namespace Impl {
-template <typename T>
-struct is_sycl_type_space : public std::false_type {};
-} // namespace Impl
-
-namespace Experimental {
-
-class SYCLDeviceUSMSpace {
- public:
- using execution_space = SYCL;
- using memory_space = SYCLDeviceUSMSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using size_type = Impl::SYCLInternal::size_type;
-
- SYCLDeviceUSMSpace();
- explicit SYCLDeviceUSMSpace(sycl::queue queue);
-
- void* allocate(const SYCL& exec_space,
- const std::size_t arg_alloc_size) const;
- void* allocate(const SYCL& exec_space, const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
- void* allocate(const std::size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- void deallocate(void* const arg_alloc_ptr,
- const std::size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
-
- public:
- static constexpr const char* name() { return "SYCLDeviceUSM"; }
-
- private:
- sycl::queue m_queue;
-};
-
-class SYCLSharedUSMSpace {
- public:
- using execution_space = SYCL;
- using memory_space = SYCLSharedUSMSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using size_type = Impl::SYCLInternal::size_type;
-
- SYCLSharedUSMSpace();
- explicit SYCLSharedUSMSpace(sycl::queue queue);
-
- void* allocate(const SYCL& exec_space,
- const std::size_t arg_alloc_size) const;
- void* allocate(const SYCL& exec_space, const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
- void* allocate(const std::size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- void deallocate(void* const arg_alloc_ptr,
- const std::size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
-
- public:
- static constexpr const char* name() { return "SYCLSharedUSM"; }
-
- private:
- sycl::queue m_queue;
-};
-
-class SYCLHostUSMSpace {
- public:
- using execution_space = HostSpace::execution_space;
- using memory_space = SYCLHostUSMSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
- using size_type = Impl::SYCLInternal::size_type;
-
- SYCLHostUSMSpace();
- explicit SYCLHostUSMSpace(sycl::queue queue);
-
- void* allocate(const SYCL& exec_space,
- const std::size_t arg_alloc_size) const;
- void* allocate(const SYCL& exec_space, const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
- void* allocate(const std::size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- void deallocate(void* const arg_alloc_ptr,
- const std::size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- private:
- template <class, class, class, class>
- friend class LogicalMemorySpace;
-
- public:
- static constexpr const char* name() { return "SYCLHostUSM"; }
-
- private:
- sycl::queue m_queue;
-};
-
-} // namespace Experimental
-
-namespace Impl {
-
-template <>
-struct is_sycl_type_space<Kokkos::Experimental::SYCLDeviceUSMSpace>
- : public std::true_type {};
-
-template <>
-struct is_sycl_type_space<Kokkos::Experimental::SYCLSharedUSMSpace>
- : public std::true_type {};
-
-template <>
-struct is_sycl_type_space<Kokkos::Experimental::SYCLHostUSMSpace>
- : public std::true_type {};
-
-static_assert(Kokkos::Impl::MemorySpaceAccess<
- Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace>::assignable,
- "");
-
-static_assert(Kokkos::Impl::MemorySpaceAccess<
- Kokkos::Experimental::SYCLSharedUSMSpace,
- Kokkos::Experimental::SYCLSharedUSMSpace>::assignable,
- "");
-
-static_assert(Kokkos::Impl::MemorySpaceAccess<
- Kokkos::Experimental::SYCLHostUSMSpace,
- Kokkos::Experimental::SYCLHostUSMSpace>::assignable,
- "");
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::SYCLSharedUSMSpace> {
- // HostSpace::execution_space != SYCLSharedUSMSpace::execution_space
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::SYCLHostUSMSpace> {
- // HostSpace::execution_space ==
- // Experimental::SYCLHostUSMSpace::execution_space
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLSharedUSMSpace> {
- // SYCLDeviceUSMSpace::execution_space == SYCLSharedUSMSpace::execution_space
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLHostUSMSpace> {
- // Experimental::SYCLDeviceUSMSpace::execution_space !=
- // Experimental::SYCLHostUSMSpace::execution_space
- enum : bool { assignable = false };
- enum : bool {
- accessible = true
- }; // Experimental::SYCLDeviceUSMSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-//----------------------------------------
-// SYCLSharedUSMSpace::execution_space == SYCL
-// SYCLSharedUSMSpace accessible to both SYCL and Host
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false }; // SYCL cannot access HostSpace
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace> {
- // SYCLSharedUSMSpace::execution_space == SYCLDeviceUSMSpace::execution_space
- // Can access SYCLSharedUSMSpace from Host but cannot access
- // SYCLDeviceUSMSpace from Host
- enum : bool { assignable = false };
-
- // SYCLSharedUSMSpace::execution_space can access SYCLDeviceUSMSpace
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
- Kokkos::Experimental::SYCLHostUSMSpace> {
- // Experimental::SYCLSharedUSMSpace::execution_space !=
- // Experimental::SYCLHostUSMSpace::execution_space
- enum : bool { assignable = false };
- enum : bool {
- accessible = true
- }; // Experimental::SYCLSharedUSMSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false }; // Cannot access from SYCL
- enum : bool {
- accessible = true
- }; // Experimental::SYCLHostUSMSpace::execution_space
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace> {
- enum : bool { assignable = false }; // Cannot access from Host
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
- Kokkos::Experimental::SYCLSharedUSMSpace> {
- enum : bool { assignable = false }; // different execution_space
- enum : bool { accessible = true }; // same accessibility
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct MemorySpaceAccess<
- Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::ScratchMemorySpace<Kokkos::Experimental::SYCL>> {
- enum : bool { assignable = false };
- enum : bool { accessible = true };
- enum : bool { deepcopy = false };
-};
-
-} // namespace Impl
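-
-// Summary of the access matrix above: host code may dereference
-// SYCLSharedUSMSpace and SYCLHostUSMSpace allocations directly, whereas
-// SYCLDeviceUSMSpace memory must be reached via deep_copy. Illustrative
-// (`n` is hypothetical):
-//
-//   Kokkos::View<int*, Kokkos::Experimental::SYCLSharedUSMSpace> s("s", n);
-//   s(0) = 42;  // legal on the host: shared USM is host-accessible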
-
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>
- : public HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace> {
- private:
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace>;
- friend class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace>;
- using base_t = HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord(SharedAllocationRecord&&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
-
-#ifdef KOKKOS_ENABLE_DEBUG
- static RecordBase s_root_record;
-#endif
-
- const Kokkos::Experimental::SYCLDeviceUSMSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& exec_space,
- const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>
- : public SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLSharedUSMSpace> {
- private:
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLSharedUSMSpace>;
- using base_t =
- SharedAllocationRecordCommon<Kokkos::Experimental::SYCLSharedUSMSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord(SharedAllocationRecord&&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
-
- static RecordBase s_root_record;
-
- const Kokkos::Experimental::SYCLSharedUSMSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
-
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& exec_space,
- const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>
- : public SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLHostUSMSpace> {
- private:
- friend class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLHostUSMSpace>;
- using base_t =
- SharedAllocationRecordCommon<Kokkos::Experimental::SYCLHostUSMSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord(SharedAllocationRecord&&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
-
- static RecordBase s_root_record;
-
- const Kokkos::Experimental::SYCLHostUSMSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
-
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& exec_space,
- const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-#endif // KOKKOS_ENABLE_SYCL
-#endif // KOKKOS_SYCLSPACE_HPP
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-// For backward compatibility:
-#include <Kokkos_Macros.hpp>
-
-KOKKOS_IMPL_WARNING(
- "This file is deprecated. Use <Kokkos_TaskScheduler.hpp> instead.")
-
-#include <Kokkos_TaskScheduler.hpp>
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-/// \file Kokkos_Vectorization.hpp
-/// \brief Declaration and definition of Kokkos::Vectorization interface.
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_VECTORIZATION_HPP
-#define KOKKOS_VECTORIZATION_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Vectorization.hpp>
-#elif defined(KOKKOS_ENABLE_HIP)
-#include <HIP/Kokkos_HIP_Vectorization.hpp>
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-
-#include <OpenACC/Kokkos_OpenACC.hpp>
-#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-#include <ostream>
-
-Kokkos::Experimental::OpenACC::OpenACC()
- : m_space_instance(Impl::OpenACCInternal::singleton()) {}
-
-void Kokkos::Experimental::OpenACC::impl_initialize(
- InitializationSettings const& settings) {
- Impl::OpenACCInternal::singleton()->initialize(settings);
-}
-
-void Kokkos::Experimental::OpenACC::impl_finalize() {
- Impl::OpenACCInternal::singleton()->finalize();
-}
-
-bool Kokkos::Experimental::OpenACC::impl_is_initialized() {
- return Impl::OpenACCInternal::singleton()->is_initialized();
-}
-
-void Kokkos::Experimental::OpenACC::print_configuration(std::ostream& os,
- bool verbose) const {
- os << "macro KOKKOS_ENABLE_OPENACC is defined\n"; // FIXME_OPENACC
- m_space_instance->print_configuration(os, verbose);
-}
-
-void Kokkos::Experimental::OpenACC::fence(std::string const& name) const {
- Impl::OpenACCInternal::singleton()->fence(name);
-}
-
-void Kokkos::Experimental::OpenACC::impl_static_fence(std::string const& name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::OpenACC>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- [&]() { acc_wait_all(); });
-}
-
-uint32_t Kokkos::Experimental::OpenACC::impl_instance_id() const noexcept {
- return m_space_instance->instance_id();
-}
-
-namespace Kokkos {
-namespace Impl {
-int g_openacc_space_factory_initialized =
- initialize_space_factory<Experimental::OpenACC>("170_OpenACC");
-} // namespace Impl
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-
-#ifndef KOKKOS_OPENACC_HPP
-#define KOKKOS_OPENACC_HPP
-
-#include <OpenACC/Kokkos_OpenACCSpace.hpp>
-#include <Kokkos_Concepts.hpp>
-#include <Kokkos_Layout.hpp>
-#include <Kokkos_ScratchSpace.hpp>
-#include <impl/Kokkos_InitializationSettings.hpp>
-#include <impl/Kokkos_Profiling_Interface.hpp>
-#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
-
-#include <openacc.h>
-
-#include <iosfwd>
-#include <string>
-
-namespace Kokkos::Experimental::Impl {
-class OpenACCInternal;
-}
-
-namespace Kokkos::Experimental {
-
-class OpenACC {
- Impl::OpenACCInternal* m_space_instance = nullptr;
-
- public:
- using execution_space = OpenACC;
- using memory_space = OpenACCSpace;
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- using array_layout = LayoutLeft;
- using size_type = memory_space::size_type;
-
- using scratch_memory_space = ScratchMemorySpace<OpenACC>;
-
- OpenACC();
-
- static void impl_initialize(InitializationSettings const& settings);
- static void impl_finalize();
- static bool impl_is_initialized();
-
- void print_configuration(std::ostream& os, bool verbose = false) const;
-
- void fence(std::string const& name =
- "Kokkos::OpenACC::fence(): Unnamed Instance Fence") const;
- static void impl_static_fence(std::string const& name);
-
- static char const* name() { return "OpenACC"; }
- static int concurrency() { return 256000; } // FIXME_OPENACC
- static bool in_parallel() { return acc_on_device(acc_device_not_host); }
- uint32_t impl_instance_id() const noexcept;
-};
-
-} // namespace Kokkos::Experimental
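-
-// Usage sketch (illustrative): instances share the internal singleton and
-// can be queried and fenced by name.
-//
-//   Kokkos::Experimental::OpenACC acc;
-//   acc.print_configuration(std::cout);
-//   acc.fence("example: wait for queued OpenACC work");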
-
-template <>
-struct Kokkos::Tools::Experimental::DeviceTypeTraits<
- ::Kokkos::Experimental::OpenACC> {
- static constexpr DeviceType id =
- ::Kokkos::Profiling::Experimental::DeviceType::OpenACC;
- // FIXME_OPENACC: Need to return the device id from the execution space
- // instance. In fact, acc_get_device_num() will return the same value as the
- // device id from the execution space instance except for the host fallback
- // case, where the device id may need to be updated with the value of
- // acc_get_device_num().
- static int device_id(const Kokkos::Experimental::OpenACC&) {
- using Kokkos::Experimental::Impl::OpenACC_Traits;
- return acc_get_device_num(OpenACC_Traits::dev_type);
- }
-};
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-
-#include <OpenACC/Kokkos_OpenACC.hpp>
-#include <OpenACC/Kokkos_OpenACCSpace.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <impl/Kokkos_Profiling_Interface.hpp>
-
-#include <openacc.h>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-void *Kokkos::Experimental::OpenACCSpace::allocate(
- const Kokkos::Experimental::OpenACC &exec_space,
- const size_t arg_alloc_size) const {
- return allocate(exec_space, "[unlabeled]", arg_alloc_size);
-}
-
-void *Kokkos::Experimental::OpenACCSpace::allocate(
- const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-
-void *Kokkos::Experimental::OpenACCSpace::allocate(
- const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
- const size_t arg_alloc_size, const size_t arg_logical_size) const {
- return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
-}
-
-void *Kokkos::Experimental::OpenACCSpace::allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-
-void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
- const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- static_assert(sizeof(void *) == sizeof(uintptr_t),
- "Error sizeof(void*) != sizeof(uintptr_t)");
-
- void *ptr = nullptr;
-
- // FIXME_OPENACC multiple device instances are not yet supported, and thus
- // exec_space is ignored for now.
- (void)exec_space;
-
- ptr = acc_malloc(arg_alloc_size);
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
-
-void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- static_assert(sizeof(void *) == sizeof(uintptr_t),
- "Error sizeof(void*) != sizeof(uintptr_t)");
-
- void *ptr = nullptr;
-
- //[DEBUG] Disabled due to the synchronous behavior of the current
- // implementation.
- /*
- OpenACC::impl_static_fence(
- "Kokkos::OpenACCSpace::impl_allocate: Pre OpenACC Allocation");
- */
-
- ptr = acc_malloc(arg_alloc_size);
-
- //[DEBUG] Disabled due to the synchronous behavior of the current
- // implementation.
- /*
- OpenACC::impl_static_fence(
- "Kokkos::OpenACCSpace::impl_allocate: Post OpenACC Allocation");
- */
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
-
-void Kokkos::Experimental::OpenACCSpace::deallocate(
- void *const arg_alloc_ptr, const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void Kokkos::Experimental::OpenACCSpace::deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-
-void Kokkos::Experimental::OpenACCSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
-
- if (arg_alloc_ptr) {
- acc_free(arg_alloc_ptr);
- }
-}
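-
-// Illustrative round trip over the untracked interface above (`n` is a
-// hypothetical byte count; the profiling hooks fire only when a tool is
-// loaded):
-//
-//   Kokkos::Experimental::OpenACCSpace space;
-//   void* p = space.allocate("acc_buffer", n);
-//   space.deallocate("acc_buffer", p, n);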
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#ifdef KOKKOS_ENABLE_DEBUG
-Kokkos::Impl::SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::OpenACCSpace, void>::s_root_record;
-#endif
-
-Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
- void>::~SharedAllocationRecord() {
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
-}
-
-Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::OpenACCSpace &arg_space,
- const std::string &arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- Kokkos::Impl::DeepCopy<Experimental::OpenACCSpace, HostSpace>(
- RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
- Kokkos::fence(
- "SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes
-// here, where we have access to the associated *_timpl.hpp header files.
-template class Kokkos::Impl::HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenACCSpace>;
-template class Kokkos::Impl::SharedAllocationRecordCommon<
- Kokkos::Experimental::OpenACCSpace>;
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
+++ /dev/null
-/*
-//@HEADER
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS). BSD 3-clause license; the full header
-// text is identical to the copy reproduced earlier in this patch.
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-
-#ifndef KOKKOS_OPENACC_SPACE_HPP
-#define KOKKOS_OPENACC_SPACE_HPP
-
-#include <Kokkos_Concepts.hpp>
-
-#include <impl/Kokkos_Tools.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
-
-#include <openacc.h>
-#include <iosfwd>
-
-namespace Kokkos::Experimental {
-
-class OpenACC;
-
-class OpenACCSpace {
- public:
- using memory_space = OpenACCSpace;
- using execution_space = OpenACC;
- using device_type = Kokkos::Device<execution_space, memory_space>;
-
- using size_type = size_t;
-
- OpenACCSpace() = default;
-
- /**\brief Allocate untracked memory in the space */
- void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
- const size_t arg_alloc_size) const;
- void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
- void* allocate(const size_t arg_alloc_size) const;
- void* allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- /**\brief Deallocate untracked memory in the space */
- void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
- void deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0) const;
-
- static constexpr char const* name() { return "OpenACCSpace"; }
-
- private:
- void* impl_allocate(const Kokkos::Experimental::OpenACC& exec_space,
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
- void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size = 0,
- const Kokkos::Tools::SpaceHandle =
- Kokkos::Tools::make_space_handle(name())) const;
-};
-
-} // namespace Kokkos::Experimental
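-
-// A minimal usage sketch of the interface above (illustrative only;
-// "my_label" is a hypothetical tracking label):
-//
-//   Kokkos::Experimental::OpenACCSpace space;
-//   void* ptr = space.allocate("my_label", 128);
-//   // ... use ptr from OpenACC device code ...
-//   space.deallocate("my_label", ptr, 128);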
-
-/*--------------------------------------------------------------------------*/
-
-template <>
-struct Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
- Kokkos::Experimental::OpenACCSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
- Kokkos::HostSpace> {
- enum : bool { assignable = false };
- enum : bool { accessible = false };
- enum : bool { deepcopy = true };
-};
-
-template <>
-struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
- Kokkos::Experimental::OpenACCSpace> {
- enum : bool { assignable = true };
- enum : bool { accessible = true };
- enum : bool { deepcopy = true };
-};
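-
-// In short: host and OpenACC device memory are disjoint. Neither side may
-// assign to or dereference pointers from the other (assignable and
-// accessible are false across spaces), but Kokkos::deep_copy() between the
-// two is supported (deepcopy is true), and copies within OpenACCSpace are
-// unrestricted.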
-/*--------------------------------------------------------------------------*/
-
-template <>
-class Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
- void>
- : public HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenACCSpace> {
- private:
- friend class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenACCSpace>;
- friend class SharedAllocationRecordCommon<Kokkos::Experimental::OpenACCSpace>;
- friend Kokkos::Experimental::OpenACCSpace;
-
- using base_t = HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenACCSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- /**\brief Root record for tracked allocations from this OpenACCSpace
- * instance */
- static RecordBase s_root_record;
-
- const Kokkos::Experimental::OpenACCSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::Experimental::OpenACCSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate)
- : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
- arg_dealloc) {}
-
- SharedAllocationRecord(
- const Kokkos::Experimental::OpenACCSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &deallocate);
-
- public:
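- // Records can only be constructed in host code: when this function is
- // compiled for the device, acc_on_device(acc_device_host) is false and
- // allocate() returns nullptr instead of creating a record.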
- KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
- const Kokkos::Experimental::OpenACCSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size) {
- if (acc_on_device(acc_device_host)) {
- return new SharedAllocationRecord(arg_space, arg_label, arg_alloc_size);
- } else {
- return nullptr;
- }
- }
-};
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-// FIXME_OPENACC: Need to update the DeepCopy implementations below to support
-// multiple execution space instances.
-// The current OpenACC backend implementation assumes that there is only one
-// device execution space instance, and all the device operations (e.g., memory
-// transfers, kernel launches, etc.) are implemented to be synchronous, which
-// does not violate the Kokkos execution semantics with the single execution
-// space instance.
-template <class ExecutionSpace>
-struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
- Kokkos::Experimental::OpenACCSpace,
- ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- // The behavior of acc_memcpy_device when the bytes argument is zero is
- // clarified only in the latest OpenACC specification (V3.2), so this value
- // check is added as a safeguard. (The current NVHPC (V22.5) supports
- // OpenACC V2.7.)
- if (n > 0) acc_memcpy_device(dst, const_cast<void*>(src), n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence();
- if (n > 0) acc_memcpy_device(dst, const_cast<void*>(src), n);
- }
-};
-
-template <class ExecutionSpace>
-struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
- Kokkos::HostSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- if (n > 0) acc_memcpy_to_device(dst, const_cast<void*>(src), n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence();
- if (n > 0) acc_memcpy_to_device(dst, const_cast<void*>(src), n);
- }
-};
-
-template <class ExecutionSpace>
-struct Kokkos::Impl::DeepCopy<
- Kokkos::HostSpace, Kokkos::Experimental::OpenACCSpace, ExecutionSpace> {
- DeepCopy(void* dst, const void* src, size_t n) {
- if (n > 0) acc_memcpy_from_device(dst, const_cast<void*>(src), n);
- }
- DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
- exec.fence();
- if (n > 0) acc_memcpy_from_device(dst, const_cast<void*>(src), n);
- }
-};
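-
-// Illustrative mapping (a sketch, assuming Kokkos::deep_copy() dispatches to
-// the specializations above; host_view/dev_view/dev_view2 are hypothetical
-// Views in HostSpace and OpenACCSpace, respectively):
-//
-//   Kokkos::deep_copy(dev_view, host_view);  // acc_memcpy_to_device
-//   Kokkos::deep_copy(host_view, dev_view);  // acc_memcpy_from_device
-//   Kokkos::deep_copy(dev_view, dev_view2);  // acc_memcpy_device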
-
-#endif
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-
-#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
-#include <OpenACC/Kokkos_OpenACC.hpp>
-#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-#include <impl/Kokkos_DeviceManagement.hpp>
-
-#include <openacc.h>
-
-#include <iostream>
-
-namespace Kokkos {
-bool show_warnings() noexcept;
-}
-
-Kokkos::Experimental::Impl::OpenACCInternal*
-Kokkos::Experimental::Impl::OpenACCInternal::singleton() {
- static OpenACCInternal self;
- return &self;
-}
-
-void Kokkos::Experimental::Impl::OpenACCInternal::initialize(
- InitializationSettings const& settings) {
- if (OpenACC_Traits::may_fallback_to_host &&
- acc_get_num_devices(OpenACC_Traits::dev_type) == 0 &&
- !settings.has_device_id()) {
- if (show_warnings()) {
- std::cerr << "Warning: No GPU available for execution, falling back to"
- " using the host!"
- << std::endl;
- }
- acc_set_device_type(acc_device_host);
- // FIXME_OPENACC if multiple execution space instances are supported,
- // device id variable should be explicitly set to the value returned by
- // acc_get_device_num(acc_device_host).
- } else {
- using Kokkos::Impl::get_gpu;
- int const dev_num = get_gpu(settings);
- acc_set_device_num(dev_num, OpenACC_Traits::dev_type);
- }
- m_is_initialized = true;
-}
-
-void Kokkos::Experimental::Impl::OpenACCInternal::finalize() {
- m_is_initialized = false;
-}
-
-bool Kokkos::Experimental::Impl::OpenACCInternal::is_initialized() const {
- return m_is_initialized;
-}
-
-void Kokkos::Experimental::Impl::OpenACCInternal::print_configuration(
- std::ostream& os, bool /*verbose*/) const {
- os << "Using OpenACC\n"; // FIXME_OPENACC
-}
-
-void Kokkos::Experimental::Impl::OpenACCInternal::fence(
- std::string const& name) const {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::OpenACC>(
- name,
- Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id()},
- [&]() {
- // acc_wait_all() is intentionally not called here: the current parallel
- // construct implementations are synchronous, so there is nothing to
- // wait on.
- // acc_wait_all();
- });
-}
-
-uint32_t Kokkos::Experimental::Impl::OpenACCInternal::instance_id() const
- noexcept {
- return Kokkos::Tools::Experimental::Impl::idForInstance<
- Kokkos::Experimental::OpenACC>(reinterpret_cast<uintptr_t>(this));
-}
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#ifndef KOKKOS_OPENACC_INSTANCE_HPP
-#define KOKKOS_OPENACC_INSTANCE_HPP
-
-#include <impl/Kokkos_InitializationSettings.hpp>
-
-#include <cstdint>
-#include <iosfwd>
-#include <string>
-
-namespace Kokkos::Experimental::Impl {
-
-class OpenACCInternal {
- bool m_is_initialized = false;
-
- OpenACCInternal() = default;
- OpenACCInternal(const OpenACCInternal&) = default;
- OpenACCInternal& operator=(const OpenACCInternal&) = default;
-
- public:
- static OpenACCInternal* singleton();
-
- void initialize(InitializationSettings const& settings);
- void finalize();
- bool is_initialized() const;
-
- void print_configuration(std::ostream& os, bool verbose = false) const;
-
- void fence(std::string const& name) const;
-
- uint32_t instance_id() const noexcept;
-};
-
-} // namespace Kokkos::Experimental::Impl
-
-#endif
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#ifndef KOKKOS_OPENACC_TRAITS_HPP
-#define KOKKOS_OPENACC_TRAITS_HPP
-
-#include <openacc.h>
-
-namespace Kokkos::Experimental::Impl {
-
-struct OpenACC_Traits {
-#if defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
- defined(KOKKOS_ARCH_AMPERE)
- static constexpr acc_device_t dev_type = acc_device_nvidia;
- static constexpr bool may_fallback_to_host = false;
-#else
- static constexpr acc_device_t dev_type = acc_device_not_host;
- static constexpr bool may_fallback_to_host = true;
-#endif
-};
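-
-// On the NVIDIA architectures listed above the device type is pinned to
-// acc_device_nvidia and host fallback is disabled; otherwise any non-host
-// device is accepted and OpenACCInternal::initialize() may fall back to the
-// host when no device is available.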
-
-} // namespace Kokkos::Experimental::Impl
-
-#endif
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <cstdio>
-#include <cstdlib>
-
-#include <limits>
-#include <iostream>
-#include <vector>
-
-#include <Kokkos_Core.hpp>
-
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_CPUDiscovery.hpp>
-#include <impl/Kokkos_Tools.hpp>
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-int g_openmp_hardware_max_threads = 1;
-
-thread_local int t_openmp_hardware_id = 0;
-// FIXME_OPENMP we can remove this after we remove partition_master
-thread_local OpenMPInternal *t_openmp_instance = nullptr;
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-void OpenMPInternal::validate_partition_impl(const int nthreads,
- int &num_partitions,
- int &partition_size) {
- if (nthreads == 1) {
- num_partitions = 1;
- partition_size = 1;
- } else if (num_partitions < 1 && partition_size < 1) {
- int idle = nthreads;
- for (int np = 2; np <= nthreads; ++np) {
- for (int ps = 1; ps <= nthreads / np; ++ps) {
- if (nthreads - np * ps < idle) {
- idle = nthreads - np * ps;
- num_partitions = np;
- partition_size = ps;
- }
- if (idle == 0) {
- break;
- }
- }
- }
- } else if (num_partitions < 1 && partition_size > 0) {
- if (partition_size <= nthreads) {
- num_partitions = nthreads / partition_size;
- } else {
- num_partitions = 1;
- partition_size = nthreads;
- }
- } else if (num_partitions > 0 && partition_size < 1) {
- if (num_partitions <= nthreads) {
- partition_size = nthreads / num_partitions;
- } else {
- num_partitions = nthreads;
- partition_size = 1;
- }
- } else if (num_partitions * partition_size > nthreads) {
- int idle = nthreads;
- const int NP = num_partitions;
- const int PS = partition_size;
- for (int np = NP; np > 0; --np) {
- for (int ps = PS; ps > 0; --ps) {
- if ((np * ps <= nthreads) && (nthreads - np * ps < idle)) {
- idle = nthreads - np * ps;
- num_partitions = np;
- partition_size = ps;
- }
- if (idle == 0) {
- break;
- }
- }
- }
- }
-}
-#endif
-
-void OpenMPInternal::clear_thread_data() {
- const size_t member_bytes =
- sizeof(int64_t) *
- HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
-
- const int old_alloc_bytes =
- m_pool[0] ? (member_bytes + m_pool[0]->scratch_bytes()) : 0;
-
- OpenMP::memory_space space;
-
-#pragma omp parallel num_threads(m_pool_size)
- {
- const int rank = omp_get_thread_num();
-
- if (nullptr != m_pool[rank]) {
- m_pool[rank]->disband_pool();
-
- space.deallocate(m_pool[rank], old_alloc_bytes);
-
- m_pool[rank] = nullptr;
- }
- }
- /* END #pragma omp parallel */
-}
-
-void OpenMPInternal::resize_thread_data(size_t pool_reduce_bytes,
- size_t team_reduce_bytes,
- size_t team_shared_bytes,
- size_t thread_local_bytes) {
- const size_t member_bytes =
- sizeof(int64_t) *
- HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
-
- HostThreadTeamData *root = m_pool[0];
-
- const size_t old_pool_reduce = root ? root->pool_reduce_bytes() : 0;
- const size_t old_team_reduce = root ? root->team_reduce_bytes() : 0;
- const size_t old_team_shared = root ? root->team_shared_bytes() : 0;
- const size_t old_thread_local = root ? root->thread_local_bytes() : 0;
- const size_t old_alloc_bytes =
- root ? (member_bytes + root->scratch_bytes()) : 0;
-
- // Allocate if any of the old allocations is too small:
-
- const bool allocate = (old_pool_reduce < pool_reduce_bytes) ||
- (old_team_reduce < team_reduce_bytes) ||
- (old_team_shared < team_shared_bytes) ||
- (old_thread_local < thread_local_bytes);
-
- if (allocate) {
- if (pool_reduce_bytes < old_pool_reduce) {
- pool_reduce_bytes = old_pool_reduce;
- }
- if (team_reduce_bytes < old_team_reduce) {
- team_reduce_bytes = old_team_reduce;
- }
- if (team_shared_bytes < old_team_shared) {
- team_shared_bytes = old_team_shared;
- }
- if (thread_local_bytes < old_thread_local) {
- thread_local_bytes = old_thread_local;
- }
-
- const size_t alloc_bytes =
- member_bytes +
- HostThreadTeamData::scratch_size(pool_reduce_bytes, team_reduce_bytes,
- team_shared_bytes, thread_local_bytes);
-
- OpenMP::memory_space space;
-
- memory_fence();
-
-#pragma omp parallel num_threads(m_pool_size)
- {
- const int rank = omp_get_thread_num();
-
- if (nullptr != m_pool[rank]) {
- m_pool[rank]->disband_pool();
-
- space.deallocate(m_pool[rank], old_alloc_bytes);
- }
-
- void *ptr = nullptr;
- try {
- ptr = space.allocate(alloc_bytes);
- } catch (
- Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
- // For now, just rethrow the error message the existing way
- Kokkos::Impl::throw_runtime_exception(failure.get_error_message());
- }
-
- m_pool[rank] = new (ptr) HostThreadTeamData();
-
- m_pool[rank]->scratch_assign(((char *)ptr) + member_bytes, alloc_bytes,
- pool_reduce_bytes, team_reduce_bytes,
- team_shared_bytes, thread_local_bytes);
-
- memory_fence();
- }
- /* END #pragma omp parallel */
-
- HostThreadTeamData::organize_pool(m_pool, m_pool_size);
- }
-}
-
-OpenMPInternal &OpenMPInternal::singleton() {
- static OpenMPInternal *self = nullptr;
- if (self == nullptr) {
- self = new OpenMPInternal(get_current_max_threads());
- }
-
- return *self;
-}
-
-int OpenMPInternal::get_current_max_threads() noexcept {
- // Using omp_get_max_threads() here is problematic in conjunction with
- // hwloc on Intel: an initial call into the OpenMP runtime without a
- // parallel region sets a process mask for a single core. On entering the
- // first parallel region the runtime then binds threads to other cores and
- // makes the process mask the aggregate of the thread masks. The intent
- // seems to be to make serial code run fast when compiling with OpenMP
- // enabled without actually using parallel regions. So instead of
- //   static int omp_max_threads = omp_get_max_threads();
- // count the threads of an actual parallel region:
-
- int count = 0;
-#pragma omp parallel
- {
-#pragma omp atomic
- ++count;
- }
- return count;
-}
-
-void OpenMPInternal::initialize(int thread_count) {
- if (m_initialized) {
- Kokkos::abort(
- "Calling OpenMP::initialize after OpenMP::finalize is illegal\n");
- }
-
- if (omp_in_parallel()) {
- std::string msg("Kokkos::OpenMP::initialize ERROR : in parallel");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-
- {
- if (Kokkos::show_warnings() && nullptr == std::getenv("OMP_PROC_BIND")) {
- printf(
- "Kokkos::OpenMP::initialize WARNING: OMP_PROC_BIND environment "
- "variable not set\n");
- printf(
- " In general, for best performance with OpenMP 4.0 or better set "
- "OMP_PROC_BIND=spread and OMP_PLACES=threads\n");
- printf(" For best performance with OpenMP 3.1 set OMP_PROC_BIND=true\n");
- printf(" For unit testing set OMP_PROC_BIND=false\n");
- }
-
- OpenMP::memory_space space;
-
- // Before any other call into OpenMP, query the maximum number of threads
- // and save the value for re-initialization in unit testing.
-
- Impl::g_openmp_hardware_max_threads = get_current_max_threads();
-
- int process_num_threads = Impl::g_openmp_hardware_max_threads;
-
- if (Kokkos::hwloc::available()) {
- process_num_threads = Kokkos::hwloc::get_available_numa_count() *
- Kokkos::hwloc::get_available_cores_per_numa() *
- Kokkos::hwloc::get_available_threads_per_core();
- }
-
- // if thread_count < 0, use g_openmp_hardware_max_threads;
- // if thread_count == 0, set g_openmp_hardware_max_threads to
- //   process_num_threads;
- // if thread_count > 0, set g_openmp_hardware_max_threads to thread_count.
- if (thread_count < 0) {
- thread_count = Impl::g_openmp_hardware_max_threads;
- } else if (thread_count == 0) {
- if (Impl::g_openmp_hardware_max_threads != process_num_threads) {
- Impl::g_openmp_hardware_max_threads = process_num_threads;
- omp_set_num_threads(Impl::g_openmp_hardware_max_threads);
- }
- } else {
- if (Kokkos::show_warnings() && thread_count > process_num_threads) {
- printf(
- "Kokkos::OpenMP::initialize WARNING: You are likely "
- "oversubscribing your CPU cores.\n");
- printf(" process threads available : %3d, requested threads : %3d\n",
- process_num_threads, thread_count);
- }
- Impl::g_openmp_hardware_max_threads = thread_count;
- omp_set_num_threads(Impl::g_openmp_hardware_max_threads);
- }
-
-// setup thread local
-#pragma omp parallel num_threads(Impl::g_openmp_hardware_max_threads)
- {
- Impl::t_openmp_hardware_id = omp_get_thread_num();
- Impl::SharedAllocationRecord<void, void>::tracking_enable();
- }
-
- auto &instance = OpenMPInternal::singleton();
- instance.m_pool_size = Impl::g_openmp_hardware_max_threads;
-
- // New, unified host thread team data:
- {
- size_t pool_reduce_bytes = 32 * thread_count;
- size_t team_reduce_bytes = 32 * thread_count;
- size_t team_shared_bytes = 1024 * thread_count;
- size_t thread_local_bytes = 1024;
-
- instance.resize_thread_data(pool_reduce_bytes, team_reduce_bytes,
- team_shared_bytes, thread_local_bytes);
- }
- }
-
- // Check for over-subscription
- if (Kokkos::show_warnings() &&
- (Impl::mpi_ranks_per_node() * long(thread_count) >
- Impl::processors_per_node())) {
- std::cerr << "Kokkos::OpenMP::initialize WARNING: You are likely "
- "oversubscribing your CPU cores."
- << std::endl;
- std::cerr << " Detected: "
- << Impl::processors_per_node() << " cores per node." << std::endl;
- std::cerr << " Detected: "
- << Impl::mpi_ranks_per_node() << " MPI_ranks per node."
- << std::endl;
- std::cerr << " Requested: "
- << thread_count << " threads per process." << std::endl;
- }
- // Init the array used for arbitrarily sized atomics
- init_lock_array_host_space();
-
- m_initialized = true;
-}
-
-void OpenMPInternal::finalize() {
- if (omp_in_parallel()) {
- std::string msg("Kokkos::OpenMP::finalize ERROR ");
- if (this != &singleton()) msg.append(": not initialized");
- if (omp_in_parallel()) msg.append(": in parallel");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-
- if (this == &singleton()) {
- auto const &instance = singleton();
- // Silence Cuda Warning
- const int nthreads =
- instance.m_pool_size <= Impl::g_openmp_hardware_max_threads
- ? Impl::g_openmp_hardware_max_threads
- : instance.m_pool_size;
- (void)nthreads;
-
-#pragma omp parallel num_threads(nthreads)
- {
- Impl::t_openmp_hardware_id = 0;
- Impl::SharedAllocationRecord<void, void>::tracking_disable();
- }
-
- // allow main thread to track
- Impl::SharedAllocationRecord<void, void>::tracking_enable();
-
- Impl::g_openmp_hardware_max_threads = 1;
- }
-
- m_initialized = false;
-
- Kokkos::Profiling::finalize();
-}
-
-void OpenMPInternal::print_configuration(std::ostream &s) const {
- s << "Kokkos::OpenMP";
-
- if (m_initialized) {
- const int numa_count = 1;
- const int core_per_numa = Impl::g_openmp_hardware_max_threads;
- const int thread_per_core = 1;
-
- s << " thread_pool_topology[ " << numa_count << " x " << core_per_numa
- << " x " << thread_per_core << " ]" << std::endl;
- } else {
- s << " not initialized" << std::endl;
- }
-}
-
-bool OpenMPInternal::verify_is_initialized(const char *const label) const {
- if (!m_initialized) {
- std::cerr << "Kokkos::OpenMP " << label
- << " : ERROR OpenMP is not initialized" << std::endl;
- }
- return m_initialized;
-}
-} // namespace Impl
-
-//----------------------------------------------------------------------------
-
-OpenMP::OpenMP()
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- : m_space_instance(&Impl::OpenMPInternal::singleton()) {
-}
-#else
- : m_space_instance(&Impl::OpenMPInternal::singleton(),
- [](Impl::OpenMPInternal *) {}) {
- Impl::OpenMPInternal::singleton().verify_is_initialized(
- "OpenMP instance constructor");
-}
-#endif
-
-int OpenMP::impl_get_current_max_threads() noexcept {
- return Impl::OpenMPInternal::get_current_max_threads();
-}
-
-void OpenMP::impl_initialize(InitializationSettings const &settings) {
- Impl::OpenMPInternal::singleton().initialize(
- settings.has_num_threads() ? settings.get_num_threads() : -1);
-}
-
-void OpenMP::impl_finalize() { Impl::OpenMPInternal::singleton().finalize(); }
-
-void OpenMP::print_configuration(std::ostream &os, bool /*verbose*/) const {
- os << "Host Parallel Execution Space:\n";
- os << " KOKKOS_ENABLE_OPENMP: yes\n";
-
- os << "OpenMP Atomics:\n";
- os << " KOKKOS_ENABLE_OPENMP_ATOMICS: ";
-#ifdef KOKKOS_ENABLE_OPENMP_ATOMICS
- os << "yes\n";
-#else
- os << "no\n";
-#endif
-
- os << "\nOpenMP Runtime Configuration:\n";
-
- m_space_instance->print_configuration(os);
-}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-std::vector<OpenMP> OpenMP::partition(...) { return std::vector<OpenMP>(1); }
-
-OpenMP OpenMP::create_instance(...) { return OpenMP(); }
-#endif
-
-int OpenMP::concurrency() { return Impl::g_openmp_hardware_max_threads; }
-
-void OpenMP::fence(const std::string &name) const {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
- name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1}, []() {});
-}
-
-namespace Impl {
-
-int g_openmp_space_factory_initialized =
- initialize_space_factory<OpenMP>("050_OpenMP");
-
-} // namespace Impl
-
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<OpenMP>::id;
-}
-} // namespace Tools
-#endif
-
-} // namespace Kokkos
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#ifndef KOKKOS_OPENMP_INSTANCE_HPP
-#define KOKKOS_OPENMP_INSTANCE_HPP
-
-#include <Kokkos_Macros.hpp>
-#if !defined(_OPENMP) && !defined(__CUDA_ARCH__) && \
- !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__)
-#error \
- "You enabled Kokkos OpenMP support without enabling OpenMP in the compiler!"
-#endif
-
-#include <Kokkos_OpenMP.hpp>
-
-#include <impl/Kokkos_Traits.hpp>
-#include <impl/Kokkos_HostThreadTeam.hpp>
-
-#include <Kokkos_Atomic.hpp>
-
-#include <Kokkos_UniqueToken.hpp>
-#include <impl/Kokkos_ConcurrentBitset.hpp>
-
-#include <omp.h>
-
-namespace Kokkos {
-namespace Impl {
-
-class OpenMPInternal;
-
-extern int g_openmp_hardware_max_threads;
-
-extern thread_local int t_openmp_hardware_id;
-// FIXME_OPENMP we can remove this after we remove partition_master
-extern thread_local OpenMPInternal* t_openmp_instance;
-
-struct OpenMPTraits {
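- // Fixed upper bound on the thread pool size; OpenMPInternal::m_pool below
- // is a static array with this many slots.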
- static int constexpr MAX_THREAD_COUNT = 512;
-};
-
-class OpenMPInternal {
- private:
- OpenMPInternal(int arg_pool_size)
- : m_pool_size{arg_pool_size}, m_level{omp_get_level()}, m_pool() {}
-
- ~OpenMPInternal() { clear_thread_data(); }
-
- static int get_current_max_threads() noexcept;
-
- bool m_initialized = false;
-
- int m_pool_size;
- int m_level;
-
- HostThreadTeamData* m_pool[OpenMPTraits::MAX_THREAD_COUNT];
-
- public:
- friend class Kokkos::OpenMP;
-
- static OpenMPInternal& singleton();
-
- void initialize(int thread_count);
-
- void finalize();
-
- void clear_thread_data();
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED static void validate_partition(const int nthreads,
- int& num_partitions,
- int& partition_size) {
- validate_partition_impl(nthreads, num_partitions, partition_size);
- }
- static void validate_partition_impl(const int nthreads, int& num_partitions,
- int& partition_size);
-#endif
-
- void resize_thread_data(size_t pool_reduce_bytes, size_t team_reduce_bytes,
- size_t team_shared_bytes, size_t thread_local_bytes);
-
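- // Outside a parallel region (the nesting level still equals the level at
- // construction) every caller uses master slot 0; inside one, each thread
- // uses the slot matching its OpenMP thread number.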
- HostThreadTeamData* get_thread_data() const noexcept {
- return m_pool[m_level == omp_get_level() ? 0 : omp_get_thread_num()];
- }
-
- HostThreadTeamData* get_thread_data(int i) const noexcept {
- return m_pool[i];
- }
-
- bool is_initialized() const { return m_initialized; }
-
- bool verify_is_initialized(const char* const label) const;
-
- void print_configuration(std::ostream& s) const;
-};
-
-} // namespace Impl
-inline bool OpenMP::impl_is_initialized() noexcept {
- return Impl::OpenMPInternal::singleton().is_initialized();
-}
-
-inline bool OpenMP::in_parallel(OpenMP const&) noexcept {
- // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
- // static and does not use the OpenMP object
- return ((Impl::OpenMPInternal::singleton().m_level < omp_get_level()) &&
- (!Impl::t_openmp_instance ||
- Impl::t_openmp_instance->m_level < omp_get_level()));
-}
-
-inline int OpenMP::impl_thread_pool_size() noexcept {
- // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
- // static
- return OpenMP::in_parallel()
- ? omp_get_num_threads()
- : (Impl::t_openmp_instance
- ? Impl::t_openmp_instance->m_pool_size
- : Impl::OpenMPInternal::singleton().m_pool_size);
-}
-
-KOKKOS_INLINE_FUNCTION
-int OpenMP::impl_thread_pool_rank() noexcept {
- // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
- // static
- KOKKOS_IF_ON_HOST(
- (return Impl::t_openmp_instance ? 0 : omp_get_thread_num();))
-
- KOKKOS_IF_ON_DEVICE((return -1;))
-}
-
-inline void OpenMP::impl_static_fence(std::string const& name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- []() {});
-}
-
-inline bool OpenMP::is_asynchronous(OpenMP const& /*instance*/) noexcept {
- return false;
-}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <typename F>
-KOKKOS_DEPRECATED void OpenMP::partition_master(F const& f, int num_partitions,
- int partition_size) {
-#if _OPENMP >= 201511
- if (omp_get_max_active_levels() > 1) {
-#else
- if (omp_get_nested()) {
-#endif
- using Exec = Impl::OpenMPInternal;
-
- Exec* prev_instance = &Impl::OpenMPInternal::singleton();
-
- Exec::validate_partition_impl(prev_instance->m_pool_size, num_partitions,
- partition_size);
-
- OpenMP::memory_space space;
-
-#pragma omp parallel num_threads(num_partitions)
- {
- Exec thread_local_instance(partition_size);
- Impl::t_openmp_instance = &thread_local_instance;
-
- size_t pool_reduce_bytes = 32 * partition_size;
- size_t team_reduce_bytes = 32 * partition_size;
- size_t team_shared_bytes = 1024 * partition_size;
- size_t thread_local_bytes = 1024;
-
- thread_local_instance.resize_thread_data(
- pool_reduce_bytes, team_reduce_bytes, team_shared_bytes,
- thread_local_bytes);
-
- omp_set_num_threads(partition_size);
- f(omp_get_thread_num(), omp_get_num_threads());
- Impl::t_openmp_instance = nullptr;
- }
- } else {
- // nested openmp not enabled
- f(0, 1);
- }
-}
-#endif
-
-namespace Experimental {
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <>
-class MasterLock<OpenMP> {
- public:
- void lock() { omp_set_lock(&m_lock); }
- void unlock() { omp_unset_lock(&m_lock); }
- bool try_lock() { return static_cast<bool>(omp_test_lock(&m_lock)); }
-
- KOKKOS_DEPRECATED MasterLock() { omp_init_lock(&m_lock); }
- ~MasterLock() { omp_destroy_lock(&m_lock); }
-
- MasterLock(MasterLock const&) = delete;
- MasterLock(MasterLock&&) = delete;
- MasterLock& operator=(MasterLock const&) = delete;
- MasterLock& operator=(MasterLock&&) = delete;
-
- private:
- omp_lock_t m_lock;
-};
-#endif
-
-template <>
-class UniqueToken<OpenMP, UniqueTokenScope::Instance> {
- private:
- using buffer_type = Kokkos::View<uint32_t*, Kokkos::HostSpace>;
- int m_count;
- buffer_type m_buffer_view;
- uint32_t volatile* m_buffer;
-
- public:
- using execution_space = OpenMP;
- using size_type = int;
-
- /// \brief Create an object sized for the concurrency of the given instance.
- ///
- /// This object should not be shared between instances.
- UniqueToken(execution_space const& = execution_space()) noexcept
- : m_count(::Kokkos::OpenMP::impl_thread_pool_size()),
- m_buffer_view(buffer_type()),
- m_buffer(nullptr) {}
-
- UniqueToken(size_type max_size, execution_space const& = execution_space())
- : m_count(max_size),
- m_buffer_view("UniqueToken::m_buffer_view",
- ::Kokkos::Impl::concurrent_bitset::buffer_bound(m_count)),
- m_buffer(m_buffer_view.data()) {}
-
- /// \brief upper bound for acquired values, i.e. 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int size() const noexcept {
- KOKKOS_IF_ON_HOST((return m_count;))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
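- // When at least as many tokens exist as threads in the pool, each thread
- // can simply use its pool rank as its token; only when tokens are scarcer
- // than threads does acquisition go through the concurrent bitset below.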
- /// \brief acquire value such that 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int acquire() const noexcept {
- KOKKOS_IF_ON_HOST(
- (if (m_count >= ::Kokkos::OpenMP::impl_thread_pool_size()) return ::
- Kokkos::OpenMP::impl_thread_pool_rank();
- const ::Kokkos::pair<int, int> result =
- ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
- m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
-
- if (result.first < 0) {
- ::Kokkos::abort(
- "UniqueToken<OpenMP> failure to acquire tokens, no tokens "
- "available");
- }
-
- return result.first;))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
- /// \brief release a value acquired by generate
- KOKKOS_INLINE_FUNCTION
- void release(int i) const noexcept {
- KOKKOS_IF_ON_HOST(
- (if (m_count < ::Kokkos::OpenMP::impl_thread_pool_size()) {
- ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
- }))
-
- KOKKOS_IF_ON_DEVICE(((void)i;))
- }
-};
-
-template <>
-class UniqueToken<OpenMP, UniqueTokenScope::Global> {
- public:
- using execution_space = OpenMP;
- using size_type = int;
-
- /// \brief Create an object sized for the concurrency of the given instance.
- ///
- /// This object should not be shared between instances.
- UniqueToken(execution_space const& = execution_space()) noexcept {}
-
- /// \brief upper bound for acquired values, i.e. 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int size() const noexcept {
- KOKKOS_IF_ON_HOST((return Kokkos::Impl::g_openmp_hardware_max_threads;))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
- /// \brief acquire value such that 0 <= value < size()
- KOKKOS_INLINE_FUNCTION
- int acquire() const noexcept {
- KOKKOS_IF_ON_HOST((return Kokkos::Impl::t_openmp_hardware_id;))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
- /// \brief release a value acquired by generate
- KOKKOS_INLINE_FUNCTION
- void release(int) const noexcept {}
-};
-
-} // namespace Experimental
-
-inline int OpenMP::impl_thread_pool_size(int depth) {
- return depth < 2 ? impl_thread_pool_size() : 1;
-}
-
-KOKKOS_INLINE_FUNCTION
-int OpenMP::impl_hardware_thread_id() noexcept {
- KOKKOS_IF_ON_HOST((return Impl::t_openmp_hardware_id;))
-
- KOKKOS_IF_ON_DEVICE((return -1;))
-}
-
-inline int OpenMP::impl_max_hardware_threads() noexcept {
- return Impl::g_openmp_hardware_max_threads;
-}
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/* [Kokkos BSD 3-clause license header, identical to the one above] */
-
-#ifndef KOKKOS_OPENMP_PARALLEL_HPP
-#define KOKKOS_OPENMP_PARALLEL_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_OPENMP)
-
-#include <omp.h>
-#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
-#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED _Pragma("ivdep")
-#endif
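-
-// KOKKOS_PRAGMA_IVDEP_IF_ENABLED expands to _Pragma("ivdep") only when both
-// aggressive vectorization and the ivdep pragma are enabled; otherwise it
-// expands to nothing and the loops below compile without the hint.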
-
-#ifndef KOKKOS_COMPILER_NVHPC
-#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE , m_policy.chunk_size()
-#else
-#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::OpenMP> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
-
- inline static void exec_range(const FunctorType& functor, const Member ibeg,
- const Member iend) {
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (auto iwork = ibeg; iwork < iend; ++iwork) {
- exec_work(functor, iwork);
- }
- }
-
- template <class Enable = WorkTag>
- inline static std::enable_if_t<std::is_void<WorkTag>::value &&
- std::is_same<Enable, WorkTag>::value>
- exec_work(const FunctorType& functor, const Member iwork) {
- functor(iwork);
- }
-
- template <class Enable = WorkTag>
- inline static std::enable_if_t<!std::is_void<WorkTag>::value &&
- std::is_same<Enable, WorkTag>::value>
- exec_work(const FunctorType& functor, const Member iwork) {
- functor(WorkTag{}, iwork);
- }
-
- template <class Policy>
- std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value>
- execute_parallel() const {
- // Prevent a bug in NVHPC 21.9/CUDA 11.4 (a zero-iteration loop is entered).
- if (m_policy.begin() >= m_policy.end()) return;
-#pragma omp parallel for schedule(dynamic KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
- num_threads(OpenMP::impl_thread_pool_size())
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
- exec_work(m_functor, iwork);
- }
- }
-
- template <class Policy>
- std::enable_if_t<!std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value>
- execute_parallel() const {
-#pragma omp parallel for schedule(static KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
- num_threads(OpenMP::impl_thread_pool_size())
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
- exec_work(m_functor, iwork);
- }
- }
-
- public:
- inline void execute() const {
- if (OpenMP::in_parallel()) {
- exec_range(m_functor, m_policy.begin(), m_policy.end());
- return;
- }
-
-#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
- execute_parallel<Policy>();
-#else
- constexpr bool is_dynamic =
- std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value;
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- data.set_work_partition(m_policy.end() - m_policy.begin(),
- m_policy.chunk_size());
-
- if (is_dynamic) {
- // Make sure work partition is set before stealing
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- std::pair<int64_t, int64_t> range(0, 0);
-
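- // For dynamic schedules, keep stealing chunks until
- // get_work_stealing_chunk() reports no more work (a negative begin index);
- // for static schedules, execute the single assigned partition once.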
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- exec_range(m_functor, range.first + m_policy.begin(),
- range.second + m_policy.begin());
-
- } while (is_dynamic && 0 <= range.first);
- }
-#endif
- }
-
- inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
- : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
-};
-
-// MDRangePolicy impl
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::OpenMP> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
- using WorkTag = typename MDRangePolicy::work_tag;
-
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using iterate_type = typename Kokkos::Impl::HostIterateTile<
- MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy; // construct as RangePolicy( 0, num_tiles
- // ).set_chunk_size(1) in ctor
-
- inline static void exec_range(const MDRangePolicy& mdr_policy,
- const FunctorType& functor, const Member ibeg,
- const Member iend) {
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- iterate_type(mdr_policy, functor)(iwork);
- }
- }
-
- template <class Policy>
- std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value>
- execute_parallel() const {
-#pragma omp parallel for schedule(dynamic KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
- num_threads(OpenMP::impl_thread_pool_size())
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
- iterate_type(m_mdr_policy, m_functor)(iwork);
- }
- }
-
- template <class Policy>
- std::enable_if_t<!std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value>
- execute_parallel() const {
-#pragma omp parallel for schedule(static KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
- num_threads(OpenMP::impl_thread_pool_size())
- KOKKOS_PRAGMA_IVDEP_IF_ENABLED
- for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
- iterate_type(m_mdr_policy, m_functor)(iwork);
- }
- }
-
- public:
- inline void execute() const {
- if (OpenMP::in_parallel()) {
- ParallelFor::exec_range(m_mdr_policy, m_functor, m_policy.begin(),
- m_policy.end());
- return;
- }
-
-#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
- execute_parallel<Policy>();
-#else
- constexpr bool is_dynamic =
- std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value;
-
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- data.set_work_partition(m_policy.end() - m_policy.begin(),
- m_policy.chunk_size());
-
- if (is_dynamic) {
- // Make sure work partition is set before stealing
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- std::pair<int64_t, int64_t> range(0, 0);
-
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- ParallelFor::exec_range(m_mdr_policy, m_functor,
- range.first + m_policy.begin(),
- range.second + m_policy.begin());
-
- } while (is_dynamic && 0 <= range.first);
- }
- // END #pragma omp parallel
-#endif
- }
-
- inline ParallelFor(const FunctorType& arg_functor, MDRangePolicy arg_policy)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- /**
- * 1024 here is just our guess for a reasonable max tile size,
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::OpenMP> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
-
- // Static assert: WorkTag must be void if ReducerType is not InvalidType.
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update) {
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(iwork, update);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update) {
- const TagType t{};
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(t, iwork, update);
- }
- }
-
- public:
- inline void execute() const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- if (m_policy.end() <= m_policy.begin()) {
- if (m_result_ptr) {
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- return;
- }
- enum {
- is_dynamic = std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value
- };
-
- const size_t pool_reduce_bytes =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
- m_instance->resize_thread_data(pool_reduce_bytes,
- 0,   // team_reduce_bytes
- 0,   // team_shared_bytes
- 0);  // thread_local_bytes
-
- const int pool_size = OpenMP::impl_thread_pool_size();
-#pragma omp parallel num_threads(pool_size)
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- data.set_work_partition(m_policy.end() - m_policy.begin(),
- m_policy.chunk_size());
-
- if (is_dynamic) {
- // Make sure work partition is set before stealing
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- reference_type update = final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
-
- std::pair<int64_t, int64_t> range(0, 0);
-
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- ParallelReduce::template exec_range<WorkTag>(
- m_functor, range.first + m_policy.begin(),
- range.second + m_policy.begin(), update);
-
- } while (is_dynamic && 0 <= range.first);
- }
-
- // Reduction:
-
- const pointer_type ptr =
- pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
-
- for (int i = 1; i < pool_size; ++i) {
- final_reducer.join(
- ptr, reinterpret_cast<pointer_type>(
- m_instance->get_thread_data(i)->pool_reduce_local()));
- }
-
- final_reducer.final(ptr);
-
- if (m_result_ptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = ptr[j];
- }
- }
- }
-
- //----------------------------------------
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType& arg_functor, Policy arg_policy,
- const ViewType& arg_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_view.data()) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-
- inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
- const ReducerType& reducer)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-};
-
-// MDRangePolicy impl
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::OpenMP> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
-
- using WorkTag = typename MDRangePolicy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
-
- using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
- MDRangePolicy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
-
- using iterate_type =
- typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
- WorkTag, reference_type>;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
-  // Constructed as RangePolicy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)
-  // in the constructors below.
-  const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- inline static void exec_range(const MDRangePolicy& mdr_policy,
- const FunctorType& functor, const Member ibeg,
- const Member iend, reference_type update) {
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- iterate_type(mdr_policy, functor, update)(iwork);
- }
- }
-
- public:
- inline void execute() const {
- enum {
- is_dynamic = std::is_same<typename Policy::schedule_type::type,
- Kokkos::Dynamic>::value
- };
-
- const size_t pool_reduce_bytes =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
-    m_instance->resize_thread_data(pool_reduce_bytes,
-                                   0,  // team_reduce_bytes
-                                   0,  // team_shared_bytes
-                                   0   // thread_local_bytes
-    );
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const int pool_size = OpenMP::impl_thread_pool_size();
-#pragma omp parallel num_threads(pool_size)
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- data.set_work_partition(m_policy.end() - m_policy.begin(),
- m_policy.chunk_size());
-
- if (is_dynamic) {
- // Make sure work partition is set before stealing
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- reference_type update = final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
-
- std::pair<int64_t, int64_t> range(0, 0);
-
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- ParallelReduce::exec_range(m_mdr_policy, m_functor,
- range.first + m_policy.begin(),
- range.second + m_policy.begin(), update);
-
- } while (is_dynamic && 0 <= range.first);
- }
- // END #pragma omp parallel
-
- // Reduction:
-
- const pointer_type ptr =
- pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
-
- for (int i = 1; i < pool_size; ++i) {
- final_reducer.join(
- ptr, reinterpret_cast<pointer_type>(
- m_instance->get_thread_data(i)->pool_reduce_local()));
- }
-
- final_reducer.final(ptr);
-
- if (m_result_ptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = ptr[j];
- }
- }
- }
-
- //----------------------------------------
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType& arg_functor, MDRangePolicy arg_policy,
- const ViewType& arg_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(InvalidType()),
- m_result_ptr(arg_view.data()) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-
- inline ParallelReduce(const FunctorType& arg_functor,
- MDRangePolicy arg_policy, const ReducerType& reducer)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- /**
-     * 1024 here is just our guess for a reasonable max tile size;
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-
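The same entry point called with an MDRangePolicy lands in the specialization above, which flattens the rank-N tile space into RangePolicy(0, num_tiles) and walks each tile through HostIterateTile. A hedged sketch; the View `a` and its extents are assumed:

Kokkos::View<double**, Kokkos::HostSpace> a("a", 100, 100);  // assumed data
double sum = 0.0;
Kokkos::parallel_reduce(
    "mdrange_sum",
    Kokkos::MDRangePolicy<Kokkos::OpenMP, Kokkos::Rank<2>>({0, 0}, {100, 100}),
    KOKKOS_LAMBDA(const int i, const int j, double& update) {
      update += a(i, j);
    },
    sum);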
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::OpenMP> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update, const bool final) {
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(iwork, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update, const bool final) {
- const TagType t{};
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(t, iwork, update, final);
- }
- }
-
- public:
- inline void execute() const {
- const int value_count = Analysis::value_count(m_functor);
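-    // Two value slots per thread: the first holds the thread's partial sum,
-    // the second its exclusive prefix offset (hence the factor of 2).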
- const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
-
-    m_instance->resize_thread_data(pool_reduce_bytes,
-                                   0,  // team_reduce_bytes
-                                   0,  // team_shared_bytes
-                                   0   // thread_local_bytes
-    );
-
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
- typename Analysis::Reducer final_reducer(&m_functor);
-
- const WorkRange range(m_policy, omp_get_thread_num(),
- omp_get_num_threads());
-
- reference_type update_sum = final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
-
- ParallelScan::template exec_range<WorkTag>(
- m_functor, range.begin(), range.end(), update_sum, false);
-
- if (data.pool_rendezvous()) {
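-        // Layout of each thread's pool_reduce_local(): slots [0, value_count)
-        // hold the partial sum from the first pass; slots
-        // [value_count, 2 * value_count) receive the thread's exclusive
-        // prefix, filled in thread order by the rendezvous winner below.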
- pointer_type ptr_prev = nullptr;
-
- const int n = omp_get_num_threads();
-
- for (int i = 0; i < n; ++i) {
- pointer_type ptr =
- (pointer_type)data.pool_member(i)->pool_reduce_local();
-
- if (i) {
- for (int j = 0; j < value_count; ++j) {
- ptr[j + value_count] = ptr_prev[j + value_count];
- }
- final_reducer.join(ptr + value_count, ptr_prev);
- } else {
- final_reducer.init(ptr + value_count);
- }
-
- ptr_prev = ptr;
- }
-
- data.pool_rendezvous_release();
- }
-
- reference_type update_base = final_reducer.reference(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
- value_count);
-
- ParallelScan::template exec_range<WorkTag>(
- m_functor, range.begin(), range.end(), update_base, true);
- }
- }
-
- //----------------------------------------
-
- inline ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
-
- //----------------------------------------
-};
-
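A hedged sketch of the two-pass protocol that execute() drives: pass one runs with final == false so each thread accumulates a partial total, the rendezvous winner converts the per-thread totals into exclusive offsets, and pass two runs with final == true to write results. The Views `counts` and `offsets` and the extent `n` are assumed:

Kokkos::View<long*, Kokkos::HostSpace> counts("counts", n);   // assumed input
Kokkos::View<long*, Kokkos::HostSpace> offsets("offsets", n); // assumed output
Kokkos::parallel_scan(
    "exclusive_prefix_sum", Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
    KOKKOS_LAMBDA(const int i, long& update, const bool final) {
      if (final) offsets(i) = update;  // value before adding counts(i)
      update += counts(i);
    });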
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::OpenMP> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
- ReturnType& m_returnvalue;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update, const bool final) {
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(iwork, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType& functor, const Member ibeg, const Member iend,
- reference_type update, const bool final) {
- const TagType t{};
- for (Member iwork = ibeg; iwork < iend; ++iwork) {
- functor(t, iwork, update, final);
- }
- }
-
- public:
- inline void execute() const {
- const int value_count = Analysis::value_count(m_functor);
- const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
-
-    m_instance->resize_thread_data(pool_reduce_bytes,
-                                   0,  // team_reduce_bytes
-                                   0,  // team_shared_bytes
-                                   0   // thread_local_bytes
-    );
-
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
- typename Analysis::Reducer final_reducer(&m_functor);
-
- const WorkRange range(m_policy, omp_get_thread_num(),
- omp_get_num_threads());
- reference_type update_sum = final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
-
- ParallelScanWithTotal::template exec_range<WorkTag>(
- m_functor, range.begin(), range.end(), update_sum, false);
-
- if (data.pool_rendezvous()) {
- pointer_type ptr_prev = nullptr;
-
- const int n = omp_get_num_threads();
-
- for (int i = 0; i < n; ++i) {
- pointer_type ptr =
- (pointer_type)data.pool_member(i)->pool_reduce_local();
-
- if (i) {
- for (int j = 0; j < value_count; ++j) {
- ptr[j + value_count] = ptr_prev[j + value_count];
- }
- final_reducer.join(ptr + value_count, ptr_prev);
- } else {
- final_reducer.init(ptr + value_count);
- }
-
- ptr_prev = ptr;
- }
-
- data.pool_rendezvous_release();
- }
-
- reference_type update_base = final_reducer.reference(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
- value_count);
-
- ParallelScanWithTotal::template exec_range<WorkTag>(
- m_functor, range.begin(), range.end(), update_base, true);
-
- if (omp_get_thread_num() == omp_get_num_threads() - 1) {
- m_returnvalue = update_base;
- }
- }
- }
-
- //----------------------------------------
-
- inline ParallelScanWithTotal(const FunctorType& arg_functor,
- const Policy& arg_policy,
- ReturnType& arg_returnvalue)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_returnvalue(arg_returnvalue) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
-
- //----------------------------------------
-};
-
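The only behavioral addition over ParallelScan is the last clause of execute(): after the final pass, the highest-ranked thread's update_base equals the grand total, which it stores into m_returnvalue. A hedged sketch of the corresponding call, reusing the assumed Views from the previous sketch:

long total = 0;  // receives the sum over all contributions
Kokkos::parallel_scan(
    "scan_with_total", Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
    KOKKOS_LAMBDA(const int i, long& update, const bool final) {
      if (final) offsets(i) = update;
      update += counts(i);
    },
    total);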
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::OpenMP> {
- private:
- enum { TEAM_REDUCE_SIZE = 512 };
-
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
- using WorkTag = typename Policy::work_tag;
- using SchedTag = typename Policy::schedule_type::type;
- using Member = typename Policy::member_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
- const size_t m_shmem_size;
-
- template <class TagType>
- inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
- const FunctorType& functor, HostThreadTeamData& data,
- const int league_rank_begin, const int league_rank_end,
- const int league_size) {
- for (int r = league_rank_begin; r < league_rank_end;) {
- functor(Member(data, r, league_size));
-
- if (++r < league_rank_end) {
- // Don't allow team members to lap one another
- // so that they don't overwrite shared memory.
- if (data.team_rendezvous()) {
- data.team_rendezvous_release();
- }
- }
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
- const FunctorType& functor, HostThreadTeamData& data,
- const int league_rank_begin, const int league_rank_end,
- const int league_size) {
- const TagType t{};
-
- for (int r = league_rank_begin; r < league_rank_end;) {
- functor(t, Member(data, r, league_size));
-
- if (++r < league_rank_end) {
- // Don't allow team members to lap one another
- // so that they don't overwrite shared memory.
- if (data.team_rendezvous()) {
- data.team_rendezvous_release();
- }
- }
- }
- }
-
- public:
- inline void execute() const {
- enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
-
- const size_t pool_reduce_size = 0; // Never shrinks
- const size_t team_reduce_size = TEAM_REDUCE_SIZE * m_policy.team_size();
- const size_t team_shared_size = m_shmem_size;
- const size_t thread_local_size = 0; // Never shrinks
-
- m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
- team_shared_size, thread_local_size);
-
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- const int active = data.organize_team(m_policy.team_size());
-
- if (active) {
- data.set_work_partition(
- m_policy.league_size(),
- (0 < m_policy.chunk_size() ? m_policy.chunk_size()
- : m_policy.team_iter()));
- }
-
- if (is_dynamic) {
- // Must synchronize to make sure each team has set its
- // partition before beginning the work stealing loop.
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- if (active) {
- std::pair<int64_t, int64_t> range(0, 0);
-
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- ParallelFor::template exec_team<WorkTag>(m_functor, data, range.first,
- range.second,
- m_policy.league_size());
-
- } while (is_dynamic && 0 <= range.first);
- }
-
- data.disband_team();
- }
- }
-
- inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
-};
-
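For orientation, a hedged sketch of the team-parallel pattern this specialization serves; the league size and nested range are illustrative. Each league rank maps onto one HostThreadTeamData partition, and the rendezvous in exec_team keeps a team from lapping itself and overwriting its shared scratch:

using policy_t = Kokkos::TeamPolicy<Kokkos::OpenMP>;
Kokkos::parallel_for(
    "team_loop", policy_t(/*league_size=*/64, Kokkos::AUTO),
    KOKKOS_LAMBDA(const policy_t::member_type& team) {
      // The threads of one team cooperate over a nested range.
      Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 32),
                           [&](const int i) { (void)i; /* per-thread work */ });
    });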
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::OpenMP> {
- private:
- enum { TEAM_REDUCE_SIZE = 512 };
-
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
-
- using WorkTag = typename Policy::work_tag;
- using SchedTag = typename Policy::schedule_type::type;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
-
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
-
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- OpenMPInternal* m_instance;
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const int m_shmem_size;
-
- template <class TagType>
- inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
- const FunctorType& functor, HostThreadTeamData& data,
- reference_type& update, const int league_rank_begin,
- const int league_rank_end, const int league_size) {
- for (int r = league_rank_begin; r < league_rank_end;) {
- functor(Member(data, r, league_size), update);
-
- if (++r < league_rank_end) {
- // Don't allow team members to lap one another
- // so that they don't overwrite shared memory.
- if (data.team_rendezvous()) {
- data.team_rendezvous_release();
- }
- }
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
- const FunctorType& functor, HostThreadTeamData& data,
- reference_type& update, const int league_rank_begin,
- const int league_rank_end, const int league_size) {
- const TagType t{};
-
- for (int r = league_rank_begin; r < league_rank_end;) {
- functor(t, Member(data, r, league_size), update);
-
- if (++r < league_rank_end) {
- // Don't allow team members to lap one another
- // so that they don't overwrite shared memory.
- if (data.team_rendezvous()) {
- data.team_rendezvous_release();
- }
- }
- }
- }
-
- public:
- inline void execute() const {
- enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- if (m_policy.league_size() == 0 || m_policy.team_size() == 0) {
- if (m_result_ptr) {
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- return;
- }
-
- const size_t pool_reduce_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
-
- const size_t team_reduce_size = TEAM_REDUCE_SIZE * m_policy.team_size();
- const size_t team_shared_size = m_shmem_size + m_policy.scratch_size(1);
- const size_t thread_local_size = 0; // Never shrinks
-
- m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
- team_shared_size, thread_local_size);
-
- const int pool_size = OpenMP::impl_thread_pool_size();
-#pragma omp parallel num_threads(pool_size)
- {
- HostThreadTeamData& data = *(m_instance->get_thread_data());
-
- const int active = data.organize_team(m_policy.team_size());
-
- if (active) {
- data.set_work_partition(
- m_policy.league_size(),
- (0 < m_policy.chunk_size() ? m_policy.chunk_size()
- : m_policy.team_iter()));
- }
-
- if (is_dynamic) {
- // Must synchronize to make sure each team has set its
- // partition before beginning the work stealing loop.
- if (data.pool_rendezvous()) data.pool_rendezvous_release();
- }
-
- if (active) {
- reference_type update = final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
-
- std::pair<int64_t, int64_t> range(0, 0);
-
- do {
- range = is_dynamic ? data.get_work_stealing_chunk()
- : data.get_work_partition();
-
- ParallelReduce::template exec_team<WorkTag>(m_functor, data, update,
- range.first, range.second,
- m_policy.league_size());
-
- } while (is_dynamic && 0 <= range.first);
- } else {
- final_reducer.init(
- reinterpret_cast<pointer_type>(data.pool_reduce_local()));
- }
-
- data.disband_team();
-
- // This thread has updated 'pool_reduce_local()' with its
- // contributions to the reduction. The parallel region is
- // about to terminate and the master thread will load and
- // reduce each 'pool_reduce_local()' contribution.
- // Must 'memory_fence()' to guarantee that storing the update to
- // 'pool_reduce_local()' will complete before this thread
- // exits the parallel region.
-
- memory_fence();
- }
-
- // Reduction:
-
- const pointer_type ptr =
- pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
-
- for (int i = 1; i < pool_size; ++i) {
- final_reducer.join(
- ptr, reinterpret_cast<pointer_type>(
- m_instance->get_thread_data(i)->pool_reduce_local()));
- }
-
- final_reducer.final(ptr);
-
- if (m_result_ptr) {
- const int n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
-
- for (int j = 0; j < n; ++j) {
- m_result_ptr[j] = ptr[j];
- }
- }
- }
-
- //----------------------------------------
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- }
-
- inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
- const ReducerType& reducer)
- : m_instance(nullptr),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {
- if (t_openmp_instance) {
- m_instance = t_openmp_instance;
- } else {
- m_instance = arg_policy.space().impl_internal_space_instance();
- }
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-};
-
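The second constructor above is selected when the caller passes a reducer object rather than a result View. A hedged sketch with illustrative sizes:

double max_val = 0.0;
Kokkos::parallel_reduce(
    "team_max", Kokkos::TeamPolicy<Kokkos::OpenMP>(64, Kokkos::AUTO),
    KOKKOS_LAMBDA(const Kokkos::TeamPolicy<Kokkos::OpenMP>::member_type& team,
                  double& update) {
      const double v = static_cast<double>(team.league_rank());  // illustrative
      if (v > update) update = v;
    },
    Kokkos::Max<double>(max_val));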
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
-#undef KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
-
-#endif
-#endif /* KOKKOS_OPENMP_PARALLEL_HPP */
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_OPENMP) && defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <Kokkos_Core.hpp>
-
-#include <impl/Kokkos_TaskQueue_impl.hpp>
-#include <impl/Kokkos_HostThreadTeam.hpp>
-#include <OpenMP/Kokkos_OpenMP_Task.hpp>
-#include <cassert>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template class TaskQueue<Kokkos::OpenMP, typename Kokkos::OpenMP::memory_space>;
-
-HostThreadTeamData& HostThreadTeamDataSingleton::singleton() {
- static HostThreadTeamDataSingleton s;
- return s;
-}
-
-HostThreadTeamDataSingleton::HostThreadTeamDataSingleton()
- : HostThreadTeamData() {
- Kokkos::OpenMP::memory_space space;
- const size_t num_pool_reduce_bytes = 32;
- const size_t num_team_reduce_bytes = 32;
- const size_t num_team_shared_bytes = 1024;
- const size_t num_thread_local_bytes = 1024;
- const size_t alloc_bytes = HostThreadTeamData::scratch_size(
- num_pool_reduce_bytes, num_team_reduce_bytes, num_team_shared_bytes,
- num_thread_local_bytes);
-
- void* ptr = nullptr;
- try {
- ptr = space.allocate(alloc_bytes);
- } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& f) {
- // For now, just rethrow the error message with a note
- // Note that this could, in turn, trigger an out of memory exception,
- // but it's pretty unlikely, so we won't worry about it for now.
- // TODO reasonable error message when `std::string` causes OOM error
- Kokkos::Impl::throw_runtime_exception(
- std::string("Failure to allocate scratch memory: ") +
- f.get_error_message());
- }
-
- HostThreadTeamData::scratch_assign(
- ptr, alloc_bytes, num_pool_reduce_bytes, num_team_reduce_bytes,
- num_team_shared_bytes, num_thread_local_bytes);
-}
-
-HostThreadTeamDataSingleton::~HostThreadTeamDataSingleton() {
- Kokkos::OpenMP::memory_space space;
- space.deallocate(HostThreadTeamData::scratch_buffer(),
- static_cast<size_t>(HostThreadTeamData::scratch_bytes()));
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-#else
-void KOKKOS_CORE_SRC_OPENMP_KOKKOS_OPENMP_TASK_PREVENT_LINK_ERROR() {}
-#endif /* #if defined( KOKKOS_ENABLE_OPENMP ) && defined( \
- KOKKOS_ENABLE_TASKDAG ) */
+++ /dev/null
-
-#ifndef KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
-#define KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
-
-#include <Kokkos_OpenMP.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::OpenMP> {
- private:
- using Policy = Kokkos::WorkGraphPolicy<Traits...>;
-
- Policy m_policy;
- FunctorType m_functor;
-
- template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- m_functor(w);
- }
-
- template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- const TagType t{};
- m_functor(t, w);
- }
-
- public:
- inline void execute() {
-#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
- {
- // Spin until COMPLETED_TOKEN.
- // END_TOKEN indicates no work is currently available.
-
- for (std::int32_t w = Policy::END_TOKEN;
- Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
- if (Policy::END_TOKEN != w) {
- exec_one<typename Policy::work_tag>(w);
- m_policy.completed_work(w);
- }
- }
- }
- }
-
- inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_policy(arg_policy), m_functor(arg_functor) {}
-};
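The pop/complete token protocol in execute() generalizes beyond this class. A hedged sketch in which `Queue`, `queue`, and `process` are hypothetical stand-ins for WorkGraphPolicy and the user functor:

// Hypothetical queue mirroring the WorkGraphPolicy token contract:
//   pop_work() -> END_TOKEN while nothing is ready yet (spin and retry),
//                 COMPLETED_TOKEN once every node has retired,
//                 otherwise the index of a ready node.
std::int32_t w;
while ((w = queue.pop_work()) != Queue::COMPLETED_TOKEN) {
  if (w != Queue::END_TOKEN) {
    process(w);               // hypothetical: execute node w
    queue.completed_work(w);  // mark done, releasing w's dependents
  }
}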
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP */
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <algorithm>
-#include <omp.h>
-
-/*--------------------------------------------------------------------------*/
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#include <iostream>
-#include <sstream>
-#include <cstring>
-
-#include <Kokkos_OpenMPTarget.hpp>
-#include <Kokkos_OpenMPTargetSpace.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <Kokkos_Atomic.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Experimental {
-/* Default allocation mechanism */
-OpenMPTargetSpace::OpenMPTargetSpace() {}
-
-void* OpenMPTargetSpace::impl_allocate(
-    const char* arg_label, const size_t arg_alloc_size,
-    const size_t arg_logical_size,
-    const Kokkos::Tools::SpaceHandle arg_handle) const {
- static_assert(sizeof(void*) == sizeof(uintptr_t),
- "Error sizeof(void*) != sizeof(uintptr_t)");
-
-  void* ptr = omp_target_alloc(arg_alloc_size, omp_get_default_device());
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
-
-void* OpenMPTargetSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-
-void* OpenMPTargetSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-
-void OpenMPTargetSpace::impl_deallocate(
- const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- if (arg_alloc_ptr) {
- omp_target_free(arg_alloc_ptr, omp_get_default_device());
- }
-}
-
-void OpenMPTargetSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void OpenMPTargetSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
-                                   const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
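The space is a thin wrapper over the OpenMP 4.5+ device-memory API. A minimal standalone sketch of the underlying calls, with no Kokkos involved:

#include <omp.h>
#include <cstdio>

int main() {
  const int dev = omp_get_default_device();
  void* p = omp_target_alloc(1024, dev);  // 1024 bytes on the default device
  if (p == nullptr) {
    std::printf("device allocation failed\n");
    return 1;
  }
  omp_target_free(p, dev);
  return 0;
}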
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::OpenMPTargetSpace, void>::s_root_record;
-#endif
-
-SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace,
- void>::~SharedAllocationRecord() {
- auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
-  // Copy the initialized header into the device-side allocation.
- Kokkos::Impl::DeepCopy<Experimental::OpenMPTargetSpace, HostSpace>(
- RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
- Kokkos::fence(
- "SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-//----------------------------------------------------------------------------
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-/*
-namespace Kokkos {
-namespace {
- const unsigned HOST_SPACE_ATOMIC_MASK = 0xFFFF;
- const unsigned HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
- static int HOST_SPACE_ATOMIC_LOCKS[HOST_SPACE_ATOMIC_MASK+1];
-}
-
-namespace Impl {
-void init_lock_array_host_space() {
- static int is_initialized = 0;
- if(! is_initialized)
- for(int i = 0; i < static_cast<int> (HOST_SPACE_ATOMIC_MASK+1); i++)
- HOST_SPACE_ATOMIC_LOCKS[i] = 0;
-}
-
-bool lock_address_host_space(void* ptr) {
- return 0 == atomic_compare_exchange( &HOST_SPACE_ATOMIC_LOCKS[
- (( size_t(ptr) >> 2 ) & HOST_SPACE_ATOMIC_MASK) ^
-HOST_SPACE_ATOMIC_XOR_MASK] , 0 , 1);
-}
-
-void unlock_address_host_space(void* ptr) {
- atomic_exchange( &HOST_SPACE_ATOMIC_LOCKS[
- (( size_t(ptr) >> 2 ) & HOST_SPACE_ATOMIC_MASK) ^
-HOST_SPACE_ATOMIC_XOR_MASK] , 0);
-}
-
-}
-}*/
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes here,
-// where we have access to the associated *_timpl.hpp header files.
-template class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::OpenMPTargetSpace>;
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
+++ /dev/null
-
-#ifndef KOKKOS_OPENMPTARGET_ABORT_HPP
-#define KOKKOS_OPENMPTARGET_ABORT_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <cstdio>   // fprintf
-#include <cstdlib>  // std::abort
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-
-namespace Kokkos {
-namespace Impl {
-
-KOKKOS_INLINE_FUNCTION void OpenMPTarget_abort(char const *msg) {
- fprintf(stderr, "%s.\n", msg);
- std::abort();
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-#endif
+++ /dev/null
-
-#ifndef KOKKOS_OPENMPTARGET_ERROR_HPP
-#define KOKKOS_OPENMPTARGET_ERROR_HPP
-
-#include <impl/Kokkos_Error.hpp>
-#include <sstream>
-
-namespace Kokkos {
-namespace Impl {
-
-inline void ompt_internal_safe_call(int e, const char* name,
- const char* file = nullptr,
- const int line = 0) {
- if (e != 0) {
- std::ostringstream out;
- out << name << " return value of " << e << " indicates failure";
- if (file) {
- out << " " << file << ":" << line;
- }
- throw_runtime_exception(out.str());
- }
-}
-
-#define OMPT_SAFE_CALL(call) \
- Kokkos::Impl::ompt_internal_safe_call(call, #call, __FILE__, __LINE__)
-
-} // namespace Impl
-} // namespace Kokkos
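Usage follows the familiar safe-call pattern. In the sketch below, `dst`, `src`, and `nbytes` are assumed to describe a valid host-to-device copy:

OMPT_SAFE_CALL(omp_target_memcpy(dst, src, nbytes, /*dst_offset=*/0,
                                 /*src_offset=*/0, omp_get_default_device(),
                                 omp_get_initial_device()));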
-
-#endif
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <stdio.h>
-#include <limits>
-#include <iostream>
-#include <vector>
-#include <Kokkos_Core.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_CPUDiscovery.hpp>
-#include <impl/Kokkos_Tools.hpp>
-
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-
-// FIXME_OPENMPTARGET currently unused
-/*
-namespace Kokkos {
-namespace Impl {
-namespace {
-
-KOKKOS_INLINE_FUNCTION
-int kokkos_omp_in_parallel();
-
-KOKKOS_INLINE_FUNCTION
-int kokkos_omp_in_parallel() { return omp_in_parallel(); }
-
-bool s_using_hwloc = false;
-
-} // namespace
-} // namespace Impl
-} // namespace Kokkos
-*/
-
-namespace Kokkos {
-namespace Impl {
-
-void OpenMPTargetExec::verify_is_process(const char* const label) {
-  // Fails if the current task is inside a parallel region while not
-  // executing on the host (initial) device.
- if (omp_in_parallel() && (!omp_is_initial_device())) {
- std::string msg(label);
- msg.append(" ERROR: in parallel or on device");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-}
-
-void OpenMPTargetExec::verify_initialized(const char* const label) {
- if (0 == Kokkos::Experimental::OpenMPTarget().impl_is_initialized()) {
- std::string msg(label);
- msg.append(" ERROR: not initialized");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-}
-
-void* OpenMPTargetExec::m_scratch_ptr = nullptr;
-int64_t OpenMPTargetExec::m_scratch_size = 0;
-int* OpenMPTargetExec::m_lock_array = nullptr;
-int64_t OpenMPTargetExec::m_lock_size = 0;
-uint32_t* OpenMPTargetExec::m_uniquetoken_ptr = nullptr;
-
-void OpenMPTargetExec::clear_scratch() {
- Kokkos::Experimental::OpenMPTargetSpace space;
- space.deallocate(m_scratch_ptr, m_scratch_size);
- m_scratch_ptr = nullptr;
- m_scratch_size = 0;
-}
-
-void OpenMPTargetExec::clear_lock_array() {
- if (m_lock_array != nullptr) {
- Kokkos::Experimental::OpenMPTargetSpace space;
- space.deallocate(m_lock_array, m_lock_size);
- m_lock_array = nullptr;
- m_lock_size = 0;
- }
-}
-
-void* OpenMPTargetExec::get_scratch_ptr() { return m_scratch_ptr; }
-
-void OpenMPTargetExec::resize_scratch(int64_t team_size, int64_t shmem_size_L0,
- int64_t shmem_size_L1,
- int64_t league_size) {
- Kokkos::Experimental::OpenMPTargetSpace space;
- const int64_t shmem_size =
- shmem_size_L0 + shmem_size_L1; // L0 + L1 scratch memory per team.
- const int64_t padding = shmem_size * 10 / 100; // Padding per team.
-  // The total amount of scratch memory allocated depends on the
-  // maximum number of in-flight teams possible.
- int64_t total_size =
- (shmem_size + OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE + padding) *
- std::min(MAX_ACTIVE_THREADS / team_size, league_size);
-
- if (total_size > m_scratch_size) {
- space.deallocate(m_scratch_ptr, m_scratch_size);
- m_scratch_size = total_size;
- m_scratch_ptr = space.allocate(total_size);
- }
-}
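A worked example of the sizing arithmetic above, under assumed numbers (the real MAX_ACTIVE_THREADS and reduce-buffer size depend on the build):

#include <algorithm>
#include <cstdint>

constexpr std::int64_t max_active_threads = 2048;   // assumed
constexpr std::int64_t team_size          = 256;    // assumed
constexpr std::int64_t league_size        = 100;    // assumed
constexpr std::int64_t shmem              = 4096;   // L0 + L1 bytes per team
constexpr std::int64_t reduce             = 512;    // assumed TEAM_REDUCE_SIZE
constexpr std::int64_t padding = shmem * 10 / 100;  // = 409
constexpr std::int64_t teams   = std::min(max_active_threads / team_size,
                                          league_size);  // = 8 resident teams
constexpr std::int64_t total = (shmem + reduce + padding) * teams;  // = 40136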
-
-int* OpenMPTargetExec::get_lock_array(int num_teams) {
- Kokkos::Experimental::OpenMPTargetSpace space;
- int max_active_league_size = MAX_ACTIVE_THREADS / 32;
- int lock_array_elem =
- (num_teams > max_active_league_size) ? num_teams : max_active_league_size;
- if (m_lock_size < (lock_array_elem * sizeof(int))) {
- space.deallocate(m_lock_array, m_lock_size);
- m_lock_size = lock_array_elem * sizeof(int);
- m_lock_array = static_cast<int*>(space.allocate(m_lock_size));
-
- // FIXME_OPENMPTARGET - Creating a target region here to initialize the
- // lock_array with 0's fails. Hence creating an equivalent host array to
-  // achieve the same. The host array's values are then copied to the
-  // lock_array.
- int* h_lock_array = static_cast<int*>(
- omp_target_alloc(m_lock_size, omp_get_initial_device()));
-
- for (int i = 0; i < lock_array_elem; ++i) h_lock_array[i] = 0;
-
- OMPT_SAFE_CALL(omp_target_memcpy(m_lock_array, h_lock_array, m_lock_size, 0,
- 0, omp_get_default_device(),
- omp_get_initial_device()));
-
- omp_target_free(h_lock_array, omp_get_initial_device());
- }
-
- return m_lock_array;
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif // KOKKOS_ENABLE_OPENMPTARGET
+++ /dev/null
-
-#ifndef KOKKOS_OPENMPTARGETEXEC_HPP
-#define KOKKOS_OPENMPTARGETEXEC_HPP
-
-#include <impl/Kokkos_Traits.hpp>
-#include <impl/Kokkos_Spinwait.hpp>
-
-#include <Kokkos_Atomic.hpp>
-#include "Kokkos_OpenMPTarget_Abort.hpp"
-
-// FIXME_OPENMPTARGET - Using this macro to implement a workaround for
-// hierarchical reducers. It avoids hitting the code path which we wanted to
-// write but doesn't work. undef'ed at the end.
-// Intel compilers prefer the non-workaround version.
-#ifndef KOKKOS_ARCH_INTEL_GPU
-#define KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
-#endif
-
-// FIXME_OPENMPTARGET - Using this macro to implement a workaround for
-// hierarchical scan. It avoids hitting the code path which we wanted to
-// write but doesn't work. undef'ed at the end.
-#ifndef KOKKOS_ARCH_INTEL_GPU
-#define KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
-#endif
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class Reducer>
-struct OpenMPTargetReducerWrapper {
- using value_type = typename Reducer::value_type;
-
-  // Using a generic, user-supplied Reducer with the OpenMPTarget backend is
-  // not implemented; the deleted join/init overloads below make any such use
-  // a compile-time error.
- KOKKOS_INLINE_FUNCTION
- static void join(value_type&, const value_type&) = delete;
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type&, const volatile value_type&) = delete;
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type&) = delete;
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<Sum<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) { dest += src; }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest += src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::sum();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<Prod<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) { dest *= src; }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest *= src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::prod();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<Min<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src < dest) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src < dest) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::min();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<Max<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src > dest) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src > dest) dest = src;
- }
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::max();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<LAnd<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest = dest && src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest = dest && src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::land();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<LOr<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- using result_view_type = Kokkos::View<value_type, Space>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest = dest || src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest = dest || src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::lor();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<BAnd<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest = dest & src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest = dest & src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::band();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<BOr<Scalar, Space>> {
- public:
- // Required
- using value_type = std::remove_cv_t<Scalar>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest = dest | src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest = dest | src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val = reduction_identity<value_type>::bor();
- }
-};
-
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MinLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = ValLocScalar<scalar_type, index_type>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.val < dest.val) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.val < dest.val) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.val = reduction_identity<scalar_type>::min();
- val.loc = reduction_identity<index_type>::min();
- }
-};
-
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MaxLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = ValLocScalar<scalar_type, index_type>;
-
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.val > dest.val) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.val > dest.val) dest = src;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.val = reduction_identity<scalar_type>::max();
- val.loc = reduction_identity<index_type>::min();
- }
-};
-
-template <class Scalar, class Space>
-struct OpenMPTargetReducerWrapper<MinMax<Scalar, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
-
- public:
- // Required
- using value_type = MinMaxScalar<scalar_type>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- }
- if (src.max_val > dest.max_val) {
- dest.max_val = src.max_val;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- }
- if (src.max_val > dest.max_val) {
- dest.max_val = src.max_val;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.max_val = reduction_identity<scalar_type>::max();
- val.min_val = reduction_identity<scalar_type>::min();
- }
-};
-
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MinMaxLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = MinMaxLocScalar<scalar_type, index_type>;
-
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- dest.min_loc = src.min_loc;
- }
- if (src.max_val > dest.max_val) {
- dest.max_val = src.max_val;
- dest.max_loc = src.max_loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- dest.min_loc = src.min_loc;
- }
- if (src.max_val > dest.max_val) {
- dest.max_val = src.max_val;
- dest.max_loc = src.max_loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.max_val = reduction_identity<scalar_type>::max();
- val.min_val = reduction_identity<scalar_type>::min();
- val.max_loc = reduction_identity<index_type>::min();
- val.min_loc = reduction_identity<index_type>::min();
- }
-};
-
-//
-// specialize for MaxFirstLoc
-//
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MaxFirstLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = ValLocScalar<scalar_type, index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (dest.val < src.val) {
- dest = src;
- } else if (!(src.val < dest.val)) {
- dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (dest.val < src.val) {
- dest = src;
- } else if (!(src.val < dest.val)) {
- dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.val = reduction_identity<scalar_type>::max();
- val.loc = reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for MinFirstLoc
-//
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MinFirstLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = ValLocScalar<scalar_type, index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.val < dest.val) {
- dest = src;
- } else if (!(dest.val < src.val)) {
- dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.val < dest.val) {
- dest = src;
- } else if (!(dest.val < src.val)) {
- dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.val = reduction_identity<scalar_type>::min();
- val.loc = reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for MinMaxFirstLastLoc
-//
-template <class Scalar, class Index, class Space>
-struct OpenMPTargetReducerWrapper<MinMaxFirstLastLoc<Scalar, Index, Space>> {
- private:
- using scalar_type = std::remove_cv_t<Scalar>;
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = MinMaxLocScalar<scalar_type, index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- dest.min_loc = src.min_loc;
- } else if (!(dest.min_val < src.min_val)) {
- dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
- }
-
- if (dest.max_val < src.max_val) {
- dest.max_val = src.max_val;
- dest.max_loc = src.max_loc;
- } else if (!(src.max_val < dest.max_val)) {
- dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- if (src.min_val < dest.min_val) {
- dest.min_val = src.min_val;
- dest.min_loc = src.min_loc;
- } else if (!(dest.min_val < src.min_val)) {
- dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
- }
-
- if (dest.max_val < src.max_val) {
- dest.max_val = src.max_val;
- dest.max_loc = src.max_loc;
- } else if (!(src.max_val < dest.max_val)) {
- dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.max_val = reduction_identity<scalar_type>::max();
- val.min_val = reduction_identity<scalar_type>::min();
- val.max_loc = reduction_identity<index_type>::max();
- val.min_loc = reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for FirstLoc
-//
-template <class Index, class Space>
-struct OpenMPTargetReducerWrapper<FirstLoc<Index, Space>> {
- private:
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = FirstLocScalar<index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
- ? src.min_loc_true
- : dest.min_loc_true;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
- ? src.min_loc_true
- : dest.min_loc_true;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.min_loc_true = reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for LastLoc
-//
-template <class Index, class Space>
-struct OpenMPTargetReducerWrapper<LastLoc<Index, Space>> {
- private:
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = LastLocScalar<index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
- ? src.max_loc_true
- : dest.max_loc_true;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
- ? src.max_loc_true
- : dest.max_loc_true;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.max_loc_true = reduction_identity<index_type>::max();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for StdIsPartitioned
-//
-template <class Index, class Space>
-struct OpenMPTargetReducerWrapper<StdIsPartitioned<Index, Space>> {
- private:
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = StdIsPartScalar<index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
- ? src.max_loc_true
- : dest.max_loc_true;
-
- dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
- ? dest.min_loc_false
- : src.min_loc_false;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
- ? src.max_loc_true
- : dest.max_loc_true;
-
- dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
- ? dest.min_loc_false
- : src.min_loc_false;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.max_loc_true = ::Kokkos::reduction_identity<index_type>::max();
- val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-//
-// specialize for StdPartitionPoint
-//
-template <class Index, class Space>
-struct OpenMPTargetReducerWrapper<StdPartitionPoint<Index, Space>> {
- private:
- using index_type = std::remove_cv_t<Index>;
-
- public:
- // Required
- using value_type = StdPartPointScalar<index_type>;
-
-// WORKAROUND OPENMPTARGET
-// This pragma omp declare target should not be necessary, but the Intel
-// compiler fails without it.
-#pragma omp declare target
- // Required
- KOKKOS_INLINE_FUNCTION
- static void join(value_type& dest, const value_type& src) {
- dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
- ? dest.min_loc_false
- : src.min_loc_false;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void join(volatile value_type& dest, const volatile value_type& src) {
- dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
- ? dest.min_loc_false
- : src.min_loc_false;
- }
-
- KOKKOS_INLINE_FUNCTION
- static void init(value_type& val) {
- val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
- }
-#pragma omp end declare target
-};
-
-/*
-template<class ReducerType>
-class OpenMPTargetReducerWrapper {
- public:
- const ReducerType& reducer;
- using value_type = typename ReducerType::value_type;
- value_type& value;
-
- KOKKOS_INLINE_FUNCTION
- void join(const value_type& upd) {
- reducer.join(value,upd);
- }
-
- KOKKOS_INLINE_FUNCTION
- void init(const value_type& upd) {
- reducer.init(value,upd);
- }
-};*/
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-//----------------------------------------------------------------------------
-/** \brief Data for OpenMPTarget thread execution */
-
-class OpenMPTargetExec {
- public:
- // FIXME_OPENMPTARGET - Currently the maximum number of
- // teams possible is calculated based on NVIDIA's Volta GPU. In the
- // future this value should be based on the chosen architecture for the
- // OpenMPTarget backend.
- enum { MAX_ACTIVE_THREADS = 2080 * 80 };
- enum { MAX_ACTIVE_TEAMS = MAX_ACTIVE_THREADS / 32 };
-
- private:
- static void* scratch_ptr;
-
- public:
- static void verify_is_process(const char* const);
- static void verify_initialized(const char* const);
-
- static int* get_lock_array(int num_teams);
- static void* get_scratch_ptr();
- static void clear_scratch();
- static void clear_lock_array();
- static void resize_scratch(int64_t team_reduce_bytes,
- int64_t team_shared_bytes,
- int64_t thread_local_bytes, int64_t league_size);
-
- static void* m_scratch_ptr;
- static int64_t m_scratch_size;
- static int* m_lock_array;
- static int64_t m_lock_size;
- static uint32_t* m_uniquetoken_ptr;
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-class OpenMPTargetExecTeamMember {
- public:
- enum { TEAM_REDUCE_SIZE = 512 };
-
- /** \brief Thread states for team synchronization */
- enum { Active = 0, Rendezvous = 1 };
-
- using execution_space = Kokkos::Experimental::OpenMPTarget;
- using scratch_memory_space = execution_space::scratch_memory_space;
-
- scratch_memory_space m_team_shared;
- size_t m_team_scratch_size[2];
- int m_team_rank;
- int m_team_size;
- int m_league_rank;
- int m_league_size;
- int m_vector_length;
- int m_vector_lane;
- int m_shmem_block_index;
- void* m_glb_scratch;
- void* m_reduce_scratch;
-
- public:
- KOKKOS_INLINE_FUNCTION
- const execution_space::scratch_memory_space& team_shmem() const {
- return m_team_shared.set_team_thread_mode(0, 1, 0);
- }
-
- // Parameters of the set_team_thread_mode routine, for future reference:
- // first parameter - scratch level;
- // second parameter - size multiplier for advancing the scratch pointer
- // after a request has been serviced;
- // third parameter - offset multiplier from the current scratch pointer
- // when returning a pointer for a request.
- KOKKOS_INLINE_FUNCTION
- const execution_space::scratch_memory_space& team_scratch(int level) const {
- return m_team_shared.set_team_thread_mode(level, 1, 0);
- }
-
- KOKKOS_INLINE_FUNCTION
- const execution_space::scratch_memory_space& thread_scratch(int level) const {
- return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
- }
-
- KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
- KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
- KOKKOS_INLINE_FUNCTION int team_rank() const { return m_team_rank; }
- KOKKOS_INLINE_FUNCTION int team_size() const { return m_team_size; }
- KOKKOS_INLINE_FUNCTION void* impl_reduce_scratch() const {
- return m_reduce_scratch;
- }
-
- KOKKOS_INLINE_FUNCTION void team_barrier() const {
-#pragma omp barrier
- }
-
- template <class ValueType>
- KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& value,
- int thread_id) const {
- // Make sure there is enough scratch space:
- using type = std::conditional_t<(sizeof(ValueType) < TEAM_REDUCE_SIZE),
- ValueType, void>;
- type* team_scratch =
- reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
- TEAM_REDUCE_SIZE * omp_get_team_num());
-#pragma omp barrier
- if (team_rank() == thread_id) *team_scratch = value;
-#pragma omp barrier
- value = *team_scratch;
- }
-
- template <class Closure, class ValueType>
- KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure& f, ValueType& value,
- const int& thread_id) const {
- f(value);
- team_broadcast(value, thread_id);
- }
-
- // FIXME_OPENMPTARGET this function has the wrong interface and currently
- // ignores the reducer passed.
- template <class ValueType, class JoinOp>
- KOKKOS_INLINE_FUNCTION ValueType team_reduce(const ValueType& value,
- const JoinOp&) const {
-#pragma omp barrier
-
- using value_type = ValueType;
- // const JoinLambdaAdapter<value_type, JoinOp> op(op_in);
-
- // Make sure there is enough scratch space:
- using type = std::conditional_t<(sizeof(value_type) < TEAM_REDUCE_SIZE),
- value_type, void>;
-
- const int n_values = TEAM_REDUCE_SIZE / sizeof(value_type);
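- // Reduction strategy used below (for reference): the scratch buffer holds
- // n_values slots; every thread first accumulates its value into slot
- // (team_rank % n_values), and the slots are then combined by a binary tree
- // reduction so that the result ends up in slot 0.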
- type* team_scratch =
- reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
- TEAM_REDUCE_SIZE * omp_get_team_num());
- for (int i = m_team_rank; i < n_values; i += m_team_size) {
- team_scratch[i] = value_type();
- }
-
-#pragma omp barrier
-
- for (int k = 0; k < m_team_size; k += n_values) {
- if ((k <= m_team_rank) && (k + n_values > m_team_rank))
- team_scratch[m_team_rank % n_values] += value;
-#pragma omp barrier
- }
-
- for (int d = 1; d < n_values; d *= 2) {
- if ((m_team_rank + d < n_values) && (m_team_rank % (2 * d) == 0)) {
- team_scratch[m_team_rank] += team_scratch[m_team_rank + d];
- }
-#pragma omp barrier
- }
- return team_scratch[0];
- }
- /** \brief Intra-team exclusive prefix sum with team_rank() ordering
- * and inter-team non-deterministic ordering of the accumulation.
- *
- * The global inter-team accumulation value will, at the end of the
- * league's parallel execution, be the scan's total.
- * Parallel execution ordering of the league's teams is non-deterministic.
- * As such the base value for each team's scan operation is similarly
- * non-deterministic.
- */
- template <typename ArgType>
- KOKKOS_INLINE_FUNCTION ArgType
- team_scan(const ArgType& /*value*/, ArgType* const /*global_accum*/) const {
- // FIXME_OPENMPTARGET
- /* // Make sure there is enough scratch space:
- using type =
- std::conditional_t<(sizeof(ArgType) < TEAM_REDUCE_SIZE), ArgType, void>;
-
- volatile type * const work_value = ((type*) m_exec.scratch_thread());
-
- *work_value = value ;
-
- memory_fence();
-
- if ( team_fan_in() ) {
- // The last thread to synchronize returns true, all other threads wait
- for team_fan_out()
- // m_team_base[0] == highest ranking team member
- // m_team_base[ m_team_size - 1 ] == lowest ranking team member
- //
- // 1) copy from lower to higher rank, initialize lowest rank to zero
- // 2) prefix sum from lowest to highest rank, skipping lowest rank
-
- type accum = 0 ;
-
- if ( global_accum ) {
- for ( int i = m_team_size ; i-- ; ) {
- type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
- )->scratch_thread()); accum += val ;
- }
- accum = atomic_fetch_add( global_accum , accum );
- }
-
- for ( int i = m_team_size ; i-- ; ) {
- type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
- )->scratch_thread()); const type offset = accum ; accum += val ; val =
- offset ;
- }
-
- memory_fence();
- }
-
- team_fan_out();
-
- return *work_value ;*/
- return ArgType();
- }
-
- /** \brief Intra-team exclusive prefix sum with team_rank() ordering.
- *
- * The highest rank thread can compute the reduction total as
- * reduction_total = dev.team_scan( value ) + value ;
- */
- template <typename Type>
- KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
- return this->template team_scan<Type>(value, 0);
- }
-
- //----------------------------------------
- // Private for the driver
-
- private:
- using space = execution_space::scratch_memory_space;
-
- public:
- // FIXME_OPENMPTARGET - 512 (16*32) bytes at the beginning of the scratch
- // space for each league are saved for reduction. It should actually be
- // based on the ValueType of the reduction variable.
- inline OpenMPTargetExecTeamMember(
- const int league_rank, const int league_size, const int team_size,
- const int vector_length // const TeamPolicyInternal< OpenMPTarget,
- // Properties ...> & team
- ,
- void* const glb_scratch, const int shmem_block_index,
- const size_t shmem_size_L0, const size_t shmem_size_L1)
- : m_team_scratch_size{shmem_size_L0, shmem_size_L1},
- m_team_rank(0),
- m_team_size(team_size),
- m_league_rank(league_rank),
- m_league_size(league_size),
- m_vector_length(vector_length),
- m_shmem_block_index(shmem_block_index),
- m_glb_scratch(glb_scratch) {
- const int omp_tid = omp_get_thread_num();
-
- // The scratch memory allocated is a sum of TEAM_REDUCE_SIZE, L0 shmem size
- // and L1 shmem size. TEAM_REDUCE_SIZE = 512 bytes saved per team for
- // hierarchical reduction. There is an additional 10% of the requested
- // scratch memory allocated per team as padding. Hence the product with 0.1.
- const int reduce_offset =
- m_shmem_block_index *
- (shmem_size_L0 + shmem_size_L1 +
- ((shmem_size_L0 + shmem_size_L1) * 0.1) + TEAM_REDUCE_SIZE);
- const int l0_offset = reduce_offset + TEAM_REDUCE_SIZE;
- const int l1_offset = l0_offset + shmem_size_L0;
- m_team_shared = scratch_memory_space(
- (static_cast<char*>(glb_scratch) + l0_offset), shmem_size_L0,
- static_cast<char*>(glb_scratch) + l1_offset, shmem_size_L1);
- m_reduce_scratch = static_cast<char*>(glb_scratch) + reduce_offset;
- m_league_rank = league_rank;
- m_team_rank = omp_tid;
- m_vector_lane = 0;
- }
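-
- // Worked example of the offset arithmetic above (illustrative numbers):
- // with shmem_size_L0 = 1000, shmem_size_L1 = 2000 and shmem_block_index = 2,
- // one team's slice is 1000 + 2000 + 300 (10% padding) + 512
- // (TEAM_REDUCE_SIZE) = 3812 bytes, so reduce_offset = 2 * 3812 = 7624,
- // l0_offset = 7624 + 512 = 8136 and l1_offset = 8136 + 1000 = 9136.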
-
- static inline int team_reduce_size() { return TEAM_REDUCE_SIZE; }
-};
-
-template <class... Properties>
-class TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget, Properties...>
- : public PolicyTraits<Properties...> {
- public:
- //! Tag this class as a kokkos execution policy
- using execution_policy = TeamPolicyInternal;
-
- using traits = PolicyTraits<Properties...>;
-
- //----------------------------------------
-
- template <class FunctorType>
- inline static int team_size_max(const FunctorType&, const ParallelForTag&) {
- return 256;
- }
-
- template <class FunctorType>
- inline static int team_size_max(const FunctorType&,
- const ParallelReduceTag&) {
- return 256;
- }
-
- template <class FunctorType, class ReducerType>
- inline static int team_size_max(const FunctorType&, const ReducerType&,
- const ParallelReduceTag&) {
- return 256;
- }
-
- template <class FunctorType>
- inline static int team_size_recommended(const FunctorType&,
- const ParallelForTag&) {
- return 128;
- }
-
- template <class FunctorType>
- inline static int team_size_recommended(const FunctorType&,
- const ParallelReduceTag&) {
- return 128;
- }
-
- template <class FunctorType, class ReducerType>
- inline static int team_size_recommended(const FunctorType&,
- const ReducerType&,
- const ParallelReduceTag&) {
- return 128;
- }
-
- //----------------------------------------
-
- private:
- int m_league_size;
- int m_team_size;
- int m_vector_length;
- int m_team_alloc;
- int m_team_iter;
- std::array<size_t, 2> m_team_scratch_size;
- std::array<size_t, 2> m_thread_scratch_size;
- bool m_tune_team_size;
- bool m_tune_vector_length;
- constexpr const static size_t default_team_size = 256;
- int m_chunk_size;
-
- inline void init(const int league_size_request, const int team_size_request,
- const int vector_length_request) {
- m_league_size = league_size_request;
-
- // Minimum team size should be 32 for OpenMPTarget backend.
- if (team_size_request < 32) {
- Kokkos::Impl::OpenMPTarget_abort(
- "OpenMPTarget backend requires a minimum of 32 threads per team.\n");
- } else
- m_team_size = team_size_request;
-
- m_vector_length = vector_length_request;
- set_auto_chunk_size();
- }
-
- template <typename ExecSpace, typename... OtherProperties>
- friend class TeamPolicyInternal;
-
- public:
- // FIXME_OPENMPTARGET : Currently this routine is a copy of the Cuda
- // implementation, but it has to be tailored to be architecture-specific.
- inline static int scratch_size_max(int level) {
- return (
- level == 0 ? 1024 * 40 : // 48kB is the max for CUDA, but we need some
- // for team_member.reduce etc.
- 20 * 1024 *
- 1024); // arbitrarily setting this to 20MB, for a Volta V100
- // that would give us about 3.2GB for 2 teams per SM
- }
- inline bool impl_auto_team_size() const { return m_tune_team_size; }
- inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
- inline void impl_set_team_size(const size_t size) { m_team_size = size; }
- inline void impl_set_vector_length(const size_t length) {
- m_vector_length = length;
- }
- inline int impl_vector_length() const { return m_vector_length; }
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED inline int vector_length() const {
- return impl_vector_length();
- }
-#endif
- inline int team_size() const { return m_team_size; }
- inline int league_size() const { return m_league_size; }
- inline size_t scratch_size(const int& level, int team_size_ = -1) const {
- if (team_size_ < 0) team_size_ = m_team_size;
- return m_team_scratch_size[level] +
- team_size_ * m_thread_scratch_size[level];
- }
-
- inline Kokkos::Experimental::OpenMPTarget space() const {
- return Kokkos::Experimental::OpenMPTarget();
- }
-
- template <class... OtherProperties>
- TeamPolicyInternal(const TeamPolicyInternal<OtherProperties...>& p)
- : m_league_size(p.m_league_size),
- m_team_size(p.m_team_size),
- m_vector_length(p.m_vector_length),
- m_team_alloc(p.m_team_alloc),
- m_team_iter(p.m_team_iter),
- m_team_scratch_size(p.m_team_scratch_size),
- m_thread_scratch_size(p.m_thread_scratch_size),
- m_tune_team_size(p.m_tune_team_size),
- m_tune_vector_length(p.m_tune_vector_length),
- m_chunk_size(p.m_chunk_size) {}
-
- /** \brief Specify league size, request team size */
- TeamPolicyInternal(const typename traits::execution_space&,
- int league_size_request, int team_size_request,
- int vector_length_request = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(false),
- m_tune_vector_length(false),
- m_chunk_size(0) {
- init(league_size_request, team_size_request, vector_length_request);
- }
-
- TeamPolicyInternal(const typename traits::execution_space&,
- int league_size_request,
- const Kokkos::AUTO_t& /* team_size_request */
- ,
- int vector_length_request = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(true),
- m_tune_vector_length(false),
- m_chunk_size(0) {
- init(league_size_request, default_team_size / vector_length_request,
- vector_length_request);
- }
-
- TeamPolicyInternal(const typename traits::execution_space&,
- int league_size_request,
- const Kokkos::AUTO_t& /* team_size_request */
- ,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(true),
- m_tune_vector_length(true),
- m_chunk_size(0) {
- init(league_size_request, default_team_size, 1);
- }
- TeamPolicyInternal(const typename traits::execution_space&,
- int league_size_request, int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(false),
- m_tune_vector_length(true),
- m_chunk_size(0) {
- init(league_size_request, team_size_request, 1);
- }
-
- TeamPolicyInternal(int league_size_request, int team_size_request,
- int vector_length_request = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(false),
- m_tune_vector_length(false),
- m_chunk_size(0) {
- init(league_size_request, team_size_request, vector_length_request);
- }
-
- TeamPolicyInternal(int league_size_request,
- const Kokkos::AUTO_t& /* team_size_request */
- ,
- int vector_length_request = 1)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(true),
- m_tune_vector_length(false),
- m_chunk_size(0) {
- init(league_size_request, default_team_size / vector_length_request,
- vector_length_request);
- }
-
- TeamPolicyInternal(int league_size_request,
- const Kokkos::AUTO_t& /* team_size_request */
- ,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(true),
- m_tune_vector_length(true),
- m_chunk_size(0) {
- init(league_size_request, default_team_size, 1);
- }
- TeamPolicyInternal(int league_size_request, int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_tune_team_size(false),
- m_tune_vector_length(true),
- m_chunk_size(0) {
- init(league_size_request, team_size_request, 1);
- }
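-
- // A minimal usage sketch of the constructors above (illustrative only;
- // "league_size" is a placeholder):
- //
- //   using policy_t = Kokkos::TeamPolicy<Kokkos::Experimental::OpenMPTarget>;
- //   policy_t p1(league_size, 64);            // explicit team size (>= 32)
- //   policy_t p2(league_size, Kokkos::AUTO);  // team size chosen internally
-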
- inline static size_t vector_length_max() {
- return 32; /* TODO: this is bad. Need logic that is compiler and backend
- aware */
- }
- inline int team_alloc() const { return m_team_alloc; }
- inline int team_iter() const { return m_team_iter; }
-
- inline int chunk_size() const { return m_chunk_size; }
-
- /** \brief set chunk_size to a discrete value*/
- inline TeamPolicyInternal& set_chunk_size(
- typename traits::index_type chunk_size_) {
- m_chunk_size = chunk_size_;
- return *this;
- }
-
- /** \brief set per team scratch size for a specific level of the scratch
- * hierarchy */
- inline TeamPolicyInternal& set_scratch_size(const int& level,
- const PerTeamValue& per_team) {
- m_team_scratch_size[level] = per_team.value;
- return *this;
- }
-
- /** \brief set per thread scratch size for a specific level of the scratch
- * hierarchy */
- inline TeamPolicyInternal& set_scratch_size(
- const int& level, const PerThreadValue& per_thread) {
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- /** \brief set per thread and per team scratch size for a specific level of
- * the scratch hierarchy */
- inline TeamPolicyInternal& set_scratch_size(
- const int& level, const PerTeamValue& per_team,
- const PerThreadValue& per_thread) {
- m_team_scratch_size[level] = per_team.value;
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- private:
- /** \brief finalize chunk_size if it was set to AUTO*/
- inline void set_auto_chunk_size() {
- int concurrency = 2048 * 128;
-
- if (concurrency == 0) concurrency = 1;
-
- if (m_chunk_size > 0) {
- if (!Impl::is_integral_power_of_two(m_chunk_size))
- Kokkos::abort("TeamPolicy blocking granularity must be power of two");
- }
-
- int new_chunk_size = 1;
- while (new_chunk_size * 100 * concurrency < m_league_size)
- new_chunk_size *= 2;
- if (new_chunk_size < 128) {
- new_chunk_size = 1;
- while ((new_chunk_size * 40 * concurrency < m_league_size) &&
- (new_chunk_size < 128))
- new_chunk_size *= 2;
- }
- m_chunk_size = new_chunk_size;
- }
-
- public:
- using member_type = Impl::OpenMPTargetExecTeamMember;
-};
-} // namespace Impl
-
-} // namespace Kokkos
-
-namespace Kokkos {
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>
-TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType& count) {
- return Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
-TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType1& begin, const iType2& end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(begin),
- iType(end));
-}
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>
-ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType& count) {
- return Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
-ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType1& arg_begin, const iType2& arg_end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
- iType(arg_end));
-}
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>
-TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType& count) {
- return Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
-}
-
-template <typename iType1, typename iType2>
-KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
- std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
-TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
- const iType1& arg_begin, const iType2& arg_end) {
- using iType = std::common_type_t<iType1, iType2>;
- return Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
- iType(arg_end));
-}
-
-KOKKOS_INLINE_FUNCTION
-Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember> PerTeam(
- const Impl::OpenMPTargetExecTeamMember& thread) {
- return Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
-}
-
-KOKKOS_INLINE_FUNCTION
-Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember> PerThread(
- const Impl::OpenMPTargetExecTeamMember& thread) {
- return Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
-}
-} // namespace Kokkos
-
-namespace Kokkos {
-
-/** \brief Inter-thread parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda) {
-#pragma omp for nowait schedule(static, 1)
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
-}
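-
-// A minimal usage sketch (illustrative only; "team" is assumed to be the
-// member handle passed into a TeamPolicy lambda and "view" a Kokkos::View):
-//
-//   Kokkos::parallel_for(Kokkos::TeamThreadRange(team, N),
-//                        [&](const int i) { view(i) = 2 * view(i); });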
-
-/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team
- * and a summation of val is performed and put into result.
- */
-
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
-parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ValueType& result) {
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- ValueType* TeamThread_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp barrier
- TeamThread_scratch[0] = ValueType();
-#pragma omp barrier
-
- if constexpr (std::is_arithmetic<ValueType>::value) {
-#pragma omp for reduction(+ : TeamThread_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- TeamThread_scratch[0] += tmp;
- }
- } else {
-#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
-
-#pragma omp for reduction(custom : TeamThread_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- TeamThread_scratch[0] += tmp;
- }
- }
-
- result = TeamThread_scratch[0];
-}
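-
-// A minimal usage sketch of the overload above (illustrative only; "team" and
-// "view" as before):
-//
-//   double sum = 0.;
-//   Kokkos::parallel_reduce(
-//       Kokkos::TeamThreadRange(team, N),
-//       [&](const int i, double& lsum) { lsum += view(i); }, sum);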
-
-#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
-// For some reason the version we actually wanted to write does not work and
-// crashes; we should try it again with every new compiler.
-// This is the variant we actually wanted to write:
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ReducerType result) {
- using ValueType = typename ReducerType::value_type;
-
-#pragma omp declare reduction( \
- custominner:ValueType \
- : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer( \
- Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- ValueType* TeamThread_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp barrier
- Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamThread_scratch[0]);
-#pragma omp barrier
-
-#pragma omp for reduction(custominner : TeamThread_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- lambda(i, TeamThread_scratch[0]);
- }
- result.reference() = TeamThread_scratch[0];
-}
-#else
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ReducerType result) {
- using ValueType = typename ReducerType::value_type;
-
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- ValueType* TeamThread_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp declare reduction( \
- omp_red_teamthread_reducer:ValueType \
- : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer( \
- Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp barrier
- ValueType tmp;
- result.init(tmp);
- TeamThread_scratch[0] = tmp;
-#pragma omp barrier
-
- iType team_size = iType(omp_get_num_threads());
-#pragma omp for reduction(omp_red_teamthread_reducer \
- : TeamThread_scratch[:1]) schedule(static, 1)
- for (iType t = 0; t < team_size; t++) {
- ValueType tmp2;
- result.init(tmp2);
-
- for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
- i += team_size) {
- lambda(i, tmp2);
- }
-
- // FIXME_OPENMPTARGET: Join should work but doesn't. Every thread gets a
- // private TeamThread_scratch[0], and at the end of the for-loop the `join`
- // operation is performed by OpenMP itself; hence the simple assignment
- // works.
- // result.join(TeamThread_scratch[0], tmp2);
- TeamThread_scratch[0] = tmp2;
- }
-
- result.reference() = TeamThread_scratch[0];
-}
-#endif // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
-
-/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team
- * and a reduction of val is performed using JoinType(ValueType& val, const
- * ValueType& update) and put into init_result. The input value of init_result
- * is used as the initializer for temporary variables of ValueType. Therefore the
- * input value should be the neutral element with respect to the join operation
- * (e.g. '0 for +-' or '1 for *').
- */
-template <typename iType, class Lambda, typename ValueType, class JoinType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, const JoinType& join, ValueType& init_result) {
- ValueType* TeamThread_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- // FIXME_OPENMPTARGET: Still need to figure out how to get value_count here.
- const int value_count = 1;
-
-#pragma omp barrier
- // Seed every thread's scratch slot with the neutral element provided in
- // init_result so that the tree reduction below joins well-defined values.
- TeamThread_scratch[omp_get_thread_num() * value_count] = init_result;
-#pragma omp barrier
-
- // Each thread accumulates its partial results into its own scratch slot.
-#pragma omp for
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- lambda(i, TeamThread_scratch[omp_get_thread_num() * value_count]);
- }
-
- // Reduce all partial results within a team.
- const int team_size = omp_get_num_threads();
- int tree_neighbor_offset = 1;
- do {
-#pragma omp for
- for (int i = 0; i < team_size - tree_neighbor_offset;
- i += 2 * tree_neighbor_offset) {
- const int neighbor = i + tree_neighbor_offset;
- join(lambda, &TeamThread_scratch[i * value_count],
- &TeamThread_scratch[neighbor * value_count]);
- }
- tree_neighbor_offset *= 2;
- } while (tree_neighbor_offset < team_size);
- init_result = TeamThread_scratch[0];
-}
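-
-// A minimal usage sketch of the JoinType overload above (illustrative only):
-// a max-reduction where init_result carries the neutral element, as required
-// by the documentation above.
-//
-//   double result = std::numeric_limits<double>::lowest();
-//   Kokkos::parallel_reduce(
-//       Kokkos::TeamThreadRange(team, N),
-//       [&](const int i, double& val) { val = val > view(i) ? val : view(i); },
-//       [](double& dest, const double& src) { dest = dest > src ? dest : src; },
-//       result);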
-
-// This is largely the same code as in HIP and CUDA except for the member name
-template <typename iType, class FunctorType>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_bounds,
- const FunctorType& lambda) {
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- TeamPolicy<Experimental::OpenMPTarget>,
- FunctorType>;
- using value_type = typename Analysis::value_type;
-
- const auto start = loop_bounds.start;
- const auto end = loop_bounds.end;
- // Note: this member is called .member in the CUDA specialization of
- // TeamThreadRangeBoundariesStruct.
- auto& member = loop_bounds.team;
- const auto team_size = member.team_size();
- const auto team_rank = member.team_rank();
-
-#if defined(KOKKOS_IMPL_TEAM_SCAN_WORKAROUND)
- value_type scan_val = value_type();
-
- if (team_rank == 0) {
- for (iType i = start; i < end; ++i) {
- lambda(i, scan_val, true);
- }
- }
-#pragma omp barrier
-#else
- const auto nchunk = (end - start + team_size - 1) / team_size;
- value_type accum = 0;
- // each team has to process one or
- // more chunks of the prefix scan
- for (iType i = 0; i < nchunk; ++i) {
- auto ii = start + i * team_size + team_rank;
- // local accumulation for this chunk
- value_type local_accum = 0;
- // user updates value with prefix value
- if (ii < loop_bounds.end) lambda(ii, local_accum, false);
- // perform team scan
- local_accum = member.team_scan(local_accum);
- // add this block's accum to the total accumulation
- auto val = accum + local_accum;
- // user updates their data with total accumulation
- if (ii < loop_bounds.end) lambda(ii, val, true);
- // the last value needs to be propagated to the next chunk
- if (team_rank == team_size - 1) accum = val;
- // broadcast last value to rest of the team
- member.team_broadcast(accum, team_size - 1);
- }
-#endif
-}
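-
-// A minimal usage sketch of the team-level scan above (illustrative only;
-// "counts" and "offsets" are placeholder views): the functor must add its
-// contribution to the partial value in both the final == false and the
-// final == true pass.
-//
-//   Kokkos::parallel_scan(Kokkos::TeamThreadRange(team, N),
-//                         [&](const int i, int& partial, const bool final) {
-//                           if (final) offsets(i) = partial;
-//                           partial += counts(i);
-//                         });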
-
-} // namespace Kokkos
-
-namespace Kokkos {
-/** \brief Intra-thread vector parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda) {
-#pragma omp simd
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
-}
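-
-// A minimal usage sketch (illustrative only; typically nested inside the body
-// of a TeamThreadRange loop, with "row" a placeholder view):
-//
-//   Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, M),
-//                        [&](const int j) { row(j) = 0.; });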
-
-/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling thread
- * and a summation of val is performed and put into result.
- */
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ValueType& result) {
- ValueType vector_reduce = ValueType();
-
- if constexpr (std::is_arithmetic<ValueType>::value) {
-#pragma omp simd reduction(+ : vector_reduce)
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- vector_reduce += tmp;
- }
- } else {
-#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
-
-#pragma omp simd reduction(custom : vector_reduce)
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- lambda(i, vector_reduce);
- }
- }
-
- result = vector_reduce;
-}
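-
-// A minimal usage sketch of the overload above (illustrative only): a dot
-// product accumulated across the vector lanes of one thread ("a" and "b" are
-// placeholder views).
-//
-//   double dot = 0.;
-//   Kokkos::parallel_reduce(
-//       Kokkos::ThreadVectorRange(team, M),
-//       [&](const int j, double& d) { d += a(j) * b(j); }, dot);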
-
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ReducerType const& result) {
- using ValueType = typename ReducerType::value_type;
-
-#pragma omp declare reduction( \
- custom:ValueType \
- : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer( \
- Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
- ValueType vector_reduce;
- Impl::OpenMPTargetReducerWrapper<ReducerType>::init(vector_reduce);
-
-#pragma omp simd reduction(custom : vector_reduce)
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- lambda(i, vector_reduce);
- }
-
- result.reference() = vector_reduce;
-}
-
-/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling thread
- * and a reduction of val is performed using JoinType(ValueType& val, const
- * ValueType& update) and put into init_result. The input value of init_result
- * is used as the initializer for temporary variables of ValueType. Therefore the
- * input value should be the neutral element with respect to the join operation
- * (e.g. '0 for +-' or '1 for *').
- */
-template <typename iType, class Lambda, typename ValueType, class JoinType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, const JoinType& join, ValueType& init_result) {
- ValueType result = init_result;
-
- // FIXME_OPENMPTARGET think about omp simd
- // join does not work with omp reduction clause
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- join(result, tmp);
- }
-
- init_result = result;
-}
-
-/** \brief Intra-thread vector parallel exclusive prefix sum. Executes
- * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
- * operation is performed. Depending on the target execution space the operator
- * might be called twice: once with final=false and once with final=true. When
- * final==true val contains the prefix sum value. The contribution of this "i"
- * needs to be added to val no matter whether final==true or not. In a serial
- * execution (i.e. team_size==1) the operator is only called once with
- * final==true. scan_val will be set to the final sum value over all vector
- * lanes.
- */
-template <typename iType, class FunctorType>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const FunctorType& lambda) {
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- TeamPolicy<Experimental::OpenMPTarget>,
- FunctorType>;
- using value_type = typename Analysis::value_type;
-
- value_type scan_val = value_type();
-
-#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
-#pragma ivdep
-#endif
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; ++i) {
- lambda(i, scan_val, true);
- }
-}
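-
-// A minimal usage sketch of the vector-level scan above (illustrative only),
-// mirroring the team-level example ("in" and "out" are placeholder views):
-//
-//   Kokkos::parallel_scan(Kokkos::ThreadVectorRange(team, M),
-//                         [&](const int j, int& partial, const bool final) {
-//                           if (final) out(j) = partial;
-//                           partial += in(j);
-//                         });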
-
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
-#undef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
-#endif
-
-namespace Kokkos {
-/** \brief Intra-team vector parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling team.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda) {
-#pragma omp for simd nowait schedule(static, 1)
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
-}
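-
-// A minimal usage sketch (illustrative only): the iteration space is spread
-// over all threads and vector lanes of the team at once.
-//
-//   Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N),
-//                        [&](const int i) { view(i) = 0.; });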
-
-/** \brief Intra-team vector parallel_reduce. Executes lambda(iType i,
- * ValueType & val) for each i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all vector lanes of the calling team
- * and a summation of val is performed and put into result.
- */
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ValueType& result) {
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- ValueType* TeamVector_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp barrier
- TeamVector_scratch[0] = ValueType();
-#pragma omp barrier
-
- if constexpr (std::is_arithmetic<ValueType>::value) {
-#pragma omp for simd reduction(+ : TeamVector_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- TeamVector_scratch[0] += tmp;
- }
- } else {
-#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
-
-#pragma omp for simd reduction(custom : TeamVector_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- TeamVector_scratch[0] += tmp;
- }
- }
-
- result = TeamVector_scratch[0];
-}
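-
-// A minimal usage sketch of the overload above (illustrative only; "x" is a
-// placeholder view):
-//
-//   double nrm2 = 0.;
-//   Kokkos::parallel_reduce(
-//       Kokkos::TeamVectorRange(team, N),
-//       [&](const int i, double& s) { s += x(i) * x(i); }, nrm2);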
-
-#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ReducerType const& result) {
- using ValueType = typename ReducerType::value_type;
-
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
-#pragma omp declare reduction( \
- custom:ValueType \
- : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer( \
- Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
- ValueType* TeamVector_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp barrier
- Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamVector_scratch[0]);
-#pragma omp barrier
-
-#pragma omp for simd reduction(custom : TeamVector_scratch[:1])
- for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
- lambda(i, TeamVector_scratch[0]);
- }
-
- result.reference() = TeamVector_scratch[0];
-}
-#else
-template <typename iType, class Lambda, typename ReducerType>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
-parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
- iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
- const Lambda& lambda, ReducerType const& result) {
- using ValueType = typename ReducerType::value_type;
-
- // FIXME_OPENMPTARGET - Make sure that if it is an array reduction, the
- // number of elements in the array is <= 32. For reductions we allocate
- // 16 bytes per element in the scratch space; hence, 16*32 = 512.
- static_assert(sizeof(ValueType) <=
- Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
-
- ValueType* TeamVector_scratch =
- static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
-
-#pragma omp declare reduction( \
- omp_red_teamthread_reducer:ValueType \
- : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer( \
- Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp barrier
- ValueType tmp;
- result.init(tmp);
- TeamVector_scratch[0] = tmp;
-#pragma omp barrier
-
- iType team_size = iType(omp_get_num_threads());
-#pragma omp for simd reduction(omp_red_teamthread_reducer \
- : TeamVector_scratch[:1]) schedule(static, 1)
- for (iType t = 0; t < team_size; t++) {
- ValueType tmp2;
- result.init(tmp2);
-
- for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
- i += team_size) {
- lambda(i, tmp2);
- }
- TeamVector_scratch[0] = tmp2;
- }
-
- result.reference() = TeamVector_scratch[0];
-}
-#endif // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
-} // namespace Kokkos
-
-#ifdef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
-#undef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
-#endif
-
-namespace Kokkos {
-
-template <class FunctorType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
- /*single_struct*/,
- const FunctorType& lambda) {
- lambda();
-}
-
-template <class FunctorType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
- single_struct,
- const FunctorType& lambda) {
- if (single_struct.team_member.team_rank() == 0) lambda();
-}
-
-template <class FunctorType, class ValueType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
- /*single_struct*/,
- const FunctorType& lambda, ValueType& val) {
- lambda(val);
-}
-
-template <class FunctorType, class ValueType>
-KOKKOS_INLINE_FUNCTION void single(
- const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
- single_struct,
- const FunctorType& lambda, ValueType& val) {
- if (single_struct.team_member.team_rank() == 0) {
- lambda(val);
- }
- single_struct.team_member.team_broadcast(val, 0);
-}
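-
-// Editor's sketch (not part of the original sources): how the overloads
-// above are reached through the public Kokkos::single() front end. The
-// PerTeam variant runs the lambda on one thread per team and, in the
-// value-returning overload, broadcasts the result to the whole team.
-// `team_policy`, `league_size`, and `team_size` are illustrative assumptions.
-//
-//   using team_policy = Kokkos::TeamPolicy<Kokkos::Experimental::OpenMPTarget>;
-//   Kokkos::parallel_for(
-//       team_policy(league_size, team_size),
-//       KOKKOS_LAMBDA(const team_policy::member_type& team) {
-//         double scale = 0.;
-//         // Executed once per team; `scale` ends up valid on every thread.
-//         Kokkos::single(
-//             Kokkos::PerTeam(team), [&](double& v) { v = 2.0; }, scale);
-//       });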
-} // namespace Kokkos
-
-#endif /* #ifndef KOKKOS_OPENMPTARGETEXEC_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENMPTARGET_INSTANCE_HPP
-#define KOKKOS_OPENMPTARGET_INSTANCE_HPP
-
-#include <Kokkos_Core.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-enum class openmp_fence_is_static { yes, no };
-
-class OpenMPTargetInternal {
- private:
- OpenMPTargetInternal() = default;
- OpenMPTargetInternal(const OpenMPTargetInternal&) = default;
- OpenMPTargetInternal& operator=(const OpenMPTargetInternal&) = default;
-
- public:
- void fence(openmp_fence_is_static is_static = openmp_fence_is_static::no);
- void fence(const std::string& name,
- openmp_fence_is_static is_static = openmp_fence_is_static::no);
-
- /** \brief Return the maximum amount of concurrency. */
- int concurrency();
-
- //! Print configuration information to the given output stream.
- void print_configuration(std::ostream& os, bool verbose) const;
-
- static const char* name();
-
- //! Free any resources being consumed by the device.
- void impl_finalize();
-
- //! Whether the backend has been initialized.
- int impl_is_initialized();
- uint32_t impl_get_instance_id() const noexcept;
- //! Initialize the OpenMPTarget backend.
- void impl_initialize();
-
- static OpenMPTargetInternal* impl_singleton();
-
- private:
- bool m_is_initialized = false;
- uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
- Kokkos::Experimental::OpenMPTarget>(reinterpret_cast<uintptr_t>(this));
-};
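-
-// Editor's note, a hedged sketch of how this internal singleton is meant to
-// be driven (member names taken from the declarations above):
-//
-//   auto* internal =
-//       Kokkos::Experimental::Impl::OpenMPTargetInternal::impl_singleton();
-//   if (!internal->impl_is_initialized()) internal->impl_initialize();
-//   internal->fence("example fence");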
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif // KOKKOS_OPENMPTARGET_INSTANCE_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENMPTARGET_PARALLEL_HPP
-#define KOKKOS_OPENMPTARGET_PARALLEL_HPP
-
-#include <omp.h>
-#include <sstream>
-#include <Kokkos_Parallel.hpp>
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- public:
- void execute() const { execute_impl<WorkTag>(); }
-
- template <class TagType>
- void execute_impl() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- const auto begin = m_policy.begin();
- const auto end = m_policy.end();
-
- if (end <= begin) return;
-
- FunctorType a_functor(m_functor);
-
-#pragma omp target teams distribute parallel for map(to : a_functor)
- for (auto i = begin; i < end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- a_functor(i);
- } else {
- a_functor(TagType(), i);
- }
- }
- }
-
- ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
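-
-// Editor's sketch: the user-facing call that instantiates this class. A
-// plain range parallel_for lowers to the single
-// `target teams distribute parallel for` region in execute_impl() above.
-// The view names are illustrative assumptions.
-//
-//   #include <Kokkos_Core.hpp>
-//
-//   void axpy(double a, Kokkos::View<double*> x, Kokkos::View<double*> y) {
-//     Kokkos::parallel_for(
-//         "axpy",
-//         Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(
-//             0, x.extent(0)),
-//         KOKKOS_LAMBDA(const int i) { y(i) += a * x(i); });
-//   }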
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-// This class has the memcpy routine that is commonly used by ParallelReduce
-// over RangePolicy and TeamPolicy.
-template <class PointerType>
-struct ParallelReduceCommon {
- // Copy the result back to device if the view is on the device.
- static void memcpy_result(PointerType dest, PointerType src, size_t size,
- bool ptr_on_device) {
- if (ptr_on_device) {
- OMPT_SAFE_CALL(omp_target_memcpy(dest, src, size, 0, 0,
- omp_get_default_device(),
- omp_get_initial_device()));
- } else {
- *dest = *src;
- }
- }
-};
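-
-// Editor's sketch of the two paths above: for a host-resident result the
-// scalar is assigned directly, while for a device-resident result
-// omp_target_memcpy moves the bytes from the host temporary to the device.
-// The values below are illustrative.
-//
-//   double host_result;
-//   double tmp = 42.0;  // a partial result living on the host
-//   ParallelReduceCommon<double*>::memcpy_result(
-//       &host_result, &tmp, sizeof(double), /*ptr_on_device=*/false);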
-
-template <class FunctorType, class PolicyType, class ReducerType,
- class PointerType, class ValueType>
-struct ParallelReduceSpecialize {
- inline static void execute(const FunctorType& /*f*/, const PolicyType& /*p*/,
- PointerType /*result_ptr*/) {
- constexpr int FunctorHasJoin =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>::has_join_member_function;
- constexpr int UseReducerType = is_reducer<ReducerType>::value;
-
- std::stringstream error_message;
- error_message << "Error: Invalid Specialization " << FunctorHasJoin << ' '
- << UseReducerType << '\n';
- // FIXME_OPENMPTARGET
- OpenMPTarget_abort(error_message.str().c_str());
- }
-};
-
-template <class FunctorType, class ReducerType, class PointerType,
- class ValueType, class... PolicyArgs>
-struct ParallelReduceSpecialize<FunctorType, Kokkos::RangePolicy<PolicyArgs...>,
- ReducerType, PointerType, ValueType> {
- using PolicyType = Kokkos::RangePolicy<PolicyArgs...>;
- using TagType = typename PolicyType::work_tag;
- using ReducerTypeFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- PolicyType, ReducerTypeFwd>;
- using ReferenceType = typename Analysis::reference_type;
-
- using ParReduceCommon = ParallelReduceCommon<PointerType>;
-
- static void execute_reducer(const FunctorType& f, const PolicyType& p,
- PointerType result_ptr, bool ptr_on_device) {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- const auto begin = p.begin();
- const auto end = p.end();
-
- ValueType result;
- OpenMPTargetReducerWrapper<ReducerType>::init(result);
-
- // Initialize and copy back the result even if it is a zero length
- // reduction.
- if (end <= begin) {
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- return;
- }
-
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for map(to \
- : f) reduction(custom \
- : result)
- for (auto i = begin; i < end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- f(i, result);
- } else {
- f(TagType(), i, result);
- }
- }
-
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- }
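-
- // Editor's sketch: a reducer-based reduction that is routed to
- // execute_reducer() above; OpenMPTargetReducerWrapper supplies the
- // join/init used in the `declare reduction` pragma. The view `v` and the
- // extent `n` are illustrative assumptions.
- //
- //   double max_val;
- //   Kokkos::parallel_reduce(
- //       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
- //       KOKKOS_LAMBDA(const int i, double& m) {
- //         if (v(i) > m) m = v(i);
- //       },
- //       Kokkos::Max<double>(max_val));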
-
- template <class TagType, int NumReductions>
- static void execute_array(const FunctorType& f, const PolicyType& p,
- PointerType result_ptr, bool ptr_on_device) {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- const auto begin = p.begin();
- const auto end = p.end();
-
- // Take this branch if the reduction is on a scalar type.
- if constexpr (NumReductions == 1) {
- ValueType result = ValueType();
-
- // Initialize and copy back the result even if it is a zero length
- // reduction.
- if (end <= begin) {
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- return;
- }
- // Case where reduction is on a native data type.
- if constexpr (std::is_arithmetic<ValueType>::value) {
-#pragma omp target teams distribute parallel for \
- map(to:f) reduction(+: result)
- for (auto i = begin; i < end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- f(i, result);
- } else {
- f(TagType(), i, result);
- }
- }
- } else {
-#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
-#pragma omp target teams distribute parallel for map(to \
- : f) reduction(custom \
- : result)
- for (auto i = begin; i < end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- f(i, result);
- } else {
- f(TagType(), i, result);
- }
- }
- }
-
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- } else {
- ValueType result[NumReductions] = {};
-
- // Initialize and copy back the result even if it is a zero length
- // reduction.
- if (end <= begin) {
- ParReduceCommon::memcpy_result(result_ptr, result,
- NumReductions * sizeof(ValueType),
- ptr_on_device);
- return;
- }
-#pragma omp target teams distribute parallel for map(to:f) reduction(+:result[:NumReductions])
- for (auto i = begin; i < end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- f(i, result);
- } else {
- f(TagType(), i, result);
- }
- }
-
- ParReduceCommon::memcpy_result(
- result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
- }
- }
-
- static void execute_init_join(const FunctorType& f, const PolicyType& p,
- PointerType ptr, const bool ptr_on_device) {
- const auto begin = p.begin();
- const auto end = p.end();
-
- using FunctorAnalysis =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>;
- constexpr int HasInit = FunctorAnalysis::has_init_member_function;
-
- // Initialize the result pointer.
-
- const auto size = end - begin;
-
- // FIXME_OPENMPTARGET: The team size and MAX_ACTIVE_THREADS are currently
- // based on NVIDIA-V100 and should be modified to be based on the
- // architecture in the future.
- const int max_team_threads = 32;
- const int max_teams =
- OpenMPTargetExec::MAX_ACTIVE_THREADS / max_team_threads;
- // Number of elements in the reduction
- const auto value_count = FunctorAnalysis::value_count(f);
-
- // Allocate scratch per active thread, achieved by setting the first
- // parameter of `resize_scratch` to 1.
- OpenMPTargetExec::resize_scratch(1, 0, value_count * sizeof(ValueType),
- std::numeric_limits<int64_t>::max());
- ValueType* scratch_ptr =
- static_cast<ValueType*>(OpenMPTargetExec::get_scratch_ptr());
-
-#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
- {
- typename FunctorAnalysis::Reducer final_reducer(&f);
- // Take this branch if the functor has an `init` member function.
- if constexpr (HasInit) {
- // The `init` routine needs to be called on the device since it might
- // need device members.
- final_reducer.init(scratch_ptr);
- final_reducer.final(scratch_ptr);
- } else {
- for (int i = 0; i < value_count; ++i) {
- static_cast<ValueType*>(scratch_ptr)[i] = ValueType();
- }
-
- final_reducer.final(scratch_ptr);
- }
- }
-
- if (end <= begin) {
- // If there is no work to be done, copy back the initialized values and
- // exit.
- if (!ptr_on_device)
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_initial_device(), omp_get_default_device()));
- else
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_default_device(), omp_get_default_device()));
-
- return;
- }
-
-#pragma omp target teams num_teams(max_teams) thread_limit(max_team_threads) \
- map(to \
- : f) is_device_ptr(scratch_ptr)
- {
- typename FunctorAnalysis::Reducer final_reducer(&f);
-#pragma omp parallel
- {
- const int team_num = omp_get_team_num();
- const int num_teams = omp_get_num_teams();
- const auto chunk_size = size / num_teams;
- const auto team_begin = begin + team_num * chunk_size;
- const auto team_end =
- (team_num == num_teams - 1) ? end : (team_begin + chunk_size);
- ValueType* team_scratch =
- scratch_ptr + team_num * max_team_threads * value_count;
- ReferenceType result = final_reducer.init(
- &team_scratch[omp_get_thread_num() * value_count]);
-
- // Accumulate partial results in thread specific storage.
-#pragma omp for simd
- for (auto i = team_begin; i < team_end; ++i) {
- if constexpr (std::is_void<TagType>::value) {
- f(i, result);
- } else {
- f(TagType(), i, result);
- }
- }
-
- // Reduce all partial results within a team.
- const int team_size = max_team_threads;
- int tree_neighbor_offset = 1;
- do {
-#pragma omp for simd
- for (int i = 0; i < team_size - tree_neighbor_offset;
- i += 2 * tree_neighbor_offset) {
- const int neighbor = i + tree_neighbor_offset;
- final_reducer.join(&team_scratch[i * value_count],
- &team_scratch[neighbor * value_count]);
- }
- tree_neighbor_offset *= 2;
- } while (tree_neighbor_offset < team_size);
- } // end parallel
- } // end target
-
- int tree_neighbor_offset = 1;
- do {
-#pragma omp target teams distribute parallel for simd map(to \
- : f) \
- is_device_ptr(scratch_ptr)
- for (int i = 0; i < max_teams - tree_neighbor_offset;
- i += 2 * tree_neighbor_offset) {
- typename FunctorAnalysis::Reducer final_reducer(&f);
- ValueType* team_scratch = scratch_ptr;
- const int team_offset = max_team_threads * value_count;
- final_reducer.join(
- &team_scratch[i * team_offset],
- &team_scratch[(i + tree_neighbor_offset) * team_offset]);
-
- // If `final` is provided by the functor, do the final only once at the
- // end.
- if (tree_neighbor_offset * 2 >= max_teams && omp_get_team_num() == 0 &&
- omp_get_thread_num() == 0) {
- final_reducer.final(scratch_ptr);
- }
- }
- tree_neighbor_offset *= 2;
- } while (tree_neighbor_offset < max_teams);
-
- // If the result view is on the host, copy back the values via memcpy.
- if (!ptr_on_device)
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_initial_device(), omp_get_default_device()));
- else
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_default_device(), omp_get_default_device()));
- }
-};
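-
-// Editor's sketch (assuming the usual Kokkos functor conventions): a functor
-// that provides init/join member functions is dispatched to
-// execute_init_join() above. The functor below is a hypothetical example.
-//
-//   struct MinFunctor {
-//     using value_type = double;
-//     Kokkos::View<const double*> v;
-//     KOKKOS_FUNCTION void operator()(const int i, double& m) const {
-//       if (v(i) < m) m = v(i);
-//     }
-//     KOKKOS_FUNCTION void init(double& m) const {
-//       m = Kokkos::reduction_identity<double>::min();
-//     }
-//     KOKKOS_FUNCTION void join(double& dst, const double& src) const {
-//       if (src < dst) dst = src;
-//     }
-//   };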
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
-
- using ReducerTypeFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- static constexpr int HasJoin =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
- FunctorType>::has_join_member_function;
- static constexpr int UseReducer = is_reducer<ReducerType>::value;
- static constexpr int IsArray = std::is_pointer<reference_type>::value;
-
- using ParReduceSpecialize =
- ParallelReduceSpecialize<FunctorType, Policy, ReducerType, pointer_type,
- typename Analysis::value_type>;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- bool m_result_ptr_on_device;
- const int m_result_ptr_num_elems;
- using TagType = typename Policy::work_tag;
-
- public:
- void execute() const {
- if constexpr (HasJoin) {
- // Take this branch if the functor has init/join member functions.
- ParReduceSpecialize::execute_init_join(m_functor, m_policy, m_result_ptr,
- m_result_ptr_on_device);
- } else if constexpr (UseReducer) {
- // Take this branch if the reduction uses a reducer type.
- ParReduceSpecialize::execute_reducer(m_functor, m_policy, m_result_ptr,
- m_result_ptr_on_device);
- } else if constexpr (IsArray) {
- // Take this branch if the reduction is on an array and the routine is
- // templated over the size of the array.
- if (m_result_ptr_num_elems <= 2) {
- ParReduceSpecialize::template execute_array<TagType, 2>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 4) {
- ParReduceSpecialize::template execute_array<TagType, 4>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 8) {
- ParReduceSpecialize::template execute_array<TagType, 8>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 16) {
- ParReduceSpecialize::template execute_array<TagType, 16>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 32) {
- ParReduceSpecialize::template execute_array<TagType, 32>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else {
- Kokkos::abort("array reduction length must be <= 32");
- }
- } else {
- // This branch handles the basic scalar reduction.
- ParReduceSpecialize::template execute_array<TagType, 1>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- }
- }
-
- template <class ViewType>
- ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
- const ViewType& arg_result_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result_view.data()),
- m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ViewType::memory_space>::accessible),
- m_result_ptr_num_elems(arg_result_view.size()) {}
-
- ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_num_elems(reducer.view().size()) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::OpenMPTarget> {
- protected:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
- using idx_type = typename Policy::index_type;
-
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
-
- using value_type = typename Analysis::value_type;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> call_with_tag(
- const FunctorType& f, const idx_type& idx, value_type& val,
- const bool& is_final) const {
- f(idx, val, is_final);
- }
- template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> call_with_tag(
- const FunctorType& f, const idx_type& idx, value_type& val,
- const bool& is_final) const {
- f(WorkTag(), idx, val, is_final);
- }
-
- public:
- void impl_execute(
- Kokkos::View<value_type**, Kokkos::LayoutRight,
- Kokkos::Experimental::OpenMPTargetSpace>
- element_values,
- Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
- chunk_values,
- Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count)
- const {
- const idx_type N = m_policy.end() - m_policy.begin();
- const idx_type chunk_size = 128;
- const idx_type n_chunks = (N + chunk_size - 1) / chunk_size;
- idx_type nteams = n_chunks > 512 ? 512 : n_chunks;
- idx_type team_size = 128;
-
- FunctorType a_functor(m_functor);
-#pragma omp target teams distribute map(to \
- : a_functor) num_teams(nteams) \
- thread_limit(team_size)
- for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
- typename Analysis::Reducer final_reducer(&a_functor);
-#pragma omp parallel num_threads(team_size)
- {
- const idx_type local_offset = team_id * chunk_size;
-
-#pragma omp for
- for (idx_type i = 0; i < chunk_size; ++i) {
- const idx_type idx = local_offset + i;
- value_type val;
- final_reducer.init(&val);
- if (idx < N) call_with_tag<WorkTag>(a_functor, idx, val, false);
- element_values(team_id, i) = val;
- }
-#pragma omp barrier
- if (omp_get_thread_num() == 0) {
- value_type sum;
- final_reducer.init(&sum);
- for (idx_type i = 0; i < chunk_size; ++i) {
- final_reducer.join(&sum, &element_values(team_id, i));
- element_values(team_id, i) = sum;
- }
- chunk_values(team_id) = sum;
- }
-#pragma omp barrier
- if (omp_get_thread_num() == 0) {
- if (Kokkos::atomic_fetch_add(&count(), 1) == n_chunks - 1) {
- value_type sum;
- final_reducer.init(&sum);
- for (idx_type i = 0; i < n_chunks; ++i) {
- final_reducer.join(&sum, &chunk_values(i));
- chunk_values(i) = sum;
- }
- }
- }
- }
- }
-
-#pragma omp target teams distribute map(to \
- : a_functor) num_teams(nteams) \
- thread_limit(team_size)
- for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
- typename Analysis::Reducer final_reducer(&a_functor);
-#pragma omp parallel num_threads(team_size)
- {
- const idx_type local_offset = team_id * chunk_size;
- value_type offset_value;
- if (team_id > 0)
- offset_value = chunk_values(team_id - 1);
- else
- final_reducer.init(&offset_value);
-
-#pragma omp for
- for (idx_type i = 0; i < chunk_size; ++i) {
- const idx_type idx = local_offset + i;
- value_type local_offset_value;
- if (i > 0) {
- local_offset_value = element_values(team_id, i - 1);
- // FIXME_OPENMPTARGET We seem to access memory illegally on AMD GPUs
-#ifdef KOKKOS_ARCH_VEGA
- if constexpr (Analysis::has_join_member_function) {
- if constexpr (std::is_void_v<WorkTag>)
- a_functor.join(local_offset_value, offset_value);
- else
- a_functor.join(WorkTag{}, local_offset_value, offset_value);
- } else
- local_offset_value += offset_value;
-#else
- final_reducer.join(&local_offset_value, &offset_value);
-#endif
- } else
- local_offset_value = offset_value;
- if (idx < N)
- call_with_tag<WorkTag>(a_functor, idx, local_offset_value, true);
- }
- }
- }
- }
-
- void execute() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_scan");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_scan");
- const idx_type N = m_policy.end() - m_policy.begin();
- const idx_type chunk_size = 128;
- const idx_type n_chunks = (N + chunk_size - 1) / chunk_size;
-
- // This could be scratch memory per team
- Kokkos::View<value_type**, Kokkos::LayoutRight,
- Kokkos::Experimental::OpenMPTargetSpace>
- element_values("element_values", n_chunks, chunk_size);
- Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
- chunk_values("chunk_values", n_chunks);
- Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
- "Count");
-
- impl_execute(element_values, chunk_values, count);
- }
-
- //----------------------------------------
-
- ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-
- //----------------------------------------
-};
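-
-// Editor's sketch: the user-facing scan that instantiates this class. The
-// chunked two-pass algorithm in impl_execute() is an implementation detail;
-// semantically this computes a prefix sum. The views `x`, `y` and the
-// extent `n` are illustrative assumptions.
-//
-//   Kokkos::parallel_scan(
-//       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
-//       KOKKOS_LAMBDA(const int i, double& partial, const bool is_final) {
-//         if (is_final) y(i) = partial;  // exclusive prefix sum of x
-//         partial += x(i);
-//       });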
-
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::Experimental::OpenMPTarget>
- : public ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::OpenMPTarget> {
- using base_t = ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::OpenMPTarget>;
- using value_type = typename base_t::value_type;
- value_type& m_returnvalue;
-
- public:
- void execute() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_scan");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_scan");
- const int64_t N = base_t::m_policy.end() - base_t::m_policy.begin();
- const int chunk_size = 128;
- const int64_t n_chunks = (N + chunk_size - 1) / chunk_size;
-
- if (N > 0) {
- // This could be scratch memory per team
- Kokkos::View<value_type**, Kokkos::LayoutRight,
- Kokkos::Experimental::OpenMPTargetSpace>
- element_values("element_values", n_chunks, chunk_size);
- Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
- chunk_values("chunk_values", n_chunks);
- Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
- "Count");
-
- base_t::impl_execute(element_values, chunk_values, count);
-
- const int size = base_t::Analysis::value_size(base_t::m_functor);
- DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace>(
- &m_returnvalue, chunk_values.data() + (n_chunks - 1), size);
- } else {
- m_returnvalue = 0;
- }
- }
-
- ParallelScanWithTotal(const FunctorType& arg_functor,
- const typename base_t::Policy& arg_policy,
- ReturnType& arg_returnvalue)
- : base_t(arg_functor, arg_policy), m_returnvalue(arg_returnvalue) {}
-};
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
- Properties...>;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const size_t m_shmem_size;
-
- public:
- void execute() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- execute_impl<WorkTag>();
- }
-
- private:
- template <class TagType>
- void execute_impl() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- const auto league_size = m_policy.league_size();
- const auto team_size = m_policy.team_size();
- const auto vector_length = m_policy.impl_vector_length();
-
- const size_t shmem_size_L0 = m_policy.scratch_size(0, team_size);
- const size_t shmem_size_L1 = m_policy.scratch_size(1, team_size);
- OpenMPTargetExec::resize_scratch(team_size, shmem_size_L0, shmem_size_L1,
- league_size);
-
- void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
- FunctorType a_functor(m_functor);
-
- // FIXME_OPENMPTARGET - If the team_size is not a multiple of 32, the
- // scratch implementation does not work in the Release or RelWithDebInfo
- // build modes but does work in the Debug mode.
-
- // Maximum active teams possible.
- int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
- // nteams should not exceed the maximum in-flight teams possible.
- const auto nteams =
- league_size < max_active_teams ? league_size : max_active_teams;
-
- // If the league size is <=0, do not launch the kernel.
- if (nteams <= 0) return;
-
-// We perform our own scheduling of teams to avoid the separation of code
-// between teams-distribute and parallel, which gave a 2x performance boost in
-// test cases with the clang compiler. atomic_compare_exchange can be avoided
-// since the standard guarantees that the number of teams specified in the
-// `num_teams` clause is always less than or equal to the maximum number of
-// concurrently running teams.
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) \
- map(to \
- : a_functor) is_device_ptr(scratch_ptr)
-#pragma omp parallel
- {
- const int blockIdx = omp_get_team_num();
- const int gridDim = omp_get_num_teams();
-
- // Iterate over the league in strides of the number of running teams and
- // assign the league_id accordingly.
- // Check that the compiler respected the `num_teams` clause.
- if (gridDim <= nteams) {
- for (int league_id = blockIdx; league_id < league_size;
- league_id += gridDim) {
- typename Policy::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- blockIdx, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value)
- m_functor(team);
- else
- m_functor(TagType(), team);
- }
- } else
- Kokkos::abort("`num_teams` clause was not respected.\n");
- }
- }
-
- public:
- ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {}
-};
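-
-// Editor's sketch: a team-level parallel_for that instantiates this class;
-// each team obtains its league ids from the hand-rolled scheduling loop
-// above. The views `x`, `y` and the policy extents are illustrative
-// assumptions.
-//
-//   using team_policy = Kokkos::TeamPolicy<Kokkos::Experimental::OpenMPTarget>;
-//   Kokkos::parallel_for(
-//       team_policy(league_size, team_size),
-//       KOKKOS_LAMBDA(const team_policy::member_type& team) {
-//         const int i = team.league_rank();
-//         y(i) = 2.0 * x(i);
-//       });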
-
-template <class FunctorType, class ReducerType, class PointerType,
- class ValueType, class... PolicyArgs>
-struct ParallelReduceSpecialize<FunctorType, TeamPolicyInternal<PolicyArgs...>,
- ReducerType, PointerType, ValueType> {
- using PolicyType = TeamPolicyInternal<PolicyArgs...>;
- using TagType = typename PolicyType::work_tag;
- using ReducerTypeFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- PolicyType, ReducerTypeFwd>;
-
- using ReferenceType = typename Analysis::reference_type;
-
- using ParReduceCommon = ParallelReduceCommon<PointerType>;
-
- static void execute_reducer(const FunctorType& f, const PolicyType& p,
- PointerType result_ptr, bool ptr_on_device) {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
-
- const int league_size = p.league_size();
- const int team_size = p.team_size();
- const int vector_length = p.impl_vector_length();
-
- const size_t shmem_size_L0 = p.scratch_size(0, team_size);
- const size_t shmem_size_L1 = p.scratch_size(1, team_size);
- OpenMPTargetExec::resize_scratch(PolicyType::member_type::TEAM_REDUCE_SIZE,
- shmem_size_L0, shmem_size_L1, league_size);
- void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
-
- ValueType result = ValueType();
-
- // Maximum active teams possible.
- int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
- const auto nteams =
- league_size < max_active_teams ? league_size : max_active_teams;
-
- // If the league size is <=0, do not launch the kernel.
- if (nteams <= 0) return;
-
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to \
- : f) \
- is_device_ptr(scratch_ptr) reduction(custom \
- : result)
-#pragma omp parallel reduction(custom : result)
- {
- const int blockIdx = omp_get_team_num();
- const int gridDim = omp_get_num_teams();
-
- // Check that the compiler respected the `num_teams` clause.
- if (gridDim <= nteams) {
- for (int league_id = blockIdx; league_id < league_size;
- league_id += gridDim) {
- typename PolicyType::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- blockIdx, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value)
- f(team, result);
- else
- f(TagType(), team, result);
- }
- } else
- Kokkos::abort("`num_teams` clause was not respected.\n");
- }
-
- // Copy results back to device if `parallel_reduce` is on a device view.
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- }
-
- template <int NumReductions>
- static void execute_array(const FunctorType& f, const PolicyType& p,
- PointerType result_ptr, bool ptr_on_device) {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_reduce");
-
- const int league_size = p.league_size();
- const int team_size = p.team_size();
- const int vector_length = p.impl_vector_length();
-
- const size_t shmem_size_L0 = p.scratch_size(0, team_size);
- const size_t shmem_size_L1 = p.scratch_size(1, team_size);
- OpenMPTargetExec::resize_scratch(PolicyType::member_type::TEAM_REDUCE_SIZE,
- shmem_size_L0, shmem_size_L1, league_size);
- void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
-
- // Maximum active teams possible.
- int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
- const auto nteams =
- league_size < max_active_teams ? league_size : max_active_teams;
-
- // If the league size is <=0, do not launch the kernel.
- if (nteams <= 0) return;
-
- // Case where the number of reduction items is 1.
- if constexpr (NumReductions == 1) {
- ValueType result = ValueType();
-
- // Case where reduction is on a native data type.
- if constexpr (std::is_arithmetic<ValueType>::value) {
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to \
- : f) \
- is_device_ptr(scratch_ptr) reduction(+: result)
-#pragma omp parallel reduction(+ : result)
- {
- const int blockIdx = omp_get_team_num();
- const int gridDim = omp_get_num_teams();
-
- // Check that the compiler respected the `num_teams` clause.
- if (gridDim <= nteams) {
- for (int league_id = blockIdx; league_id < league_size;
- league_id += gridDim) {
- typename PolicyType::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- blockIdx, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value)
- f(team, result);
- else
- f(TagType(), team, result);
- }
- } else
- Kokkos::abort("`num_teams` clause was not respected.\n");
- }
- } else {
- // Case where the reduction is on a non-native data type.
-#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to \
- : f) \
- is_device_ptr(scratch_ptr) reduction(custom \
- : result)
-#pragma omp parallel reduction(custom : result)
- {
- const int blockIdx = omp_get_team_num();
- const int gridDim = omp_get_num_teams();
-
- // Check that the compiler respected the `num_teams` clause.
- if (gridDim <= nteams) {
- for (int league_id = blockIdx; league_id < league_size;
- league_id += gridDim) {
- typename PolicyType::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- blockIdx, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value)
- f(team, result);
- else
- f(TagType(), team, result);
- }
- } else
- Kokkos::abort("`num_teams` clause was not respected.\n");
- }
- }
-
- // Copy results back to device if `parallel_reduce` is on a device view.
- ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
- ptr_on_device);
- } else {
- ValueType result[NumReductions] = {};
- // Case where the reduction is on an array.
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to \
- : f) \
- is_device_ptr(scratch_ptr) reduction(+ : result[:NumReductions])
-#pragma omp parallel reduction(+ : result[:NumReductions])
- {
- const int blockIdx = omp_get_team_num();
- const int gridDim = omp_get_num_teams();
-
- // Check that the compiler respected the `num_teams` clause.
- if (gridDim <= nteams) {
- for (int league_id = blockIdx; league_id < league_size;
- league_id += gridDim) {
- typename PolicyType::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- blockIdx, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value)
- f(team, result);
- else
- f(TagType(), team, result);
- }
- } else
- Kokkos::abort("`num_teams` clause was not respected.\n");
- }
-
- // Copy results back to device if `parallel_reduce` is on a device view.
- ParReduceCommon::memcpy_result(
- result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
- }
- }
-
- // FIXME_OPENMPTARGET: This routine is copied from `parallel_reduce` over
- // RangePolicy and needs a dedicated implementation.
- static void execute_init_join(const FunctorType& f, const PolicyType& p,
- PointerType ptr, const bool ptr_on_device) {
- using FunctorAnalysis =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>;
- constexpr int HasInit = FunctorAnalysis::has_init_member_function;
-
- const int league_size = p.league_size();
- const int team_size = p.team_size();
- const int vector_length = p.impl_vector_length();
-
- auto begin = 0;
- auto end = league_size * team_size + team_size * vector_length;
-
- const size_t shmem_size_L0 = p.scratch_size(0, team_size);
- const size_t shmem_size_L1 = p.scratch_size(1, team_size);
-
- // FIXME_OPENMPTARGET: This would oversubscribe scratch memory since we are
- // already using the available scratch memory to create temporaries for each
- // thread.
- if ((shmem_size_L0 + shmem_size_L1) > 0) {
- Kokkos::abort(
- "OpenMPTarget: Scratch memory is not supported in `parallel_reduce` "
- "over functors with init/join.");
- }
-
- const auto nteams = league_size;
-
- // Number of elements in the reduction
- const auto value_count = FunctorAnalysis::value_count(f);
-
- // Allocate scratch per active thread.
- OpenMPTargetExec::resize_scratch(1, 0, value_count * sizeof(ValueType),
- league_size);
- void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
-
- // Take this branch if the functor has an `init` member function.
- if constexpr (HasInit) {
- // The `init` routine needs to be called on the device since it might need
- // device members.
-#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
- {
- typename FunctorAnalysis::Reducer final_reducer(&f);
- final_reducer.init(scratch_ptr);
- final_reducer.final(scratch_ptr);
- }
- } else {
-#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
- {
- for (int i = 0; i < value_count; ++i) {
- static_cast<ValueType*>(scratch_ptr)[i] = ValueType();
- }
-
- typename FunctorAnalysis::Reducer final_reducer(&f);
- final_reducer.final(static_cast<ValueType*>(scratch_ptr));
- }
- }
-
- if (end <= begin) {
- // If there is no work to be done, copy back the initialized values and
- // exit.
- if (!ptr_on_device)
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_initial_device(), omp_get_default_device()));
- else
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_default_device(), omp_get_default_device()));
-
- return;
- }
-
-#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to \
- : f) \
- is_device_ptr(scratch_ptr)
- {
-#pragma omp parallel
- {
- const int team_num = omp_get_team_num();
- const int num_teams = omp_get_num_teams();
- ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr) +
- team_num * team_size * value_count;
- typename FunctorAnalysis::Reducer final_reducer(&f);
- ReferenceType result = final_reducer.init(&team_scratch[0]);
-
- for (int league_id = team_num; league_id < league_size;
- league_id += num_teams) {
- typename PolicyType::member_type team(
- league_id, league_size, team_size, vector_length, scratch_ptr,
- team_num, shmem_size_L0, shmem_size_L1);
- if constexpr (std::is_void<TagType>::value) {
- f(team, result);
- } else {
- f(TagType(), team, result);
- }
- }
- } // end parallel
- } // end target
-
- int tree_neighbor_offset = 1;
- do {
-#pragma omp target teams distribute parallel for simd map(to \
- : f) \
- is_device_ptr(scratch_ptr)
- for (int i = 0; i < nteams - tree_neighbor_offset;
- i += 2 * tree_neighbor_offset) {
- ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr);
- const int team_offset = team_size * value_count;
- typename FunctorAnalysis::Reducer final_reducer(&f);
- final_reducer.join(
- &team_scratch[i * team_offset],
- &team_scratch[(i + tree_neighbor_offset) * team_offset]);
-
- // If `final` is provided by the functor, do the final only once at the
- // end.
- if (tree_neighbor_offset * 2 >= nteams && omp_get_team_num() == 0 &&
- omp_get_thread_num() == 0) {
- final_reducer.final(scratch_ptr);
- }
- }
- tree_neighbor_offset *= 2;
- } while (tree_neighbor_offset < nteams);
-
- // If the result view is on the host, copy back the values via memcpy.
- if (!ptr_on_device)
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_initial_device(), omp_get_default_device()));
- else
- OMPT_SAFE_CALL(omp_target_memcpy(
- ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
- omp_get_default_device(), omp_get_default_device()));
- }
-};
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
- Properties...>;
-
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using ReducerTypeFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
-
- bool m_result_ptr_on_device;
- const int m_result_ptr_num_elems;
-
- static constexpr int HasJoin =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
- FunctorType>::has_join_member_function;
- static constexpr int UseReducer = is_reducer<ReducerType>::value;
- static constexpr int IsArray = std::is_pointer<reference_type>::value;
-
- using ParReduceSpecialize =
- ParallelReduceSpecialize<FunctorType, Policy, ReducerType, pointer_type,
- typename Analysis::value_type>;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const size_t m_shmem_size;
-
- public:
- void execute() const {
- if constexpr (HasJoin) {
- ParReduceSpecialize::execute_init_join(m_functor, m_policy, m_result_ptr,
- m_result_ptr_on_device);
- } else if constexpr (UseReducer) {
- ParReduceSpecialize::execute_reducer(m_functor, m_policy, m_result_ptr,
- m_result_ptr_on_device);
- } else if constexpr (IsArray) {
- if (m_result_ptr_num_elems <= 2) {
- ParReduceSpecialize::template execute_array<2>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 4) {
- ParReduceSpecialize::template execute_array<4>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 8) {
- ParReduceSpecialize::template execute_array<8>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 16) {
- ParReduceSpecialize::template execute_array<16>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else if (m_result_ptr_num_elems <= 32) {
- ParReduceSpecialize::template execute_array<32>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- } else {
- Kokkos::abort("array reduction length must be <= 32");
- }
- } else {
- ParReduceSpecialize::template execute_array<1>(
- m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
- }
- }
-
- template <class ViewType>
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ViewType::memory_space>::accessible),
- m_result_ptr_num_elems(arg_result.size()),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {}
-
- ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
- const ReducerType& reducer)
- : m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_num_elems(reducer.view().size()),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, arg_policy.team_size())) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-template <typename iType>
-struct TeamThreadRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
- using index_type = iType;
- const iType start;
- const iType end;
- const OpenMPTargetExecTeamMember& team;
-
- TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- iType count)
- : start(0), end(count), team(thread_) {}
- TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- iType begin_, iType end_)
- : start(begin_), end(end_), team(thread_) {}
-};
-
-template <typename iType>
-struct ThreadVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
- using index_type = iType;
- const index_type start;
- const index_type end;
- const OpenMPTargetExecTeamMember& team;
-
- ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- index_type count)
- : start(0), end(count), team(thread_) {}
- ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- index_type begin_, index_type end_)
- : start(begin_), end(end_), team(thread_) {}
-};
-
-template <typename iType>
-struct TeamVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
- using index_type = iType;
- const index_type start;
- const index_type end;
- const OpenMPTargetExecTeamMember& team;
-
- TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- index_type count)
- : start(0), end(count), team(thread_) {}
- TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
- index_type begin_, index_type end_)
- : start(begin_), end(end_), team(thread_) {}
-};
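-
-// Editor's sketch: the boundary structs above are what TeamThreadRange,
-// ThreadVectorRange, and TeamVectorRange construct, enabling the usual
-// nested-parallelism idiom. The matrix `A` and the extents are illustrative
-// assumptions.
-//
-//   using team_policy = Kokkos::TeamPolicy<Kokkos::Experimental::OpenMPTarget>;
-//   Kokkos::parallel_for(
-//       team_policy(nrows, Kokkos::AUTO),
-//       KOKKOS_LAMBDA(const team_policy::member_type& team) {
-//         const int row = team.league_rank();
-//         Kokkos::parallel_for(Kokkos::TeamThreadRange(team, ncols),
-//                              [&](const int col) { A(row, col) = 0.0; });
-//       });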
-
-} // namespace Impl
-
-} // namespace Kokkos
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* KOKKOS_OPENMPTARGET_PARALLEL_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENMPTARGET_PARALLEL_MDRANGE_HPP
-#define KOKKOS_OPENMPTARGET_PARALLEL_MDRANGE_HPP
-
-#include <omp.h>
-#include <Kokkos_Parallel.hpp>
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
-
-// WORKAROUND OPENMPTARGET: Sometimes tile sizes are not transferred to the
-// device correctly. This was tracked down to a bug in clang regarding the
-// mapping of structs that contain arrays of long; arrays of int might be
-// fine, though ...
-#define KOKKOS_IMPL_MDRANGE_USE_NO_TILES // undef EOF
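-
-// Editor's sketch of the problematic pattern (hypothetical reproducer, per
-// the comment above): mapping a struct that contains a C array of `long`
-// could transfer corrupted values with the affected clang versions.
-//
-//   struct Tiling {
-//     long tile[6];
-//   };
-//   Tiling t;
-//   t.tile[0] = 128;
-//   #pragma omp target map(to : t)
-//   {
-//     // t.tile[0] may not hold 128 here under the affected compilers.
-//   }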
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using Index = typename Policy::index_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- public:
- inline void execute() const {
- OpenMPTargetExec::verify_is_process(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- OpenMPTargetExec::verify_initialized(
- "Kokkos::Experimental::OpenMPTarget parallel_for");
- FunctorType functor(m_functor);
- Policy policy = m_policy;
-
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- typename Policy::point_type unused;
-
- execute_tile<Policy::rank>(unused, functor, policy);
-#else
- const int64_t begin = 0;
- const int64_t end = m_policy.m_num_tiles;
-
-#pragma omp target teams distribute map(to : functor) num_teams(end - begin)
- {
- for (ptrdiff_t tile_idx = begin; tile_idx < end; ++tile_idx) {
-
-#pragma omp parallel
- {
- typename Policy::point_type offset;
- if (Policy::outer_direction == Policy::Left) {
- for (int i = 0; i < Policy::rank; ++i) {
- offset[i] = (tile_idx % policy.m_tile_end[i]) * policy.m_tile[i] +
- policy.m_lower[i];
- tile_idx /= policy.m_tile_end[i];
- }
- } else {
- for (int i = Policy::rank - 1; i >= 0; --i) {
- offset[i] = (tile_idx % policy.m_tile_end[i]) * policy.m_tile[i] +
- policy.m_lower[i];
- tile_idx /= policy.m_tile_end[i];
- }
- }
- execute_tile<Policy::rank>(offset, functor, policy);
- }
- }
- }
-#endif
- }
-
- template <int Rank>
- inline std::enable_if_t<Rank == 2> execute_tile(
- typename Policy::point_type offset, const FunctorType& functor,
- const Policy& policy) const {
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- (void)offset;
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
-
-#pragma omp target teams distribute parallel for collapse(2) map(to : functor)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1);
- else
- functor(typename Policy::work_tag(), i0, i1);
- }
- }
-#else
- const ptrdiff_t begin_0 = offset[0];
- ptrdiff_t end_0 = begin_0 + policy.m_tile[0];
- end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
-
- const ptrdiff_t begin_1 = offset[1];
- ptrdiff_t end_1 = begin_1 + policy.m_tile[1];
- end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
-
-#pragma omp for collapse(2)
- for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
- for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1);
- else
- functor(typename Policy::work_tag(), i0, i1);
- }
-#endif
- }
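-
- // Editor's sketch: a rank-2 MDRange parallel_for that reaches the
- // execute_tile() overload above; in the no-tiles mode it lowers to a
- // single collapse(2) target region. The view `A` and the extents `m`, `n`
- // are illustrative assumptions.
- //
- //   Kokkos::parallel_for(
- //       Kokkos::MDRangePolicy<Kokkos::Experimental::OpenMPTarget,
- //                             Kokkos::Rank<2>>({0, 0}, {m, n}),
- //       KOKKOS_LAMBDA(const int i0, const int i1) { A(i0, i1) = 0.0; });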
-
- template <int Rank>
- inline std::enable_if_t<Rank == 3> execute_tile(
- typename Policy::point_type offset, const FunctorType& functor,
- const Policy& policy) const {
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- (void)offset;
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
-
-#pragma omp target teams distribute parallel for collapse(3) map(to : functor)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2);
- else
- functor(typename Policy::work_tag(), i0, i1, i2);
- }
- }
- }
-#else
- const ptrdiff_t begin_0 = offset[0];
- ptrdiff_t end_0 = begin_0 + policy.m_tile[0];
- end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
-
- const ptrdiff_t begin_1 = offset[1];
- ptrdiff_t end_1 = begin_1 + policy.m_tile[1];
- end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
-
- const ptrdiff_t begin_2 = offset[2];
- ptrdiff_t end_2 = begin_2 + policy.m_tile[2];
- end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
-
-#pragma omp for collapse(3)
- for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
- for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
- for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2);
- else
- functor(typename Policy::work_tag(), i0, i1, i2);
- }
-#endif
- }
-
- template <int Rank>
- inline std::enable_if_t<Rank == 4> execute_tile(
- typename Policy::point_type offset, const FunctorType& functor,
- const Policy& policy) const {
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- (void)offset;
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
- const Index begin_3 = policy.m_lower[3];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
-
-#pragma omp target teams distribute parallel for collapse(4) map(to : functor)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2, i3);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3);
- }
- }
- }
- }
-#else
- const ptrdiff_t begin_0 = offset[0];
- ptrdiff_t end_0 = begin_0 + policy.m_tile[0];
- end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
-
- const ptrdiff_t begin_1 = offset[1];
- ptrdiff_t end_1 = begin_1 + policy.m_tile[1];
- end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
-
- const ptrdiff_t begin_2 = offset[2];
- ptrdiff_t end_2 = begin_2 + policy.m_tile[2];
- end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
-
- const ptrdiff_t begin_3 = offset[3];
- ptrdiff_t end_3 = begin_3 + policy.m_tile[3];
- end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
-
-#pragma omp for collapse(4)
- for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
- for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
- for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
- for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2, i3);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3);
- }
-#endif
- }
-
- template <int Rank>
- inline std::enable_if_t<Rank == 5> execute_tile(
- typename Policy::point_type offset, const FunctorType& functor,
- const Policy& policy) const {
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- (void)offset;
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
- const Index begin_3 = policy.m_lower[3];
- const Index begin_4 = policy.m_lower[4];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
- const Index end_4 = policy.m_upper[4];
-
-#pragma omp target teams distribute parallel for collapse(5) map(to : functor)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4);
- }
- }
- }
- }
- }
-#else
- const ptrdiff_t begin_0 = offset[0];
- ptrdiff_t end_0 = begin_0 + policy.m_tile[0];
- end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
-
- const ptrdiff_t begin_1 = offset[1];
- ptrdiff_t end_1 = begin_1 + policy.m_tile[1];
- end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
-
- const ptrdiff_t begin_2 = offset[2];
- ptrdiff_t end_2 = begin_2 + policy.m_tile[2];
- end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
-
- const ptrdiff_t begin_3 = offset[3];
- ptrdiff_t end_3 = begin_3 + policy.m_tile[3];
- end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
-
- const ptrdiff_t begin_4 = offset[4];
- ptrdiff_t end_4 = begin_4 + policy.m_tile[4];
- end_4 = end_4 < policy.m_upper[4] ? end_4 : policy.m_upper[4];
-
-#pragma omp for collapse(5)
- for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
- for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
- for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
- for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3)
- for (ptrdiff_t i4 = begin_4; i4 < end_4; ++i4) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4);
- }
-#endif
- }
-
- template <int Rank>
- inline std::enable_if_t<Rank == 6> execute_tile(
- typename Policy::point_type offset, const FunctorType& functor,
- const Policy& policy) const {
-#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
- (void)offset;
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
- const Index begin_3 = policy.m_lower[3];
- const Index begin_4 = policy.m_lower[4];
- const Index begin_5 = policy.m_lower[5];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
- const Index end_4 = policy.m_upper[4];
- const Index end_5 = policy.m_upper[5];
-
-#pragma omp target teams distribute parallel for collapse(6) map(to : functor)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- for (auto i5 = begin_5; i5 < end_5; ++i5) {
- {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, i5);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
- i5);
- }
- }
- }
- }
- }
- }
- }
-#else
- const ptrdiff_t begin_0 = offset[0];
- ptrdiff_t end_0 = begin_0 + policy.m_tile[0];
- end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
-
- const ptrdiff_t begin_1 = offset[1];
- ptrdiff_t end_1 = begin_1 + policy.m_tile[1];
- end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
-
- const ptrdiff_t begin_2 = offset[2];
- ptrdiff_t end_2 = begin_2 + policy.m_tile[2];
- end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
-
- const ptrdiff_t begin_3 = offset[3];
- ptrdiff_t end_3 = begin_3 + policy.m_tile[3];
- end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
-
- const ptrdiff_t begin_4 = offset[4];
- ptrdiff_t end_4 = begin_4 + policy.m_tile[4];
- end_4 = end_4 < policy.m_upper[4] ? end_4 : policy.m_upper[4];
-
- const ptrdiff_t begin_5 = offset[5];
- ptrdiff_t end_5 = begin_5 + policy.m_tile[5];
- end_5 = end_5 < policy.m_upper[5] ? end_5 : policy.m_upper[5];
-
-#pragma omp for collapse(6)
- for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
- for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
- for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
- for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3)
- for (ptrdiff_t i4 = begin_4; i4 < end_4; ++i4)
- for (ptrdiff_t i5 = begin_5; i5 < end_5; ++i5) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, i5);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5);
- }
-#endif
- }
-
- inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
- // TODO DZP: based on a conversation with Christian, we're using 256 as a
- // heuristic here. We need something better once we can query these kinds of
-  // properties.
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- return 256;
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::OpenMPTarget> {
- private:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
-
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using Index = typename Policy::index_type;
-
- using ReducerConditional =
- std::conditional<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- enum {
- HasJoin =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
- FunctorType>::has_join_member_function
- };
- enum { UseReducer = is_reducer<ReducerType>::value };
-
- const pointer_type m_result_ptr;
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
-
- using ParReduceCommon = ParallelReduceCommon<pointer_type>;
-
- bool m_result_ptr_on_device;
-
- public:
- inline void execute() const {
- execute_tile<Policy::rank, typename Analysis::value_type>(
- m_functor, m_policy, m_result_ptr);
- }
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType& arg_functor, Policy arg_policy,
- const ViewType& arg_result_view,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
-                           void*> = nullptr)
- : m_result_ptr(arg_result_view.data()),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ViewType::memory_space>::accessible) {}
-
- inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
- const ReducerType& reducer)
- : m_result_ptr(reducer.view().data()),
- m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr_on_device(
- MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible) {}
-
- template <int Rank, class ValueType>
- inline std::enable_if_t<Rank == 2> execute_tile(const FunctorType& functor,
- const Policy& policy,
- pointer_type ptr) const {
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
-
- ValueType result = ValueType();
-
-    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
-    // loops, which leads to code duplication for different reduction types.
- if constexpr (UseReducer) {
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for collapse(2) map(to \
- : functor) \
- reduction(custom \
- : result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, result);
- else
- functor(typename Policy::work_tag(), i0, i1, result);
- }
- }
- } else {
-#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
-reduction(+:result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, result);
- else
- functor(typename Policy::work_tag(), i0, i1, result);
- }
- }
- }
-
- ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
- m_result_ptr_on_device);
- }
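-
-  // Note (illustrative, not part of the original file): the "custom" declare
-  // reduction above tells the OpenMP runtime how to combine thread-private
-  // partial results: omp_in is folded into omp_out through the reducer's
-  // join(), and each private copy is seeded through the reducer's init().
-  // Plain sums take the built-in reduction(+ : result) branch instead.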
-
- template <int Rank, class ValueType>
- inline std::enable_if_t<Rank == 3> execute_tile(const FunctorType& functor,
- const Policy& policy,
- pointer_type ptr) const {
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
-
- ValueType result = ValueType();
-
-    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
-    // loops, which leads to code duplication for different reduction types.
- if constexpr (UseReducer) {
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for collapse(3) map(to \
- : functor) \
- reduction(custom \
- : result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, result);
- }
- }
- }
- } else {
-#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
-reduction(+:result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- if constexpr (std::is_void<typename Policy::work_tag>::value)
- functor(i0, i1, i2, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, result);
- }
- }
- }
- }
-
- ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
- m_result_ptr_on_device);
- }
-
- template <int Rank, class ValueType>
- inline std::enable_if_t<Rank == 4> execute_tile(const FunctorType& functor,
- const Policy& policy,
- pointer_type ptr) const {
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
-    const Index begin_2 = policy.m_lower[2];
-    const Index begin_3 = policy.m_lower[3];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
-
- ValueType result = ValueType();
-
-    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
-    // loops, which leads to code duplication for different reduction types.
- if constexpr (UseReducer) {
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for collapse(4) map(to \
- : functor) \
- reduction(custom \
- : result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, result);
- }
- }
- }
- }
- } else {
-#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
-reduction(+:result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, result);
- }
- }
- }
- }
- }
-
- ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
- m_result_ptr_on_device);
- }
-
- template <int Rank, class ValueType>
- inline std::enable_if_t<Rank == 5> execute_tile(const FunctorType& functor,
- const Policy& policy,
- pointer_type ptr) const {
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
- const Index begin_3 = policy.m_lower[3];
- const Index begin_4 = policy.m_lower[4];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
- const Index end_4 = policy.m_upper[4];
-
- ValueType result = ValueType();
-
-    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
-    // loops, which leads to code duplication for different reduction types.
- if constexpr (UseReducer) {
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for collapse(5) map(to \
- : functor) \
- reduction(custom \
- : result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
- result);
- }
- }
- }
- }
- }
- } else {
-#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
-reduction(+:result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
- result);
- }
- }
- }
- }
- }
- }
-
- ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
- m_result_ptr_on_device);
- }
-
- template <int Rank, class ValueType>
- inline std::enable_if_t<Rank == 6> execute_tile(const FunctorType& functor,
- const Policy& policy,
- pointer_type ptr) const {
- const Index begin_0 = policy.m_lower[0];
- const Index begin_1 = policy.m_lower[1];
- const Index begin_2 = policy.m_lower[2];
- const Index begin_3 = policy.m_lower[3];
- const Index begin_4 = policy.m_lower[4];
- const Index begin_5 = policy.m_lower[5];
-
- const Index end_0 = policy.m_upper[0];
- const Index end_1 = policy.m_upper[1];
- const Index end_2 = policy.m_upper[2];
- const Index end_3 = policy.m_upper[3];
- const Index end_4 = policy.m_upper[4];
- const Index end_5 = policy.m_upper[5];
-
- ValueType result = ValueType();
-
-    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
-    // loops, which leads to code duplication for different reduction types.
- if constexpr (UseReducer) {
-#pragma omp declare reduction( \
- custom:ValueType \
- : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
- initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
-
-#pragma omp target teams distribute parallel for collapse(6) map(to \
- : functor) \
- reduction(custom \
- : result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- for (auto i5 = begin_5; i5 < end_5; ++i5) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, i5, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5,
- result);
- }
- }
- }
- }
- }
- }
- } else {
-#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
-reduction(+:result)
- for (auto i0 = begin_0; i0 < end_0; ++i0) {
- for (auto i1 = begin_1; i1 < end_1; ++i1) {
- for (auto i2 = begin_2; i2 < end_2; ++i2) {
- for (auto i3 = begin_3; i3 < end_3; ++i3) {
- for (auto i4 = begin_4; i4 < end_4; ++i4) {
- for (auto i5 = begin_5; i5 < end_5; ++i5) {
- if constexpr (std::is_same<typename Policy::work_tag,
- void>::value)
- functor(i0, i1, i2, i3, i4, i5, result);
- else
- functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5,
- result);
- }
- }
- }
- }
- }
- }
- }
-
- ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
- m_result_ptr_on_device);
- }
-
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- return 256;
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-#undef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
-#endif /* KOKKOS_OPENMPTARGET_PARALLEL_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp>
-
-#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(KOKKOS_ENABLE_TASKPOLICY)
-
-#include <impl/Kokkos_TaskQueue_impl.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template class TaskQueue<Kokkos::Experimental::OpenMPTarget>;
-
-//----------------------------------------------------------------------------
-
-TaskExec<Kokkos::Experimental::OpenMPTarget>::TaskExec()
- : m_self_exec(0),
- m_team_exec(0),
- m_sync_mask(0),
- m_sync_value(0),
- m_sync_step(0),
- m_group_rank(0),
- m_team_rank(0),
- m_team_size(1) {}
-
-TaskExec<Kokkos::Experimental::OpenMPTarget>::TaskExec(
- Kokkos::Impl::OpenMPTargetExec &arg_exec, int const arg_team_size)
- : m_self_exec(&arg_exec),
- m_team_exec(arg_exec.pool_rev(arg_exec.pool_rank_rev() / arg_team_size)),
- m_sync_mask(0),
- m_sync_value(0),
- m_sync_step(0),
- m_group_rank(arg_exec.pool_rank_rev() / arg_team_size),
- m_team_rank(arg_exec.pool_rank_rev() % arg_team_size),
- m_team_size(arg_team_size) {
- // This team spans
- // m_self_exec->pool_rev( team_size * group_rank )
- // m_self_exec->pool_rev( team_size * ( group_rank + 1 ) - 1 )
-
- int64_t volatile *const sync = (int64_t *)m_self_exec->scratch_reduce();
-
- sync[0] = int64_t(0);
- sync[1] = int64_t(0);
-
- for (int i = 0; i < m_team_size; ++i) {
- m_sync_value |= int64_t(1) << (8 * i);
- m_sync_mask |= int64_t(3) << (8 * i);
- }
-
- Kokkos::memory_fence();
-}
-
-void TaskExec<Kokkos::Experimental::OpenMPTarget>::team_barrier_impl() const {
- if (m_team_exec->scratch_reduce_size() < int(2 * sizeof(int64_t))) {
- Kokkos::abort("TaskQueue<OpenMPTarget> scratch_reduce memory too small");
- }
-
- // Use team shared memory to synchronize.
- // Alternate memory locations between barriers to avoid a sequence
- // of barriers overtaking one another.
-
- int64_t volatile *const sync =
- ((int64_t *)m_team_exec->scratch_reduce()) + (m_sync_step & 0x01);
-
- // This team member sets one byte within the sync variable
- int8_t volatile *const sync_self = ((int8_t *)sync) + m_team_rank;
-
- *sync_self = int8_t(m_sync_value & 0x03); // signal arrival
-
- while (m_sync_value != *sync)
- ; // wait for team to arrive
-
- ++m_sync_step;
-
- if (0 == (0x01 & m_sync_step)) { // Every other step
- m_sync_value ^= m_sync_mask;
- if (1000 < m_sync_step) m_sync_step = 0;
- }
-}
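-
-// Illustrative walk-through (not part of the original file): with
-// m_team_size == 2 the constructor builds
-//   m_sync_value = 0x0101   (one arrival byte of value 1 per member)
-//   m_sync_mask  = 0x0303   (the two low bits of each member's byte)
-// Each member stores its arrival byte into the shared word and spins until
-// the whole word equals m_sync_value, i.e. until every member has arrived.
-// Alternating the word via (m_sync_step & 0x01) and flipping m_sync_value
-// with m_sync_mask every other step lets back-to-back barriers reuse the two
-// words without a fast member overtaking a slow one.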
-
-//----------------------------------------------------------------------------
-
-void TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget>::execute(
- TaskQueue<Kokkos::Experimental::OpenMPTarget> *const queue) {
- using execution_space = Kokkos::Experimental::OpenMPTarget;
- using queue_type = TaskQueue<execution_space>;
- using task_root_type = TaskBase<execution_space, void, void>;
- using PoolExec = Kokkos::Impl::OpenMPTargetExec;
- using Member = TaskExec<execution_space>;
-
- task_root_type *const end = (task_root_type *)task_root_type::EndTag;
-
- // Required: team_size <= 8
-
- const int team_size = PoolExec::pool_size(2); // Threads per core
- // const int team_size = PoolExec::pool_size(1); // Threads per NUMA
-
- if (8 < team_size) {
- Kokkos::abort("TaskQueue<OpenMPTarget> unsupported team size");
- }
-
-#pragma omp parallel
- {
- PoolExec &self = *PoolExec::get_thread_omp();
-
- Member single_exec;
- Member team_exec(self, team_size);
-
- // Team shared memory
- task_root_type *volatile *const task_shared =
- (task_root_type **)team_exec.m_team_exec->scratch_thread();
-
-// Barrier across entire OpenMPTarget thread pool to ensure initialization
-#pragma omp barrier
-
- // Loop until all queues are empty and no tasks in flight
-
- do {
- task_root_type *task = 0;
-
- // Each team lead attempts to acquire either a thread team task
- // or a single thread task for the team.
-
- if (0 == team_exec.team_rank()) {
- task = 0 < *((volatile int *)&queue->m_ready_count) ? end : 0;
-
- // Loop by priority and then type
- for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
- for (int j = 0; j < 2 && end == task; ++j) {
- task = queue_type::pop_task(&queue->m_ready[i][j]);
- }
- }
- }
-
- // Team lead broadcast acquired task to team members:
-
- if (1 < team_exec.team_size()) {
- if (0 == team_exec.team_rank()) *task_shared = task;
-
- // Fence to be sure task_shared is stored before the barrier
- Kokkos::memory_fence();
-
- // Whole team waits for every team member to reach this statement
- team_exec.team_barrier();
-
- // Fence to be sure task_shared is stored
- Kokkos::memory_fence();
-
- task = *task_shared;
- }
-
- if (0 == task) break; // 0 == m_ready_count
-
- if (end == task) {
-        // All team members wait for the whole team to reach this statement.
-        // This is necessary to prevent task_shared from being updated
-        // before it is read by all threads.
- team_exec.team_barrier();
- } else if (task_root_type::TaskTeam == task->m_task_type) {
- // Thread Team Task
- (*task->m_apply)(task, &team_exec);
-
- // The m_apply function performs a barrier
-
- if (0 == team_exec.team_rank()) {
- // team member #0 completes the task, which may delete the task
- queue->complete(task);
- }
- } else {
- // Single Thread Task
-
- if (0 == team_exec.team_rank()) {
- (*task->m_apply)(task, &single_exec);
-
- queue->complete(task);
- }
-
-          // All team members wait for the whole team to reach this statement.
-          // This is not needed to complete the task, but is necessary to
-          // prevent task_shared from being updated before it is read by all
-          // threads.
- team_exec.team_barrier();
- }
- } while (1);
- }
- // END #pragma omp parallel
-}
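-
-// Note (illustrative, not part of the original file): in the loop above each
-// team lead polls the [priority][type] ready queues and broadcasts the
-// acquired task through team-shared memory. The two sentinel values
-// disambiguate the outcome: 0 means no tasks remain anywhere, so the team
-// exits, while `end` means tasks are still in flight but none could be
-// popped this round, so the team barriers and retries.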
-
-void TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget>::
- iff_single_thread_recursive_execute(
- TaskQueue<Kokkos::Experimental::OpenMPTarget> *const queue) {
- using execution_space = Kokkos::Experimental::OpenMPTarget;
- using queue_type = TaskQueue<execution_space>;
- using task_root_type = TaskBase<execution_space, void, void>;
- using Member = TaskExec<execution_space>;
-
- if (1 == omp_get_num_threads()) {
- task_root_type *const end = (task_root_type *)task_root_type::EndTag;
-
- Member single_exec;
-
- task_root_type *task = end;
-
- do {
- task = end;
-
- // Loop by priority and then type
- for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
- for (int j = 0; j < 2 && end == task; ++j) {
- task = queue_type::pop_task(&queue->m_ready[i][j]);
- }
- }
-
- if (end == task) break;
-
- (*task->m_apply)(task, &single_exec);
-
- queue->complete(task);
-
- } while (1);
- }
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-#endif /* #if defined( KOKKOS_ENABLE_OPENMPTARGET ) && defined( \
- KOKKOS_ENABLE_TASKPOLICY ) */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_OPENMPTARGET_TASK_HPP
-#define KOKKOS_IMPL_OPENMPTARGET_TASK_HPP
-
-#if defined(KOKKOS_ENABLE_TASKPOLICY)
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <>
-class TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget> {
- public:
- using execution_space = Kokkos::Experimental::OpenMPTarget;
- using queue_type = Kokkos::Impl::TaskQueue<execution_space>;
- using task_base_type = Kokkos::Impl::TaskBase<execution_space, void, void>;
-
- // Must specify memory space
- using memory_space = Kokkos::HostSpace;
-
- static void iff_single_thread_recursive_execute(queue_type* const);
-
- // Must provide task queue execution function
- static void execute(queue_type* const);
-
- // Must provide mechanism to set function pointer in
- // execution space from the host process.
- template <typename FunctorType>
- static void proc_set_apply(task_base_type::function_type* ptr) {
- using TaskType = TaskBase<Kokkos::Experimental::OpenMPTarget,
- typename FunctorType::value_type, FunctorType>;
- *ptr = TaskType::apply;
- }
-};
-
-extern template class TaskQueue<Kokkos::Experimental::OpenMPTarget>;
-
-//----------------------------------------------------------------------------
-
-template <>
-class TaskExec<Kokkos::Experimental::OpenMPTarget> {
- private:
- TaskExec(TaskExec&&) = delete;
- TaskExec(TaskExec const&) = delete;
- TaskExec& operator=(TaskExec&&) = delete;
- TaskExec& operator=(TaskExec const&) = delete;
-
- using PoolExec = Kokkos::Impl::OpenMPTargetExec;
-
- friend class Kokkos::Impl::TaskQueue<Kokkos::Experimental::OpenMPTarget>;
- friend class Kokkos::Impl::TaskQueueSpecialization<
- Kokkos::Experimental::OpenMPTarget>;
-
- PoolExec* const m_self_exec; ///< This thread's thread pool data structure
- PoolExec* const m_team_exec; ///< Team thread's thread pool data structure
- int64_t m_sync_mask;
- int64_t mutable m_sync_value;
- int mutable m_sync_step;
- int m_group_rank; ///< Which "team" subset of thread pool
- int m_team_rank; ///< Which thread within a team
- int m_team_size;
-
- TaskExec();
- TaskExec(PoolExec& arg_exec, int arg_team_size);
-
- void team_barrier_impl() const;
-
- public:
- KOKKOS_FUNCTION void* team_shared() const {
- KOKKOS_IF_ON_HOST(
- (return m_team_exec ? m_team_exec->scratch_thread() : nullptr;))
-
- KOKKOS_IF_ON_DEVICE((return nullptr;))
- }
-
- KOKKOS_FUNCTION int team_shared_size() const {
- KOKKOS_IF_ON_HOST(
- (return m_team_exec ? m_team_exec->scratch_thread_size() : 0;))
-
- KOKKOS_IF_ON_DEVICE((return 0;))
- }
-
-  /**\brief Whole team enters this function call
-   * before any team member returns from
-   * this function call.
-   */
- KOKKOS_FUNCTION void team_barrier() const {
- KOKKOS_IF_ON_HOST((if (1 < m_team_size) { team_barrier_impl(); }))
- }
-
- KOKKOS_INLINE_FUNCTION
- int team_rank() const { return m_team_rank; }
-
- KOKKOS_INLINE_FUNCTION
- int team_size() const { return m_team_size; }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >
-TeamThreadRange(Impl::TaskExec<Kokkos::Experimental::OpenMPTarget>& thread,
- const iType& count) {
- return Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >(thread,
- count);
-}
-
-template <typename iType>
-KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >
-TeamThreadRange(Impl::TaskExec<Kokkos::Experimental::OpenMPTarget>& thread,
- const iType& start, const iType& end) {
- return Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >(thread, start,
- end);
-}
-
-/** \brief Inter-thread parallel_for. Executes lambda(iType i) for each
- * i=0..N-1.
- *
- * The range i=0..N-1 is mapped to all threads of the calling thread team.
- */
-template <typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_for(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda) {
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i);
- }
-}
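-
-// Hypothetical usage (illustrative, not part of the original file): the
-// boundaries stride each member through the range round-robin, so
-//
-//   parallel_for(TeamThreadRange(task_exec, N),
-//                [&](int i) { data[i] = 2 * i; });
-//
-// distributes the N iterations across the calling team; `task_exec` and
-// `data` are assumed to exist in the surrounding task.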
-
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda, ValueType& initialized_result) {
- int team_rank =
- loop_boundaries.thread.team_rank(); // member num within the team
- ValueType result = initialized_result;
-
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, result);
- }
-
- if (1 < loop_boundaries.thread.team_size()) {
- ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
-
- loop_boundaries.thread.team_barrier();
- shared[team_rank] = result;
-
- loop_boundaries.thread.team_barrier();
-
- // reduce across threads to thread 0
- if (team_rank == 0) {
- for (int i = 1; i < loop_boundaries.thread.team_size(); i++) {
- shared[0] += shared[i];
- }
- }
-
- loop_boundaries.thread.team_barrier();
-
- // broadcast result
- initialized_result = shared[0];
- } else {
- initialized_result = result;
- }
-}
-
-template <typename iType, class Lambda, typename ValueType, class JoinType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda, const JoinType& join, ValueType& initialized_result) {
- int team_rank =
- loop_boundaries.thread.team_rank(); // member num within the team
- ValueType result = initialized_result;
-
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- lambda(i, result);
- }
-
- if (1 < loop_boundaries.thread.team_size()) {
- ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
-
- loop_boundaries.thread.team_barrier();
- shared[team_rank] = result;
-
- loop_boundaries.thread.team_barrier();
-
- // reduce across threads to thread 0
- if (team_rank == 0) {
- for (int i = 1; i < loop_boundaries.thread.team_size(); i++) {
- join(shared[0], shared[i]);
- }
- }
-
- loop_boundaries.thread.team_barrier();
-
- // broadcast result
- initialized_result = shared[0];
- } else {
- initialized_result = result;
- }
-}
-
-// placeholder for future function
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda, ValueType& initialized_result) {}
-
-// placeholder for future function
-template <typename iType, class Lambda, typename ValueType, class JoinType>
-KOKKOS_INLINE_FUNCTION void parallel_reduce(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda, const JoinType& join, ValueType& initialized_result) {
-}
-
-template <typename ValueType, typename iType, class Lambda>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- const Impl::TeamThreadRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda) {
- ValueType accum = 0;
- ValueType val, local_total;
- ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
- int team_size = loop_boundaries.thread.team_size();
- int team_rank =
- loop_boundaries.thread.team_rank(); // member num within the team
-
- // Intra-member scan
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- local_total = 0;
- lambda(i, local_total, false);
- val = accum;
- lambda(i, val, true);
- accum += local_total;
- }
-
- shared[team_rank] = accum;
- loop_boundaries.thread.team_barrier();
-
-  // Member 0 does a scan over the accumulated totals
-  if (team_rank == 0) {
-    for (iType i = 1; i < team_size; i += 1) {
-      shared[i] += shared[i - 1];
-    }
-    accum = 0; // Member 0 sets accum to 0 in preparation for inter-member scan
-  }
-
- loop_boundaries.thread.team_barrier();
-
- // Inter-member scan adding in accumulated totals
- if (team_rank != 0) {
- accum = shared[team_rank - 1];
- }
- for (iType i = loop_boundaries.start; i < loop_boundaries.end;
- i += loop_boundaries.increment) {
- local_total = 0;
- lambda(i, local_total, false);
- val = accum;
- lambda(i, val, true);
- accum += local_total;
- }
-}
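-
-// Worked example (illustrative, not part of the original file): for a
-// closure contributing val(i) = i, two members covering i = {0, 1} and
-// i = {2, 3} first accumulate the member totals 1 and 5. Member 0 scans the
-// shared totals to [1, 6], so member 1 restarts its final pass at
-// accum = shared[0] = 1, and the lambda(i, val, true) calls observe the
-// inclusive prefix sums 0, 1, 3, 6.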
-
-// placeholder for future function
-template <typename iType, class Lambda, typename ValueType>
-KOKKOS_INLINE_FUNCTION void parallel_scan(
- const Impl::ThreadVectorRangeBoundariesStruct<
- iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
- loop_boundaries,
- const Lambda& lambda) {}
-
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #if defined( KOKKOS_ENABLE_TASKPOLICY ) */
-#endif /* #ifndef KOKKOS_IMPL_OPENMPTARGET_TASK_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_ABORT_HPP
-#define KOKKOS_SYCL_ABORT_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_SYCL)
-#include <CL/sycl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-inline void sycl_abort(char const* msg) {
-#ifdef NDEBUG
- KOKKOS_IMPL_DO_NOT_USE_PRINTF("Aborting with message %s.\n", msg);
-#else
-  // Choosing "" here causes problems, but a single whitespace character works.
- const char* empty = " ";
- __assert_fail(msg, empty, 0, empty);
-#endif
-}
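-
-// Note (illustrative, not part of the original file): in debug builds the
-// abort is routed through __assert_fail so that it also traps inside device
-// code; the file and function arguments are dummies, which is why a
-// single-space literal stands in for the problematic empty string.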
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCLDEEPCOPY_HPP
-#define KOKKOS_SYCLDEEPCOPY_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_SYCL.hpp>
-
-#include <vector>
-
-#ifdef KOKKOS_ENABLE_SYCL
-
-namespace Kokkos {
-namespace Impl {
-
-template <class DT, class... DP>
-struct ZeroMemset<Kokkos::Experimental::SYCL, DT, DP...> {
- ZeroMemset(const Kokkos::Experimental::SYCL& exec_space,
- const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- auto event = exec_space.impl_internal_space_instance()->m_queue->memset(
- dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type));
- exec_space.impl_internal_space_instance()
- ->m_queue->ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
- }
-
- ZeroMemset(const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- Experimental::Impl::SYCLInternal::singleton().m_queue->memset(
- dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type));
- }
-};
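-
-// Note (illustrative, not part of the original file): the instance overload
-// follows the memset with ext_oneapi_submit_barrier so that later work
-// submitted to the same queue is ordered after it; the singleton overload
-// omits the barrier and presumably relies on the caller to synchronize.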
-
-void DeepCopySYCL(void* dst, const void* src, size_t n);
-void DeepCopyAsyncSYCL(const Kokkos::Experimental::SYCL& instance, void* dst,
- const void* src, size_t n);
-void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n);
-
-template <class MemSpace>
-struct DeepCopy<MemSpace, HostSpace, Kokkos::Experimental::SYCL,
- std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncSYCL(instance, dst, src, n);
- }
-};
-
-template <class MemSpace>
-struct DeepCopy<HostSpace, MemSpace, Kokkos::Experimental::SYCL,
- std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncSYCL(instance, dst, src, n);
- }
-};
-
-template <class MemSpace1, class MemSpace2>
-struct DeepCopy<MemSpace1, MemSpace2, Kokkos::Experimental::SYCL,
- std::enable_if_t<is_sycl_type_space<MemSpace1>::value &&
- is_sycl_type_space<MemSpace2>::value>> {
- DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
- DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
- const void* src, size_t n) {
- DeepCopyAsyncSYCL(instance, dst, src, n);
- }
-};
-
-template <class MemSpace1, class MemSpace2, class ExecutionSpace>
-struct DeepCopy<
- MemSpace1, MemSpace2, ExecutionSpace,
- std::enable_if_t<
- is_sycl_type_space<MemSpace1>::value &&
- is_sycl_type_space<MemSpace2>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopySYCL(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncSYCL(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
- MemSpace2::name() +
- "Space, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
-
-template <class MemSpace, class ExecutionSpace>
-struct DeepCopy<
- MemSpace, HostSpace, ExecutionSpace,
- std::enable_if_t<
- is_sycl_type_space<MemSpace>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopySYCL(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncSYCL(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
- "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
-
-template <class MemSpace, class ExecutionSpace>
-struct DeepCopy<
- HostSpace, MemSpace, ExecutionSpace,
- std::enable_if_t<
- is_sycl_type_space<MemSpace>::value &&
- !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
- inline DeepCopy(void* dst, const void* src, size_t n) {
- DeepCopySYCL(dst, src, n);
- }
-
- inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
- size_t n) {
- exec.fence(fence_string());
- DeepCopyAsyncSYCL(dst, src, n);
- }
-
- private:
- static const std::string& fence_string() {
- static const std::string string =
- std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
- "Space, ExecutionSpace>::DeepCopy: fence before copy";
- return string;
- }
-};
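-
-// Design note (illustrative, not part of the original file): when the
-// calling execution space is not SYCL there is no common queue to enqueue
-// the copy on, so the specializations above first fence the caller and then
-// launch the asynchronous copy on the default SYCL instance. A hypothetical
-// call, assuming a host execution space `host` and a SYCL-accessible `dst`:
-//
-//   Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
-//                          Kokkos::HostSpace, decltype(host)>(
-//       host, dst, src, n);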
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.5
-// Copyright (2022) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_HALF_HPP_
-#define KOKKOS_SYCL_HALF_HPP_
-
-#ifdef KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
-
-#include <Kokkos_Half.hpp>
-#include <Kokkos_NumericTraits.hpp> // reduction_identity
-
-namespace Kokkos {
-namespace Experimental {
-
-/************************** half conversions **********************************/
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(half_t val) { return val; }
-
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(float val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(double val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(short val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(unsigned short val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(int val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(unsigned int val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(long long val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(unsigned long long val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(long val) { return half_t::impl_type(val); }
-KOKKOS_INLINE_FUNCTION
-half_t cast_to_half(unsigned long val) { return half_t::impl_type(val); }
-
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION
- std::enable_if_t<std::is_same<T, unsigned short>::value, T>
- cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION
- std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
- cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
-cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
-template <class T>
-KOKKOS_INLINE_FUNCTION
- std::enable_if_t<std::is_same<T, unsigned long>::value, T>
- cast_from_half(half_t val) {
- return half_t::impl_type(val);
-}
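
A minimal usage sketch of the conversion helpers above (not from the original file; it assumes a SYCL-enabled Kokkos build in which half_t wraps sycl::half, and the function name is hypothetical):

#include <Kokkos_Half.hpp>

void half_round_trip_example() {
  using Kokkos::Experimental::cast_from_half;
  using Kokkos::Experimental::cast_to_half;
  using Kokkos::Experimental::half_t;

  // float -> half -> float loses precision: fp16 keeps ~3 decimal digits.
  half_t h = cast_to_half(3.14159f);
  float f = cast_from_half<float>(h);  // roughly 3.140625

  // The explicit template argument selects one of the enable_if'd overloads.
  int i = cast_from_half<int>(cast_to_half(42));  // small integers round-trip
  (void)f;
  (void)i;
}
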
-} // namespace Experimental
-
-template <>
-struct reduction_identity<Kokkos::Experimental::half_t> {
- KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
- sum() noexcept {
- return Kokkos::Experimental::half_t::impl_type(0.0F);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
- prod() noexcept {
- return Kokkos::Experimental::half_t::impl_type(1.0F);
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
- max() noexcept {
- return std::numeric_limits<
- Kokkos::Experimental::half_t::impl_type>::lowest();
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
- min() noexcept {
- return std::numeric_limits<Kokkos::Experimental::half_t::impl_type>::max();
- }
-};
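
These identities are the neutral elements of the respective reductions: joining the identity with any value x must yield x back, which is why sum() is 0, prod() is 1, max() starts from the lowest representable value, and min() from the largest. A hedged sketch (identity_laws_hold is a hypothetical helper, not part of Kokkos):

#include <Kokkos_NumericTraits.hpp>

template <class T>
bool identity_laws_hold(T x) {
  using RI = Kokkos::reduction_identity<T>;
  return (RI::sum() + x == x) && (RI::prod() * x == x) &&
         (x >= RI::max()) &&  // max() returns the lowest value, so any x wins
         (x <= RI::min());    // min() returns the largest value, so any x wins
}
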
-
-} // namespace Kokkos
-#endif // KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-// Kokkos v. 3.5  Copyright (2022) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// (Standard Kokkos BSD 3-clause license header, identical to the one
-// reproduced in full above, omitted here.)
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
-#define KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
-
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_SYCL
-
-#include <CL/sycl.hpp>
-
-#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
-// Make sure no one else tries to define half_t
-#define KOKKOS_IMPL_HALF_TYPE_DEFINED
-#define KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
-
-namespace Kokkos {
-namespace Impl {
-struct half_impl_t {
- using type = sycl::half;
-};
-} // namespace Impl
-} // namespace Kokkos
-#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
-#endif // KOKKOS_ENABLE_SYCL
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-// Kokkos v. 3.0  Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// (Standard Kokkos BSD 3-clause license header, identical to the one
-// reproduced in full above, omitted here.)
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp> //kokkos_malloc
-
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-
-std::vector<std::optional<sycl::queue>*> SYCLInternal::all_queues;
-std::mutex SYCLInternal::mutex;
-
-Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> sycl_global_unique_token_locks(
- bool deallocate) {
- static Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> locks =
- Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
- if (!deallocate && locks.extent(0) == 0)
- locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>(
- "Kokkos::UniqueToken<SYCL>::m_locks", SYCL().concurrency());
- if (deallocate) locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
- return locks;
-}
-
-SYCLInternal::~SYCLInternal() {
- if (!was_finalized || m_scratchSpace || m_scratchFlags) {
- std::cerr << "Kokkos::Experimental::SYCL ERROR: Failed to call "
- "Kokkos::Experimental::SYCL::finalize()"
- << std::endl;
- std::cerr.flush();
- }
-}
-
-int SYCLInternal::verify_is_initialized(const char* const label) const {
- if (!is_initialized()) {
- Kokkos::abort((std::string("Kokkos::Experimental::SYCL::") + label +
- " : ERROR device not initialized\n")
- .c_str());
- }
- return is_initialized();
-}
-SYCLInternal& SYCLInternal::singleton() {
- static SYCLInternal self;
- return self;
-}
-
-void SYCLInternal::initialize(const sycl::device& d) {
- auto exception_handler = [](sycl::exception_list exceptions) {
- bool asynchronous_error = false;
- for (std::exception_ptr const& e : exceptions) {
- try {
- std::rethrow_exception(e);
- } catch (sycl::exception const& e) {
- std::cerr << e.what() << '\n';
- asynchronous_error = true;
- }
- }
- if (asynchronous_error)
- Kokkos::Impl::throw_runtime_exception(
- "There was an asynchronous SYCL error!\n");
- };
- initialize(sycl::queue{d, exception_handler});
-}
-
-// FIXME_SYCL
-void SYCLInternal::initialize(const sycl::queue& q) {
- if (was_finalized)
- Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
-
- if (is_initialized()) return;
-
- if (!HostSpace::execution_space::impl_is_initialized()) {
- const std::string msg(
- "SYCL::initialize ERROR : HostSpace::execution_space is not "
- "initialized");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-
- const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
- const bool ok_dev = true;
- if (ok_init && ok_dev) {
- m_queue = q;
- // guard pushing to all_queues
- {
- std::scoped_lock lock(mutex);
- all_queues.push_back(&m_queue);
- }
- const sycl::device& d = m_queue->get_device();
-
- m_maxWorkgroupSize =
- d.template get_info<sycl::info::device::max_work_group_size>();
- // FIXME_SYCL this should give the correct value for NVIDIA GPUs
- m_maxConcurrency =
- m_maxWorkgroupSize * 2 *
- d.template get_info<sycl::info::device::max_compute_units>();
-
-    // Set up the concurrent bitset used for obtaining unique tokens from
-    // within an executing kernel.
- {
- const int32_t buffer_bound =
- Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
- using Record = Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
- Record* const r =
- Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
- "Kokkos::Experimental::SYCL::InternalScratchBitset",
- sizeof(uint32_t) * buffer_bound);
- Record::increment(r);
- }
-
- m_maxShmemPerBlock =
- d.template get_info<sycl::info::device::local_mem_size>();
-
- for (auto& usm_mem : m_indirectKernelMem) {
- usm_mem.reset(*m_queue, m_instance_id);
- }
-
- } else {
- std::ostringstream msg;
- msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED";
-
- if (!ok_init) {
- msg << " : Already initialized";
- }
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
- m_team_scratch_current_size = 0;
- m_team_scratch_ptr = nullptr;
-}
-
-sycl::device_ptr<void> SYCLInternal::resize_team_scratch_space(
- std::int64_t bytes, bool force_shrink) {
- if (m_team_scratch_current_size == 0) {
- m_team_scratch_current_size = bytes;
- m_team_scratch_ptr =
- Kokkos::kokkos_malloc<Experimental::SYCLDeviceUSMSpace>(
- "Kokkos::Experimental::SYCLDeviceUSMSpace::TeamScratchMemory",
- m_team_scratch_current_size);
- }
- if ((bytes > m_team_scratch_current_size) ||
- ((bytes < m_team_scratch_current_size) && (force_shrink))) {
- m_team_scratch_current_size = bytes;
- m_team_scratch_ptr =
- Kokkos::kokkos_realloc<Experimental::SYCLDeviceUSMSpace>(
- m_team_scratch_ptr, m_team_scratch_current_size);
- }
- return m_team_scratch_ptr;
-}
-
-uint32_t SYCLInternal::impl_get_instance_id() const { return m_instance_id; }
-
-void SYCLInternal::finalize() {
- SYCLInternal::fence(*m_queue,
- "Kokkos::SYCLInternal::finalize: fence on finalization",
- m_instance_id);
- was_finalized = true;
-
-  // The global_unique_token_locks array is static and should only be
-  // deallocated once, by the default instance.
- if (this == &singleton()) Impl::sycl_global_unique_token_locks(true);
-
- using RecordSYCL = Kokkos::Impl::SharedAllocationRecord<SYCLDeviceUSMSpace>;
- if (nullptr != m_scratchSpace)
- RecordSYCL::decrement(RecordSYCL::get_record(m_scratchSpace));
- if (nullptr != m_scratchFlags)
- RecordSYCL::decrement(RecordSYCL::get_record(m_scratchFlags));
- m_syclDev = -1;
- m_scratchSpaceCount = 0;
- m_scratchSpace = nullptr;
- m_scratchFlagsCount = 0;
- m_scratchFlags = nullptr;
-
- if (m_team_scratch_current_size > 0)
- Kokkos::kokkos_free<Kokkos::Experimental::SYCLDeviceUSMSpace>(
- m_team_scratch_ptr);
- m_team_scratch_current_size = 0;
- m_team_scratch_ptr = nullptr;
-
- for (auto& usm_mem : m_indirectKernelMem) usm_mem.reset();
- // guard erasing from all_queues
- {
- std::scoped_lock lock(mutex);
- all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));
- }
- m_queue.reset();
-}
-
-sycl::device_ptr<void> SYCLInternal::scratch_space(const std::size_t size) {
- const size_type sizeScratchGrain =
- sizeof(Kokkos::Experimental::SYCL::size_type);
- if (verify_is_initialized("scratch_space") &&
- m_scratchSpaceCount * sizeScratchGrain < size) {
- m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record = Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
-
- if (nullptr != m_scratchSpace)
- Record::decrement(Record::get_record(m_scratchSpace));
-
- Record* const r =
- Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
- "Kokkos::Experimental::SYCL::InternalScratchSpace",
- (sizeScratchGrain * m_scratchSpaceCount));
-
- Record::increment(r);
-
- m_scratchSpace = reinterpret_cast<size_type*>(r->data());
- }
-
- return m_scratchSpace;
-}
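
The grain rounding above is the standard integer ceil-division idiom, shown here in isolation (grains_for is a hypothetical helper for illustration):

#include <cstddef>

// Round size_bytes up to a whole number of grains of `grain` bytes each:
// (a + b - 1) / b == ceil(a / b) for positive integers.
std::size_t grains_for(std::size_t size_bytes, std::size_t grain) {
  return (size_bytes + grain - 1) / grain;
}
// e.g. grains_for(10, 4) == 3, so 12 bytes of scratch end up allocated.
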
-
-sycl::device_ptr<void> SYCLInternal::scratch_flags(const std::size_t size) {
- const size_type sizeScratchGrain =
- sizeof(Kokkos::Experimental::SYCL::size_type);
- if (verify_is_initialized("scratch_flags") &&
- m_scratchFlagsCount * sizeScratchGrain < size) {
- m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
-
- using Record = Kokkos::Impl::SharedAllocationRecord<
- Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
-
- if (nullptr != m_scratchFlags)
- Record::decrement(Record::get_record(m_scratchFlags));
-
- Record* const r =
- Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
- "Kokkos::Experimental::SYCL::InternalScratchFlags",
- (sizeScratchGrain * m_scratchFlagsCount));
-
- Record::increment(r);
-
- m_scratchFlags = reinterpret_cast<size_type*>(r->data());
- }
- m_queue->memset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain);
- fence(*m_queue,
- "Kokkos::Experimental::SYCLInternal::scratch_flags fence after "
- "initializing m_scratchFlags",
- m_instance_id);
-
- return m_scratchFlags;
-}
-
-template <typename WAT>
-void SYCLInternal::fence_helper(WAT& wat, const std::string& name,
- uint32_t instance_id) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::SYCL>(
- name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id},
- [&]() {
- try {
- wat.wait_and_throw();
- } catch (sycl::exception const& e) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("There was a synchronous SYCL error:\n") += e.what());
- }
- });
-}
-template void SYCLInternal::fence_helper<sycl::queue>(sycl::queue&,
- const std::string&,
- uint32_t);
-template void SYCLInternal::fence_helper<sycl::event>(sycl::event&,
- const std::string&,
- uint32_t);
-
-// This function cycles through a pool of USM allocations for functors
-SYCLInternal::IndirectKernelMem& SYCLInternal::get_indirect_kernel_mem() {
-  // Thread safety: atomically increment the round-robin variable.
-  // NB: atomic_fetch_inc_mod returns values in the range [0, N], not
-  // [0, N) as one might expect.
- size_t next_pool = desul::atomic_fetch_inc_mod(
- &m_pool_next, m_usm_pool_size - 1, desul::MemoryOrderRelaxed(),
- desul::MemoryScopeDevice());
- return m_indirectKernelMem[next_pool];
-}
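
A plain-C++ model of the round-robin selection above (fetch_inc_mod_model is hypothetical; desul's atomic_fetch_inc_mod atomically returns the old counter value and wraps the stored value back to 0 once it would exceed the bound):

#include <cstddef>

std::size_t fetch_inc_mod_model(std::size_t& counter, std::size_t bound) {
  const std::size_t old = counter;
  counter = (old >= bound) ? 0 : old + 1;  // stored value stays in [0, bound]
  return old;
}
// With bound == m_usm_pool_size - 1 the returned pool indices cycle
// 0, 1, ..., m_usm_pool_size - 1, 0, 1, ...
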
-
-template <sycl::usm::alloc Kind>
-size_t SYCLInternal::USMObjectMem<Kind>::reserve(size_t n) {
- assert(m_q);
-
- if (m_capacity < n) {
- using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
- // First free what we have (in case malloc can reuse it)
- if (m_data) Record::decrement(Record::get_record(m_data));
-
- Record* const r = Record::allocate(
- AllocationSpace(*m_q), "Kokkos::Experimental::SYCL::USMObjectMem", n);
- Record::increment(r);
-
- m_data = r->data();
- if constexpr (sycl::usm::alloc::device == Kind)
- m_staging.reset(new char[n]);
- m_capacity = n;
- }
-
- return m_capacity;
-}
-
-template <sycl::usm::alloc Kind>
-void SYCLInternal::USMObjectMem<Kind>::reset() {
- if (m_data) {
- // This implies a fence since this class is not copyable
- // and deallocating implies a fence across all registered queues.
- using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
- Record::decrement(Record::get_record(m_data));
-
- m_capacity = 0;
- m_data = nullptr;
- }
- m_q.reset();
-}
-
-template class SYCLInternal::USMObjectMem<sycl::usm::alloc::shared>;
-template class SYCLInternal::USMObjectMem<sycl::usm::alloc::device>;
-template class SYCLInternal::USMObjectMem<sycl::usm::alloc::host>;
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
+++ /dev/null
-#ifndef KOKKOS_SYCL_MDRANGEPOLICY_HPP_
-#define KOKKOS_SYCL_MDRANGEPOLICY_HPP_
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-namespace Kokkos {
-
-template <>
-struct default_outer_direction<Kokkos::Experimental::SYCL> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-template <>
-struct default_inner_direction<Kokkos::Experimental::SYCL> {
- using type = Iterate;
- static constexpr Iterate value = Iterate::Left;
-};
-
-namespace Impl {
-
-// Settings for MDRangePolicy
-template <>
-inline TileSizeProperties get_tile_size_properties<Kokkos::Experimental::SYCL>(
- const Kokkos::Experimental::SYCL& space) {
- TileSizeProperties properties;
- properties.max_threads =
- space.impl_internal_space_instance()->m_maxWorkgroupSize;
- properties.default_largest_tile_size = 16;
- properties.default_tile_size = 2;
- properties.max_total_tile_size = properties.max_threads;
- return properties;
-}
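
These properties feed MDRangePolicy's automatic tile-size selection; callers may also pass tile sizes explicitly, as in this hedged sketch (assuming a SYCL-enabled build; label and lambda body are placeholders):

#include <Kokkos_Core.hpp>

void tiled_launch_example() {
  using ExecSpace = Kokkos::Experimental::SYCL;
  // 2D range [0,512) x [0,512) with explicit 16 x 8 tiles; the tile product
  // 16 * 8 = 128 must not exceed max_total_tile_size (the workgroup limit).
  Kokkos::MDRangePolicy<ExecSpace, Kokkos::Rank<2>> policy({0, 0}, {512, 512},
                                                           {16, 8});
  Kokkos::parallel_for(
      "tiled_example", policy, KOKKOS_LAMBDA(int i, int j) {
        (void)i;
        (void)j;
      });
}
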
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-// Kokkos v. 3.0  Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// (Standard Kokkos BSD 3-clause license header, identical to the one
-// reproduced in full above, omitted here.)
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_PARALLEL_REDUCE_HPP
-#define KOKKOS_SYCL_PARALLEL_REDUCE_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <vector>
-#if defined(KOKKOS_ENABLE_SYCL)
-#include <Kokkos_Parallel_Reduce.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-
-template <class ReducerType>
-inline constexpr bool use_shuffle_based_algorithm =
- std::is_reference_v<typename ReducerType::reference_type>;
-
-namespace SYCLReduction {
-template <typename ValueType, typename ReducerType, int dim>
-std::enable_if_t<!use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
- sycl::nd_item<dim>& item, sycl::local_ptr<ValueType> local_mem,
- sycl::device_ptr<ValueType> results_ptr,
- sycl::global_ptr<ValueType> device_accessible_result_ptr,
- const unsigned int value_count, const ReducerType& final_reducer,
- bool final, unsigned int max_size) {
- const auto local_id = item.get_local_linear_id();
-
- // Perform the actual workgroup reduction in each subgroup
- // separately.
- auto sg = item.get_sub_group();
- auto* result = &local_mem[local_id * value_count];
- const auto id_in_sg = sg.get_local_id()[0];
- const auto local_range =
- std::min<unsigned int>(sg.get_local_range()[0], max_size);
- const auto upper_stride_bound =
- std::min(local_range - id_in_sg, max_size - local_id);
- for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
- if (stride < upper_stride_bound)
- final_reducer.join(result, &local_mem[(local_id + stride) * value_count]);
- sycl::group_barrier(sg);
- }
- sycl::group_barrier(item.get_group());
-
- // Copy the subgroup results into the first positions of the
- // reduction array.
- if (id_in_sg == 0)
- final_reducer.copy(&local_mem[sg.get_group_id()[0] * value_count], result);
- sycl::group_barrier(item.get_group());
-
- // Do the final reduction only using the first subgroup.
- if (sg.get_group_id()[0] == 0) {
- const auto n_subgroups = sg.get_group_range()[0];
- auto* result_ = &local_mem[id_in_sg * value_count];
- // In case the number of subgroups is larger than the range of
- // the first subgroup, we first combine the items with a higher
- // index.
- for (unsigned int offset = local_range; offset < n_subgroups;
- offset += local_range)
- if (id_in_sg + offset < n_subgroups)
- final_reducer.join(result_,
- &local_mem[(id_in_sg + offset) * value_count]);
- sycl::group_barrier(sg);
-
- // Then, we proceed as before.
- for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
- if (id_in_sg + stride < n_subgroups)
- final_reducer.join(result_,
- &local_mem[(id_in_sg + stride) * value_count]);
- sycl::group_barrier(sg);
- }
-
- // Finally, we copy the workgroup results back to global memory
- // to be used in the next iteration. If this is the last
- // iteration, i.e., there is only one workgroup also call
- // final() if necessary.
- if (id_in_sg == 0) {
- if (final) {
- final_reducer.final(&local_mem[0]);
- if (device_accessible_result_ptr != nullptr)
- final_reducer.copy(&device_accessible_result_ptr[0], &local_mem[0]);
- else
- final_reducer.copy(&results_ptr[0], &local_mem[0]);
- } else
- final_reducer.copy(
- &results_ptr[(item.get_group_linear_id()) * value_count],
- &local_mem[0]);
- }
- }
-}
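
A serial model of the stride-doubling pattern used above (stride_doubling_reduce is a hypothetical illustration): each barrier-separated step adds slot i+stride into slot i, so after ceil(log2(n)) steps slot 0 holds the whole reduction.

#include <cstddef>
#include <vector>

double stride_doubling_reduce(std::vector<double> v) {
  const std::size_t n = v.size();
  for (std::size_t stride = 1; stride < n; stride <<= 1) {
    const std::vector<double> prev = v;  // stands in for the group barrier
    for (std::size_t i = 0; i + stride < n; ++i)
      v[i] = prev[i] + prev[i + stride];
  }
  return n == 0 ? 0.0 : v[0];
}
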
-
-template <typename ValueType, typename ReducerType, int dim>
-std::enable_if_t<use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
- sycl::nd_item<dim>& item, sycl::local_ptr<ValueType> local_mem,
- ValueType local_value, sycl::device_ptr<ValueType> results_ptr,
- sycl::global_ptr<ValueType> device_accessible_result_ptr,
- const ReducerType& final_reducer, bool final, unsigned int max_size) {
- const auto local_id = item.get_local_linear_id();
-
- // Perform the actual workgroup reduction in each subgroup
- // separately.
- auto sg = item.get_sub_group();
- const auto id_in_sg = sg.get_local_id()[0];
- const auto local_range =
- std::min<unsigned int>(sg.get_local_range()[0], max_size);
- const auto upper_stride_bound =
- std::min(local_range - id_in_sg, max_size - local_id);
- for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
- auto tmp = sg.shuffle_down(local_value, stride);
- if (stride < upper_stride_bound) final_reducer.join(&local_value, &tmp);
- }
-
- // Copy the subgroup results into the first positions of the
- // reduction array.
- const auto max_subgroup_size = sg.get_max_local_range()[0];
- const auto n_active_subgroups =
- (max_size + max_subgroup_size - 1) / max_subgroup_size;
- if (id_in_sg == 0 && sg.get_group_id()[0] <= n_active_subgroups)
- local_mem[sg.get_group_id()[0]] = local_value;
- item.barrier(sycl::access::fence_space::local_space);
-
- // Do the final reduction only using the first subgroup.
- if (sg.get_group_id()[0] == 0) {
- auto sg_value = local_mem[id_in_sg < n_active_subgroups ? id_in_sg : 0];
-
- // In case the number of subgroups is larger than the range of
- // the first subgroup, we first combine the items with a higher
- // index.
- if (n_active_subgroups > local_range) {
- for (unsigned int offset = local_range; offset < n_active_subgroups;
- offset += local_range)
- if (id_in_sg + offset < n_active_subgroups) {
- final_reducer.join(&sg_value, &local_mem[(id_in_sg + offset)]);
- }
- sg.barrier();
- }
-
- // Then, we proceed as before.
- for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
- auto tmp = sg.shuffle_down(sg_value, stride);
- if (id_in_sg + stride < n_active_subgroups)
- final_reducer.join(&sg_value, &tmp);
- }
-
- // Finally, we copy the workgroup results back to global memory
- // to be used in the next iteration. If this is the last
- // iteration, i.e., there is only one workgroup also call
- // final() if necessary.
- if (id_in_sg == 0) {
- if (final) {
- final_reducer.final(&sg_value);
- if (device_accessible_result_ptr != nullptr)
- device_accessible_result_ptr[0] = sg_value;
- else
- results_ptr[0] = sg_value;
- } else
- results_ptr[(item.get_group_linear_id())] = sg_value;
- }
- }
-}
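
The shuffle-based variant avoids the local-memory array for the intra-subgroup phase: sg.shuffle_down(x, d) hands each work-item the value held d lanes above it. A serial model (shuffle_down_reduce is hypothetical):

#include <cstddef>
#include <vector>

double shuffle_down_reduce(std::vector<double> lane) {
  const std::size_t n = lane.size();  // one entry per subgroup lane
  for (std::size_t stride = 1; stride < n; stride <<= 1) {
    std::vector<double> shifted(n, 0.0);
    for (std::size_t i = 0; i + stride < n; ++i)
      shifted[i] = lane[i + stride];  // models sg.shuffle_down(value, stride)
    for (std::size_t i = 0; i + stride < n; ++i)
      lane[i] += shifted[i];          // models final_reducer.join(...)
  }
  return lane.empty() ? 0.0 : lane[0];  // lane 0 holds the subgroup total
}
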
-
-} // namespace SYCLReduction
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::SYCL> {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- private:
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
- using execution_space = typename Analysis::execution_space;
- using value_type = typename Analysis::value_type;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- using WorkTag = typename Policy::work_tag;
-
- public:
- // V - View
- template <typename V>
- ParallelReduce(const FunctorType& f, const Policy& p, const V& v,
- std::enable_if_t<Kokkos::is_view<V>::value, void*> = nullptr)
- : m_functor(f),
- m_policy(p),
- m_result_ptr(v.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename V::memory_space>::accessible),
- m_shared_memory_lock(
- p.space().impl_internal_space_instance()->m_mutexScratchSpace) {}
-
- ParallelReduce(const FunctorType& f, const Policy& p,
- const ReducerType& reducer)
- : m_functor(f),
- m_policy(p),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_shared_memory_lock(
- p.space().impl_internal_space_instance()->m_mutexScratchSpace) {}
-
- private:
- template <typename PolicyType, typename FunctorWrapper,
- typename ReducerWrapper>
- sycl::event sycl_direct_launch(
- const PolicyType& policy, const FunctorWrapper& functor_wrapper,
- const ReducerWrapper& reducer_wrapper,
- const std::vector<sycl::event>& memcpy_events) const {
- // Convenience references
- const Kokkos::Experimental::SYCL& space = policy.space();
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *space.impl_internal_space_instance();
- sycl::queue& q = space.sycl_queue();
-
- // FIXME_SYCL optimize
- constexpr size_t wgroup_size = 128;
- constexpr size_t values_per_thread = 2;
- std::size_t size = policy.end() - policy.begin();
- const auto init_size = std::max<std::size_t>(
- ((size + values_per_thread - 1) / values_per_thread + wgroup_size - 1) /
- wgroup_size,
- 1);
- const unsigned int value_count =
- Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
- const auto results_ptr =
- static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
- sizeof(value_type) * std::max(value_count, 1u) * init_size));
- sycl::global_ptr<value_type> device_accessible_result_ptr =
- m_result_ptr_device_accessible ? m_result_ptr : nullptr;
- auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
- instance.scratch_flags(sizeof(unsigned int)));
-
- sycl::event last_reduction_event;
-
-    // If size <= 1 we call init(), the functor (only when size == 1), and
-    // final() just once, working on the global scratch memory; the result is
-    // not copied back to m_result_ptr yet.
- if (size <= 1) {
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- const auto begin = policy.begin();
- cgh.depends_on(memcpy_events);
- cgh.single_task([=]() {
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
- reference_type update = final_reducer.init(results_ptr);
- if (size == 1) {
- if constexpr (std::is_void<WorkTag>::value)
- functor(begin, update);
- else
- functor(WorkTag(), begin, update);
- }
- final_reducer.final(results_ptr);
- if (device_accessible_result_ptr != nullptr)
- final_reducer.copy(device_accessible_result_ptr.get(),
- results_ptr.get());
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- last_reduction_event = parallel_reduce_event;
- }
-
-    // Otherwise, each workgroup reduces its values separately and writes its
-    // partial result back to global memory. An atomic counter in scratch_flags
-    // tracks how many workgroups have finished; the last one to finish
-    // combines all partial results into the final value.
- if (size > 1) {
- auto n_wgroups = ((size + values_per_thread - 1) / values_per_thread +
- wgroup_size - 1) /
- wgroup_size;
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- sycl::accessor<value_type, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u),
- cgh);
- sycl::accessor<unsigned int, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- num_teams_done(1, cgh);
-
- const auto begin = policy.begin();
-
- cgh.depends_on(memcpy_events);
-
- cgh.parallel_for(
- sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
- [=](sycl::nd_item<1> item) {
- const auto local_id = item.get_local_linear_id();
- const auto global_id =
- wgroup_size * item.get_group_linear_id() * values_per_thread +
- local_id;
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(
- reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
-
- using index_type = typename Policy::index_type;
- const auto upper_bound = std::min<index_type>(
- global_id + values_per_thread * wgroup_size, size);
-
- if constexpr (Analysis::StaticValueSize == 0) {
- reference_type update =
- final_reducer.init(&local_mem[local_id * value_count]);
- for (index_type id = global_id; id < upper_bound;
- id += wgroup_size) {
- if constexpr (std::is_void<WorkTag>::value)
- functor(id + begin, update);
- else
- functor(WorkTag(), id + begin, update);
- }
- item.barrier(sycl::access::fence_space::local_space);
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count, final_reducer,
- false, std::min(size, wgroup_size));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done[0] = ++scratch_flags_ref;
- }
- item.barrier(sycl::access::fence_space::local_space);
- if (num_teams_done[0] == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_mem[local_id * value_count]);
- else {
- final_reducer.copy(&local_mem[local_id * value_count],
- &results_ptr[local_id * value_count]);
- for (unsigned int id = local_id + wgroup_size;
- id < n_wgroups; id += wgroup_size) {
- final_reducer.join(&local_mem[local_id * value_count],
- &results_ptr[id * value_count]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count, final_reducer,
- true, std::min(n_wgroups, wgroup_size));
- }
- } else {
- value_type local_value;
- reference_type update = final_reducer.init(&local_value);
- for (index_type id = global_id; id < upper_bound;
- id += wgroup_size) {
- if constexpr (std::is_void<WorkTag>::value)
- functor(id + begin, update);
- else
- functor(WorkTag(), id + begin, update);
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, false,
- std::min(size, wgroup_size));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done[0] = ++scratch_flags_ref;
- }
- item.barrier(sycl::access::fence_space::local_space);
- if (num_teams_done[0] == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_value);
- else {
- local_value = results_ptr[local_id];
- for (unsigned int id = local_id + wgroup_size;
- id < n_wgroups; id += wgroup_size) {
- final_reducer.join(&local_value, &results_ptr[id]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, true,
- std::min(n_wgroups, wgroup_size));
- }
- }
- });
- });
- last_reduction_event = q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- }
-
- // At this point, the reduced value is written to the entry in results_ptr
- // and all that is left is to copy it back to the given result pointer if
- // necessary.
- if (m_result_ptr && !m_result_ptr_device_accessible) {
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace>(
- space, m_result_ptr, results_ptr,
- sizeof(*m_result_ptr) * value_count);
- }
-
- return last_reduction_event;
- }
-
- public:
- void execute() const {
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *m_policy.space().impl_internal_space_instance();
- using IndirectKernelMem =
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
- IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
- IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
- auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_reducer, indirectReducerMem);
-
- sycl::event event = sycl_direct_launch(
- m_policy, functor_wrapper, reducer_wrapper,
- {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
- functor_wrapper.register_event(event);
- reducer_wrapper.register_event(event);
- }
-
- private:
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
-
- // Only let one Parallel/Scan modify the shared memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::scoped_lock<std::mutex> m_shared_memory_lock;
-};
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Experimental::SYCL> {
- public:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
-
- private:
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
- using execution_space = typename Analysis::execution_space;
- using value_type = typename Analysis::value_type;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- using WorkTag = typename Policy::work_tag;
-
-  // MDRangePolicy is not trivially copyable. Hence, we replicate the data
-  // DeviceIterateTile really needs in a trivially copyable struct.
- struct BarePolicy {
- using index_type = typename Policy::index_type;
-
- BarePolicy(const Policy& policy)
- : m_lower(policy.m_lower),
- m_upper(policy.m_upper),
- m_tile(policy.m_tile),
- m_tile_end(policy.m_tile_end),
- m_num_tiles(policy.m_num_tiles),
- m_prod_tile_dims(policy.m_prod_tile_dims) {}
-
- const typename Policy::point_type m_lower;
- const typename Policy::point_type m_upper;
- const typename Policy::tile_type m_tile;
- const typename Policy::point_type m_tile_end;
- const typename Policy::index_type m_num_tiles;
- const typename Policy::index_type m_prod_tile_dims;
- static constexpr Iterate inner_direction = Policy::inner_direction;
- static constexpr int rank = Policy::rank;
- };
-
- public:
- // V - View
- template <typename V>
- ParallelReduce(const FunctorType& f, const Policy& p, const V& v,
- std::enable_if_t<Kokkos::is_view<V>::value, void*> = nullptr)
- : m_functor(f),
- m_policy(p),
- m_space(p.space()),
- m_result_ptr(v.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename V::memory_space>::accessible),
- m_shared_memory_lock(
- m_space.impl_internal_space_instance()->m_mutexScratchSpace) {}
-
- ParallelReduce(const FunctorType& f, const Policy& p,
- const ReducerType& reducer)
- : m_functor(f),
- m_policy(p),
- m_space(p.space()),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_shared_memory_lock(
- m_space.impl_internal_space_instance()->m_mutexScratchSpace) {}
-
- private:
- template <typename PolicyType, typename FunctorWrapper,
- typename ReducerWrapper>
- sycl::event sycl_direct_launch(
- const PolicyType& policy, const FunctorWrapper& functor_wrapper,
- const ReducerWrapper& reducer_wrapper,
- const std::vector<sycl::event>& memcpy_events) const {
- // Convenience references
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *m_space.impl_internal_space_instance();
- sycl::queue& q = m_space.sycl_queue();
-
- const typename Policy::index_type nwork = m_policy.m_num_tiles;
- const typename Policy::index_type block_size =
- std::pow(2, std::ceil(std::log2(m_policy.m_prod_tile_dims)));
-
- const sycl::range<1> local_range(block_size);
-    // REMEMBER: swap local x<->y to conform with the Cuda/HIP implementation
- const sycl::range<1> global_range(nwork * block_size);
- const sycl::nd_range<1> range{global_range, local_range};
-
- const size_t wgroup_size = range.get_local_range().size();
- size_t size = range.get_global_range().size();
- const auto init_size =
- std::max<std::size_t>((size + wgroup_size - 1) / wgroup_size, 1);
- const unsigned int value_count =
- Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
- const auto results_ptr =
- static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
- sizeof(value_type) * std::max(value_count, 1u) * init_size));
- sycl::global_ptr<value_type> device_accessible_result_ptr =
- m_result_ptr_device_accessible ? m_result_ptr : nullptr;
- auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
- instance.scratch_flags(sizeof(unsigned int)));
-
- sycl::event last_reduction_event;
-
-    // If size <= 1 we call init(), the functor (only when size == 1), and
-    // final() just once, working on the global scratch memory; the result is
-    // not copied back to m_result_ptr yet.
- if (size <= 1) {
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- cgh.depends_on(memcpy_events);
- cgh.single_task([=]() {
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
-
- reference_type update = final_reducer.init(results_ptr);
- if (size == 1) {
- Kokkos::Impl::Reduce::DeviceIterateTile<
- Policy::rank, BarePolicy, FunctorType,
- typename Policy::work_tag, reference_type>(
- policy, functor, update, {1, 1, 1}, {0, 0, 0}, {0, 0, 0})
- .exec_range();
- }
- final_reducer.final(results_ptr);
- if (device_accessible_result_ptr)
- final_reducer.copy(device_accessible_result_ptr.get(),
- results_ptr.get());
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- last_reduction_event = parallel_reduce_event;
- }
-
-    // Otherwise, each workgroup reduces its values separately and writes its
-    // partial result back to global memory. An atomic counter in scratch_flags
-    // tracks how many workgroups have finished; the last one to finish
-    // combines all partial results into the final value.
- if (size > 1) {
- auto n_wgroups = (size + wgroup_size - 1) / wgroup_size;
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- sycl::accessor<value_type, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u),
- cgh);
- sycl::accessor<unsigned int, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- num_teams_done(1, cgh);
-
- const BarePolicy bare_policy = m_policy;
-
- cgh.depends_on(memcpy_events);
-
- cgh.parallel_for(range, [=](sycl::nd_item<1> item) {
- const auto local_id = item.get_local_linear_id();
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
-
-          // In the first pass, we call the functor to initialize the local
-          // memory. In the final pass, the local memory is instead
-          // initialized with the partial results from the first pass that
-          // are stored in global memory.
- using index_type = typename Policy::index_type;
-
-          // SWAPPED here to conform with the CUDA implementation
- const index_type local_x = 0;
- const index_type local_y = item.get_local_id(0);
- const index_type local_z = 0;
- const index_type global_x = item.get_group(0);
- const index_type global_y = 0;
- const index_type global_z = 0;
- const index_type n_global_x = item.get_group_range(0);
- const index_type n_global_y = 1;
- const index_type n_global_z = 1;
-
- if constexpr (Analysis::StaticValueSize == 0) {
- reference_type update =
- final_reducer.init(&local_mem[local_id * value_count]);
-
- Kokkos::Impl::Reduce::DeviceIterateTile<
- Policy::rank, BarePolicy, FunctorType,
- typename Policy::work_tag, reference_type>(
- bare_policy, functor, update,
- {n_global_x, n_global_y, n_global_z},
- {global_x, global_y, global_z}, {local_x, local_y, local_z})
- .exec_range();
- item.barrier(sycl::access::fence_space::local_space);
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count, final_reducer, false,
- std::min(size, wgroup_size));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done[0] = ++scratch_flags_ref;
- }
- item.barrier(sycl::access::fence_space::local_space);
- if (num_teams_done[0] == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_mem[local_id * value_count]);
- else {
- final_reducer.copy(&local_mem[local_id * value_count],
- &results_ptr[local_id * value_count]);
- for (unsigned int id = local_id + wgroup_size; id < n_wgroups;
- id += wgroup_size) {
- final_reducer.join(&local_mem[local_id * value_count],
- &results_ptr[id * value_count]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count, final_reducer,
- true, std::min(n_wgroups, wgroup_size));
- }
- } else {
- value_type local_value;
- reference_type update = final_reducer.init(&local_value);
-
- Kokkos::Impl::Reduce::DeviceIterateTile<
- Policy::rank, BarePolicy, FunctorType,
- typename Policy::work_tag, reference_type>(
- bare_policy, functor, update,
- {n_global_x, n_global_y, n_global_z},
- {global_x, global_y, global_z}, {local_x, local_y, local_z})
- .exec_range();
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, false,
- std::min(size, wgroup_size));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done[0] = ++scratch_flags_ref;
- }
- item.barrier(sycl::access::fence_space::local_space);
- if (num_teams_done[0] == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_value);
- else {
- local_value = results_ptr[local_id];
- for (unsigned int id = local_id + wgroup_size; id < n_wgroups;
- id += wgroup_size) {
- final_reducer.join(&local_value, &results_ptr[id]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, true,
- std::min(n_wgroups, wgroup_size));
- }
- }
- });
- });
- last_reduction_event = q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- }
-
- // At this point, the reduced value is written to the entry in results_ptr
- // and all that is left is to copy it back to the given result pointer if
- // necessary.
- if (m_result_ptr && !m_result_ptr_device_accessible) {
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace>(
- m_space, m_result_ptr, results_ptr,
- sizeof(*m_result_ptr) * value_count);
- }
-
- return last_reduction_event;
- }
-
- public:
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy& policy, const Functor&) {
- return policy.space().impl_internal_space_instance()->m_maxWorkgroupSize;
- }
-
- void execute() const {
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *m_space.impl_internal_space_instance();
- using IndirectKernelMem =
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
- IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
- IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
- auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_reducer, indirectReducerMem);
-
- sycl::event event = sycl_direct_launch(
- m_policy, functor_wrapper, reducer_wrapper,
- {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
- functor_wrapper.register_event(event);
- reducer_wrapper.register_event(event);
- }
-
- private:
- const FunctorType m_functor;
- const BarePolicy m_policy;
- const Kokkos::Experimental::SYCL& m_space;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
-
- // Only let one Parallel/Scan modify the shared memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::scoped_lock<std::mutex> m_shared_memory_lock;
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif
-#endif /* KOKKOS_SYCL_PARALLEL_REDUCE_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-// Kokkos v. 3.0  Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-// (Standard Kokkos BSD 3-clause license header, identical to the one
-// reproduced in full above, omitted here.)
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_PARALLEL_SCAN_HPP
-#define KOKKOS_SYCL_PARALLEL_SCAN_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <memory>
-#include <vector>
-#if defined(KOKKOS_ENABLE_SYCL)
-
-namespace Kokkos {
-namespace Impl {
-
-// Perform a scan over a workgroup.
-// At the end of this function, the subgroup scans are stored in the local array
-// such that the last value (at position n_active_subgroups-1) contains the
-// total sum.
-template <int dim, typename ValueType, typename FunctorType>
-void workgroup_scan(sycl::nd_item<dim> item, const FunctorType& final_reducer,
- sycl::local_ptr<ValueType> local_mem,
- ValueType& local_value, unsigned int global_range) {
- // subgroup scans
- auto sg = item.get_sub_group();
- const auto sg_group_id = sg.get_group_id()[0];
- const auto id_in_sg = sg.get_local_id()[0];
- for (unsigned int stride = 1; stride < global_range; stride <<= 1) {
- auto tmp = sg.shuffle_up(local_value, stride);
- if (id_in_sg >= stride) final_reducer.join(&local_value, &tmp);
- }
-
- const auto max_subgroup_size = sg.get_max_local_range()[0];
- const auto n_active_subgroups =
- (global_range + max_subgroup_size - 1) / max_subgroup_size;
-
- const auto local_range = sg.get_local_range()[0];
- if (id_in_sg == local_range - 1 && sg_group_id < n_active_subgroups)
- local_mem[sg_group_id] = local_value;
- local_value = sg.shuffle_up(local_value, 1);
- if (id_in_sg == 0) final_reducer.init(&local_value);
- sycl::group_barrier(item.get_group());
-
- // scan subgroup results using the first subgroup
- if (n_active_subgroups > 1) {
- if (sg_group_id == 0) {
- const auto n_rounds =
- (n_active_subgroups + local_range - 1) / local_range;
- for (unsigned int round = 0; round < n_rounds; ++round) {
- const unsigned int idx = id_in_sg + round * local_range;
- const auto upper_bound =
- std::min(local_range, n_active_subgroups - round * local_range);
- auto local_sg_value = local_mem[idx < n_active_subgroups ? idx : 0];
- for (unsigned int stride = 1; stride < upper_bound; stride <<= 1) {
- auto tmp = sg.shuffle_up(local_sg_value, stride);
- if (id_in_sg >= stride) {
- if (idx < n_active_subgroups)
- final_reducer.join(&local_sg_value, &tmp);
- else
- local_sg_value = tmp;
- }
- }
- if (idx < n_active_subgroups) {
- local_mem[idx] = local_sg_value;
- if (round > 0)
- final_reducer.join(&local_mem[idx],
- &local_mem[round * local_range - 1]);
- }
- if (round + 1 < n_rounds) sycl::group_barrier(sg);
- }
- }
- sycl::group_barrier(item.get_group());
- }
-
- // add results to all subgroups
- if (sg_group_id > 0)
- final_reducer.join(&local_value, &local_mem[sg_group_id - 1]);
-}
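
The subgroup phase above is a Hillis-Steele inclusive scan via shuffle_up, followed by a one-lane shift (with init() in lane 0) to make it exclusive. A serial model under those assumptions (exclusive_scan_model is hypothetical):

#include <cstddef>
#include <vector>

std::vector<double> exclusive_scan_model(std::vector<double> lane) {
  const std::size_t n = lane.size();
  for (std::size_t stride = 1; stride < n; stride <<= 1) {
    const std::vector<double> prev = lane;  // stands in for lock-step lanes
    for (std::size_t i = stride; i < n; ++i)
      lane[i] = prev[i] + prev[i - stride];  // join() where id_in_sg >= stride
  }
  std::vector<double> out(n, 0.0);  // the identity (0 for sums) fills lane 0
  for (std::size_t i = 1; i < n; ++i) out[i] = lane[i - 1];
  return out;
}
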
-
-template <class FunctorType, class... Traits>
-class ParallelScanSYCLBase {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- protected:
- using Member = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using LaunchBounds = typename Policy::launch_bounds;
-
- public:
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
- using functor_type = FunctorType;
- using size_type = Kokkos::Experimental::SYCL::size_type;
- using index_type = typename Policy::index_type;
-
- protected:
- const FunctorType m_functor;
- const Policy m_policy;
- pointer_type m_scratch_space = nullptr;
-
- // Only let one Parallel/Scan modify the shared memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::scoped_lock<std::mutex> m_shared_memory_lock;
-
- private:
- template <typename FunctorWrapper>
- void scan_internal(sycl::queue& q, const FunctorWrapper& functor_wrapper,
- pointer_type global_mem, std::size_t size) const {
- // FIXME_SYCL optimize
- constexpr size_t wgroup_size = 128;
- auto n_wgroups = (size + wgroup_size - 1) / wgroup_size;
- pointer_type group_results = global_mem + n_wgroups * wgroup_size;
-
- auto local_scans = q.submit([&](sycl::handler& cgh) {
- // Store subgroup totals
- const auto min_subgroup_size =
- q.get_device()
- .template get_info<sycl::info::device::sub_group_sizes>()
- .front();
- sycl::accessor<value_type, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- local_mem(sycl::range<1>((wgroup_size + min_subgroup_size - 1) /
- min_subgroup_size),
- cgh);
-
- cgh.parallel_for(
- sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
- [=](sycl::nd_item<1> item) {
- const FunctorType& functor = functor_wrapper.get_functor();
- typename Analysis::Reducer final_reducer(&functor);
-
- const auto local_id = item.get_local_linear_id();
- const auto global_id = item.get_global_linear_id();
-
- // Initialize local memory
- value_type local_value;
- if (global_id < size)
- local_value = global_mem[global_id];
- else
- final_reducer.init(&local_value);
-
- workgroup_scan<>(item, final_reducer, local_mem.get_pointer(),
- local_value, wgroup_size);
-
- if (n_wgroups > 1 && local_id == wgroup_size - 1)
- group_results[item.get_group_linear_id()] =
- local_mem[item.get_sub_group().get_group_range()[0] - 1];
-
- // Write results to global memory
- if (global_id < size) global_mem[global_id] = local_value;
- });
- });
- q.ext_oneapi_submit_barrier(std::vector<sycl::event>{local_scans});
-
- if (n_wgroups > 1) {
- scan_internal(q, functor_wrapper, group_results, n_wgroups);
- auto update_with_group_results = q.submit([&](sycl::handler& cgh) {
- cgh.parallel_for(
- sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
- [=](sycl::nd_item<1> item) {
- const auto global_id = item.get_global_linear_id();
- const FunctorType& functor = functor_wrapper.get_functor();
- typename Analysis::Reducer final_reducer(&functor);
- if (global_id < size)
- final_reducer.join(&global_mem[global_id],
- &group_results[item.get_group_linear_id()]);
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{update_with_group_results});
- }
- }
-
- template <typename FunctorWrapper>
- sycl::event sycl_direct_launch(const FunctorWrapper& functor_wrapper,
- sycl::event memcpy_event) const {
- // Convenience references
- const Kokkos::Experimental::SYCL& space = m_policy.space();
- sycl::queue& q = space.sycl_queue();
-
- const std::size_t len = m_policy.end() - m_policy.begin();
-
- // Initialize global memory
- auto initialize_global_memory = q.submit([&](sycl::handler& cgh) {
- auto global_mem = m_scratch_space;
- auto begin = m_policy.begin();
-
- cgh.depends_on(memcpy_event);
- cgh.parallel_for(sycl::range<1>(len), [=](sycl::item<1> item) {
- const typename Policy::index_type id =
- static_cast<typename Policy::index_type>(item.get_id()) + begin;
- const FunctorType& functor = functor_wrapper.get_functor();
- typename Analysis::Reducer final_reducer(&functor);
-
- value_type update{};
- final_reducer.init(&update);
- if constexpr (std::is_void<WorkTag>::value)
- functor_wrapper.get_functor()(id, update, false);
- else
- functor_wrapper.get_functor()(WorkTag(), id, update, false);
- global_mem[id] = update;
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{initialize_global_memory});
-
- // Perform the actual exclusive scan
- scan_internal(q, functor_wrapper, m_scratch_space, len);
-
- // Write results to global memory
- auto update_global_results = q.submit([&](sycl::handler& cgh) {
- auto global_mem = m_scratch_space;
- cgh.parallel_for(sycl::range<1>(len), [=](sycl::item<1> item) {
- auto global_id = item.get_id(0);
-
- value_type update = global_mem[global_id];
- if constexpr (std::is_void<WorkTag>::value)
- functor_wrapper.get_functor()(global_id, update, true);
- else
- functor_wrapper.get_functor()(WorkTag(), global_id, update, true);
- global_mem[global_id] = update;
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{update_global_results});
- return update_global_results;
- }
-
- public:
- template <typename PostFunctor>
- void impl_execute(const PostFunctor& post_functor) {
- if (m_policy.begin() == m_policy.end()) return;
-
- auto& instance = *m_policy.space().impl_internal_space_instance();
- const std::size_t len = m_policy.end() - m_policy.begin();
-
- // Compute the total amount of memory we will need. We emulate the recursive
- // structure that is used to do the actual scan. Essentially, we need to
- // allocate memory for the whole range and then recursively for the reduced
- // group results until only one group is left.
- std::size_t total_memory = 0;
- {
- size_t wgroup_size = 128;
- size_t n_nested_size = len;
- size_t n_nested_wgroups;
- do {
- n_nested_wgroups = (n_nested_size + wgroup_size - 1) / wgroup_size;
- n_nested_size = n_nested_wgroups;
- total_memory += sizeof(value_type) * n_nested_wgroups * wgroup_size;
- } while (n_nested_wgroups > 1);
- total_memory += sizeof(value_type) * wgroup_size;
- }
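- // Note that wgroup_size has to match the value used in scan_internal.
- // Example: for len == 1000 the loop adds 8 * 128 values for the first
- // level and 1 * 128 values for the group totals; the final statement adds
- // another 128 values, i.e., 10 * 128 values in total.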
-
- // FIXME_SYCL consider only storing one value per block and recreate initial
- // results in the end before doing the final pass
- m_scratch_space = static_cast<sycl::device_ptr<value_type>>(
- instance.scratch_space(total_memory));
-
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
- indirectKernelMem = instance.get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
-
- sycl::event event =
- sycl_direct_launch(functor_wrapper, functor_wrapper.get_copy_event());
- functor_wrapper.register_event(event);
- post_functor();
- }
-
- ParallelScanSYCLBase(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_shared_memory_lock(m_policy.space()
- .impl_internal_space_instance()
- ->m_mutexScratchSpace) {}
-};
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::SYCL>
- : private ParallelScanSYCLBase<FunctorType, Traits...> {
- public:
- using Base = ParallelScanSYCLBase<FunctorType, Traits...>;
-
- inline void execute() {
- Base::impl_execute([]() {});
- }
-
- ParallelScan(const FunctorType& arg_functor,
- const typename Base::Policy& arg_policy)
- : Base(arg_functor, arg_policy) {}
-};
-
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::Experimental::SYCL>
- : private ParallelScanSYCLBase<FunctorType, Traits...> {
- public:
- using Base = ParallelScanSYCLBase<FunctorType, Traits...>;
-
- ReturnType& m_returnvalue;
- const Kokkos::Experimental::SYCL& m_exec;
-
- inline void execute() {
- Base::impl_execute([&]() {
- const long long nwork = Base::m_policy.end() - Base::m_policy.begin();
- if (nwork > 0) {
- const int size = Base::Analysis::value_size(Base::m_functor);
- DeepCopy<HostSpace, Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCL>(
- m_exec, &m_returnvalue, Base::m_scratch_space + nwork - 1, size);
- }
- });
- }
-
- ParallelScanWithTotal(const FunctorType& arg_functor,
- const typename Base::Policy& arg_policy,
- ReturnType& arg_returnvalue)
- : Base(arg_functor, arg_policy),
- m_returnvalue(arg_returnvalue),
- m_exec(arg_policy.space()) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_PARALLEL_TEAM_HPP
-#define KOKKOS_SYCL_PARALLEL_TEAM_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-#include <SYCL/Kokkos_SYCL_Parallel_Reduce.hpp> // workgroup_reduction
-#include <SYCL/Kokkos_SYCL_Team.hpp>
-
-#include <vector>
-
-namespace Kokkos {
-namespace Impl {
-template <typename... Properties>
-class TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>
- : public PolicyTraits<Properties...> {
- public:
- using execution_policy = TeamPolicyInternal;
-
- using traits = PolicyTraits<Properties...>;
-
- template <typename ExecSpace, typename... OtherProperties>
- friend class TeamPolicyInternal;
-
- private:
- typename traits::execution_space m_space;
- int m_league_size;
- int m_team_size;
- int m_vector_length;
- size_t m_team_scratch_size[2];
- size_t m_thread_scratch_size[2];
- int m_chunk_size;
- bool m_tune_team_size;
- bool m_tune_vector_length;
-
- public:
- using execution_space = Kokkos::Experimental::SYCL;
-
- template <class... OtherProperties>
- TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
- m_league_size = p.m_league_size;
- m_team_size = p.m_team_size;
- m_vector_length = p.m_vector_length;
- m_team_scratch_size[0] = p.m_team_scratch_size[0];
- m_team_scratch_size[1] = p.m_team_scratch_size[1];
- m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
- m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
- m_chunk_size = p.m_chunk_size;
- m_space = p.m_space;
- m_tune_team_size = p.m_tune_team_size;
- m_tune_vector_length = p.m_tune_vector_length;
- }
-
- template <typename FunctorType>
- int team_size_max(FunctorType const& f, ParallelForTag const&) const {
- return internal_team_size_max_for(f);
- }
-
- template <class FunctorType>
- inline int team_size_max(const FunctorType& f,
- const ParallelReduceTag&) const {
- return internal_team_size_max_reduce(f);
- }
-
- template <class FunctorType, class ReducerType>
- inline int team_size_max(const FunctorType& f, const ReducerType& /*r*/,
- const ParallelReduceTag&) const {
- return internal_team_size_max_reduce(f);
- }
-
- template <typename FunctorType>
- int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
- return internal_team_size_recommended_for(f);
- }
-
- template <typename FunctorType>
- inline int team_size_recommended(FunctorType const& f,
- ParallelReduceTag const&) const {
- return internal_team_size_recommended_reduce(f);
- }
-
- template <class FunctorType, class ReducerType>
- int team_size_recommended(FunctorType const& f, ReducerType const&,
- ParallelReduceTag const&) const {
- return internal_team_size_recommended_reduce(f);
- }
- inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
- inline bool impl_auto_team_size() const { return m_tune_team_size; }
- // FIXME_SYCL This is correct in most cases, but not necessarily in case a
- // custom sycl::queue is used to initialize the execution space.
- static int vector_length_max() {
- std::vector<size_t> sub_group_sizes =
- execution_space{}
- .impl_internal_space_instance()
- ->m_queue->get_device()
- .template get_info<sycl::info::device::sub_group_sizes>();
- return *std::max_element(sub_group_sizes.begin(), sub_group_sizes.end());
- }
-
- private:
- static int verify_requested_vector_length(int requested_vector_length) {
- int test_vector_length =
- std::min(requested_vector_length, vector_length_max());
-
- // Allow only power-of-two vector_length
- if (!(is_integral_power_of_two(test_vector_length))) {
- int test_pow2 = 1;
- while (test_pow2 < test_vector_length) test_pow2 <<= 1;
- test_vector_length = test_pow2 >> 1;
- }
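- // Example: a request of 48 with vector_length_max() == 64 is first
- // clamped to 48 and then rounded down to the power of two 32.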
-
- return test_vector_length;
- }
-
- public:
- static int scratch_size_max(int level) {
- // FIXME_SYCL arbitrarily setting the level-0 limit to 32kB and the
- // level-1 limit to 20MB
- return level == 0 ? 32 * 1024 : 20 * 1024 * 1024;
- }
- inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
- inline void impl_set_team_size(size_t size) { m_team_size = size; }
- int impl_vector_length() const { return m_vector_length; }
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED int vector_length() const { return impl_vector_length(); }
-#endif
-
- int team_size() const { return m_team_size; }
-
- int league_size() const { return m_league_size; }
-
- size_t scratch_size(int level, int team_size_ = -1) const {
- if (team_size_ < 0) team_size_ = m_team_size;
- return m_team_scratch_size[level] +
- team_size_ * m_thread_scratch_size[level];
- }
-
- size_t team_scratch_size(int level) const {
- return m_team_scratch_size[level];
- }
-
- size_t thread_scratch_size(int level) const {
- return m_thread_scratch_size[level];
- }
-
- typename traits::execution_space space() const { return m_space; }
-
- TeamPolicyInternal()
- : m_space(typename traits::execution_space()),
- m_league_size(0),
- m_team_size(-1),
- m_vector_length(0),
- m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(vector_length_max()),
- m_tune_team_size(false),
- m_tune_vector_length(false) {}
-
- /** \brief Specify league size, request team size */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- int team_size_request, int vector_length_request = 1)
- : m_space(space_),
- m_league_size(league_size_),
- m_team_size(team_size_request),
- m_vector_length(
- (vector_length_request > 0)
- ? verify_requested_vector_length(vector_length_request)
- : (verify_requested_vector_length(1))),
- m_team_scratch_size{0, 0},
- m_thread_scratch_size{0, 0},
- m_chunk_size(vector_length_max()),
- m_tune_team_size(bool(team_size_request <= 0)),
- m_tune_vector_length(bool(vector_length_request <= 0)) {
- // FIXME_SYCL Check that league size is permissible,
- // https://github.com/intel/llvm/pull/4064
-
- // Make sure total block size is permissible
- if (m_team_size * m_vector_length >
- static_cast<int>(
- m_space.impl_internal_space_instance()->m_maxWorkgroupSize)) {
- Impl::throw_runtime_exception(
- std::string("Kokkos::TeamPolicy<SYCL> the team size is too large. "
- "Team size x vector length is " +
- std::to_string(m_team_size * m_vector_length) +
- " but must be smaller than ") +
- std::to_string(
- m_space.impl_internal_space_instance()->m_maxWorkgroupSize));
- }
- }
-
- /** \brief Specify league size, request team size */
- TeamPolicyInternal(const execution_space space_, int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- int vector_length_request = 1)
- : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
- /** \brief Specify league size and team size, request vector length*/
- TeamPolicyInternal(const execution_space space_, int league_size_,
- int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
-
- /** \brief Specify league size, request team size and vector length*/
- TeamPolicyInternal(const execution_space space_, int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(space_, league_size_, -1, -1) {}
-
- TeamPolicyInternal(int league_size_, int team_size_request,
- int vector_length_request = 1)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_,
- team_size_request, vector_length_request) {}
-
- TeamPolicyInternal(int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- int vector_length_request = 1)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
- vector_length_request) {}
-
- /** \brief Specify league size and team size, request vector length*/
- TeamPolicyInternal(int league_size_, int team_size_request,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_,
- team_size_request, -1) {}
-
- /** \brief Specify league size, request team size and vector length*/
- TeamPolicyInternal(int league_size_,
- const Kokkos::AUTO_t& /* team_size_request */,
- const Kokkos::AUTO_t& /* vector_length_request */)
- : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
- -1) {}
-
- int chunk_size() const { return m_chunk_size; }
-
- TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
- m_chunk_size = chunk_size_;
- return *this;
- }
-
- /** \brief set per team scratch size for a specific level of the scratch
- * hierarchy */
- TeamPolicyInternal& set_scratch_size(int level,
- PerTeamValue const& per_team) {
- m_team_scratch_size[level] = per_team.value;
- return *this;
- }
-
- /** \brief set per thread scratch size for a specific level of the scratch
- * hierarchy */
- TeamPolicyInternal& set_scratch_size(int level,
- PerThreadValue const& per_thread) {
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- /** \brief set per thread and per team scratch size for a specific level of
- * the scratch hierarchy */
- TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
- PerThreadValue const& per_thread) {
- m_team_scratch_size[level] = per_team.value;
- m_thread_scratch_size[level] = per_thread.value;
- return *this;
- }
-
- using member_type = Kokkos::Impl::SYCLTeamMember;
-
- protected:
- template <class FunctorType>
- int internal_team_size_max_for(const FunctorType& /*f*/) const {
- // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
- // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
- // total:
- // 2 * sizeof(double) + m_team_scratch_size[0]
- // + m_team_size * (sizeof(double) + m_thread_scratch_size[0])
- const int max_threads_for_memory =
- (space().impl_internal_space_instance()->m_maxShmemPerBlock -
- 2 * sizeof(double) - m_team_scratch_size[0]) /
- (sizeof(double) + m_thread_scratch_size[0]);
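- // Example: with 64kB of shared memory per block and no user scratch this
- // yields (65536 - 16) / 8 = 8190 threads, before the workgroup-size cap
- // and the vector length are applied below.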
- return std::min({
- int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
- // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
-#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
- defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
- defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
- 256,
-#endif
- max_threads_for_memory
- }) /
- impl_vector_length();
- }
-
- template <class FunctorType>
- int internal_team_size_max_reduce(const FunctorType& f) const {
- using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
- TeamPolicyInternal, FunctorType>;
- using value_type = typename Analysis::value_type;
- const int value_count = Analysis::value_count(f);
-
- // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
- // reducer_memsize = sizeof(value_type) * m_team_size * value_count
- // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
- // total:
- // 2 * sizeof(double) + m_team_scratch_size[0]
- // + m_team_size * (sizeof(double) + sizeof(value_type) * value_count
- // + m_thread_scratch_size[0])
- const int max_threads_for_memory =
- (space().impl_internal_space_instance()->m_maxShmemPerBlock -
- 2 * sizeof(double) - m_team_scratch_size[0]) /
- (sizeof(double) + sizeof(value_type) * value_count +
- m_thread_scratch_size[0]);
- return std::min<int>({
- int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
- // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
-#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
- defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
- defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
- 256,
-#endif
- max_threads_for_memory
- }) /
- impl_vector_length();
- }
-
- template <class FunctorType>
- int internal_team_size_recommended_for(const FunctorType& f) const {
- // FIXME_SYCL improve
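- // Round the maximum team size down to a power of two, e.g., a maximum of
- // 300 yields a recommended team size of 256.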
- return 1 << Kokkos::Impl::int_log2(internal_team_size_max_for(f));
- }
-
- template <class FunctorType>
- int internal_team_size_recommended_reduce(const FunctorType& f) const {
- // FIXME_SYCL improve
- return 1 << Kokkos::Impl::int_log2(internal_team_size_max_reduce(f));
- }
-};
-
-template <typename FunctorType, typename... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Experimental::SYCL> {
- public:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>;
- using functor_type = FunctorType;
- using size_type = ::Kokkos::Experimental::SYCL::size_type;
-
- private:
- using member_type = typename Policy::member_type;
- using work_tag = typename Policy::work_tag;
- using launch_bounds = typename Policy::launch_bounds;
-
- FunctorType const m_functor;
- Policy const m_policy;
- size_type const m_league_size;
- int m_team_size;
- size_type const m_vector_size;
- int m_shmem_begin;
- int m_shmem_size;
- sycl::device_ptr<char> m_global_scratch_ptr;
- size_t m_scratch_size[2];
- // Only let one ParallelFor/Reduce modify the team scratch memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::scoped_lock<std::mutex> m_scratch_lock;
-
- template <typename FunctorWrapper>
- sycl::event sycl_direct_launch(const Policy& policy,
- const FunctorWrapper& functor_wrapper,
- const sycl::event& memcpy_events) const {
- // Convenience references
- const Kokkos::Experimental::SYCL& space = policy.space();
- sycl::queue& q = space.sycl_queue();
-
- auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
- // FIXME_SYCL accessors seem to need a size greater than zero at least for
- // host queues
- sycl::accessor<char, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- team_scratch_memory_L0(
- sycl::range<1>(
- std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
- cgh);
-
- // Avoid capturing *this since it might not be trivially copyable
- const auto shmem_begin = m_shmem_begin;
- const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
- sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
-
- auto lambda = [=](sycl::nd_item<2> item) {
- const member_type team_member(
- team_scratch_memory_L0.get_pointer(), shmem_begin, scratch_size[0],
- global_scratch_ptr + item.get_group(1) * scratch_size[1],
- scratch_size[1], item);
- if constexpr (std::is_void<work_tag>::value)
- functor_wrapper.get_functor()(team_member);
- else
- functor_wrapper.get_functor()(work_tag(), team_member);
- };
-
- static sycl::kernel kernel = [&] {
- sycl::kernel_id functor_kernel_id =
- sycl::get_kernel_id<decltype(lambda)>();
- auto kernel_bundle =
- sycl::get_kernel_bundle<sycl::bundle_state::executable>(
- q.get_context(), std::vector{functor_kernel_id});
- return kernel_bundle.get_kernel(functor_kernel_id);
- }();
- auto max_sg_size =
- kernel
- .get_info<sycl::info::kernel_device_specific::max_sub_group_size>(
- q.get_device(),
- sycl::range<3>(m_team_size, m_vector_size, 1));
- auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
- // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle to
- // be used gives a runtime error.
- // cgh.use_kernel_bundle(kernel_bundle);
-
- cgh.depends_on(memcpy_events);
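- // Launch over a 2D nd_range in which dimension 0 indexes the threads of a
- // team and dimension 1 combines league and vector lanes, so that each
- // workgroup corresponds to exactly one team.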
- cgh.parallel_for(
- sycl::nd_range<2>(
- sycl::range<2>(m_team_size, m_league_size * final_vector_size),
- sycl::range<2>(m_team_size, final_vector_size)),
- lambda);
- });
- q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
- return parallel_for_event;
- }
-
- public:
- inline void execute() const {
- if (m_league_size == 0) return;
-
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
- indirectKernelMem = m_policy.space()
- .impl_internal_space_instance()
- ->get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
-
- sycl::event event = sycl_direct_launch(m_policy, functor_wrapper,
- functor_wrapper.get_copy_event());
- functor_wrapper.register_event(event);
- }
-
- ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock(arg_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- // FIXME_SYCL optimize
- if (m_team_size < 0)
- m_team_size =
- m_policy.team_size_recommended(arg_functor, ParallelForTag{});
-
- m_shmem_begin = (sizeof(double) * (m_team_size + 2));
- m_shmem_size =
- (m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
- auto& space = *m_policy.space().impl_internal_space_instance();
- m_global_scratch_ptr =
- static_cast<sycl::device_ptr<char>>(space.resize_team_scratch_space(
- static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
-
- if (static_cast<int>(space.m_maxShmemPerBlock) <
- m_shmem_size - m_shmem_begin) {
- std::stringstream out;
- out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
- "Requested "
- << m_shmem_size - m_shmem_begin << " bytes but maximum is "
- << space.m_maxShmemPerBlock << '\n';
- Kokkos::Impl::throw_runtime_exception(out.str());
- }
-
- const auto max_team_size =
- m_policy.team_size_max(arg_functor, ParallelForTag{});
- if (m_team_size > max_team_size)
- Kokkos::Impl::throw_runtime_exception(
- "Kokkos::Impl::ParallelFor<SYCL> requested too large team size. The "
- "maximal team_size is " +
- std::to_string(max_team_size) + '!');
- }
-};
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Experimental::SYCL> {
- public:
- using Policy = TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>;
-
- private:
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
- using member_type = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using launch_bounds = typename Policy::launch_bounds;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
-
- public:
- using functor_type = FunctorType;
- using size_type = Kokkos::Experimental::SYCL::size_type;
-
- private:
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const bool m_result_ptr_device_accessible;
- size_type m_shmem_begin;
- size_type m_shmem_size;
- sycl::device_ptr<char> m_global_scratch_ptr;
- size_t m_scratch_size[2];
- const size_type m_league_size;
- int m_team_size;
- const size_type m_vector_size;
- // Only let one ParallelFor/Reduce modify the team scratch memory. The
- // constructor acquires the mutex which is released in the destructor.
- std::scoped_lock<std::mutex> m_scratch_lock;
-
- template <typename PolicyType, typename FunctorWrapper,
- typename ReducerWrapper>
- sycl::event sycl_direct_launch(
- const PolicyType& policy, const FunctorWrapper& functor_wrapper,
- const ReducerWrapper& reducer_wrapper,
- const std::vector<sycl::event>& memcpy_events) const {
- // Convenience references
- const Kokkos::Experimental::SYCL& space = policy.space();
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *space.impl_internal_space_instance();
- sycl::queue& q = space.sycl_queue();
-
- const unsigned int value_count =
- Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
- std::size_t size = std::size_t(m_league_size) * m_team_size * m_vector_size;
- value_type* results_ptr = nullptr;
-
- sycl::event last_reduction_event;
-
- // If size <= 1, we call init(), the functor (only if size == 1), and
- // final() just once, working on the global scratch memory; copying the
- // result back to m_result_ptr happens further below.
- if (size <= 1) {
- results_ptr =
- static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
- sizeof(value_type) * std::max(value_count, 1u)));
- sycl::global_ptr<value_type> device_accessible_result_ptr =
- m_result_ptr_device_accessible ? m_result_ptr : nullptr;
-
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- // FIXME_SYCL accessors seem to need a size greater than zero at least
- // for host queues
- sycl::accessor<char, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- team_scratch_memory_L0(
- sycl::range<1>(
- std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
- cgh);
-
- // Avoid capturing *this since it might not be trivially copyable
- const auto shmem_begin = m_shmem_begin;
- const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
- sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
-
- cgh.depends_on(memcpy_events);
- cgh.parallel_for(
- sycl::nd_range<2>(sycl::range<2>(1, 1), sycl::range<2>(1, 1)),
- [=](sycl::nd_item<2> item) {
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(
- reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
-
- reference_type update = final_reducer.init(results_ptr);
- if (size == 1) {
- const member_type team_member(
- team_scratch_memory_L0.get_pointer(), shmem_begin,
- scratch_size[0], global_scratch_ptr, scratch_size[1], item);
- if constexpr (std::is_void<WorkTag>::value)
- functor(team_member, update);
- else
- functor(WorkTag(), team_member, update);
- }
- final_reducer.final(results_ptr);
- if (device_accessible_result_ptr)
- final_reducer.copy(device_accessible_result_ptr,
- &results_ptr[0]);
- });
- });
- q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- last_reduction_event = parallel_reduce_event;
- } else {
- // Otherwise (if the total range has more than one element), every
- // workgroup reduces its values separately and writes its partial result
- // back to global memory; the last workgroup to finish, determined via an
- // atomic counter, then combines all partial results into the final value.
- auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
- auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
- instance.scratch_flags(sizeof(unsigned int)));
-
- // FIXME_SYCL accessors seem to need a size greater than zero at least
- // for host queues
- sycl::accessor<char, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- team_scratch_memory_L0(
- sycl::range<1>(
- std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
- cgh);
-
- // Avoid capturing *this since it might not be trivially copyable
- const auto shmem_begin = m_shmem_begin;
- const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
- sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
-
- auto team_reduction_factory =
- [&](sycl::accessor<value_type, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- local_mem,
- sycl::device_ptr<value_type> results_ptr) mutable {
- sycl::global_ptr<value_type> device_accessible_result_ptr =
- m_result_ptr_device_accessible ? m_result_ptr : nullptr;
- auto lambda = [=](sycl::nd_item<2> item) {
- auto n_wgroups =
- item.get_group_range()[0] * item.get_group_range()[1];
- auto wgroup_size =
- item.get_local_range()[0] * item.get_local_range()[1];
- auto size = n_wgroups * wgroup_size;
-
- auto& num_teams_done = reinterpret_cast<unsigned int&>(
- local_mem[wgroup_size * std::max(value_count, 1u)]);
- const auto local_id = item.get_local_linear_id();
- const auto& functor = functor_wrapper.get_functor();
- const auto& selected_reducer = ReducerConditional::select(
- static_cast<const FunctorType&>(functor),
- static_cast<const ReducerType&>(
- reducer_wrapper.get_functor()));
- typename Analysis::Reducer final_reducer(&selected_reducer);
-
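- // For value types whose size is only known at run time, each thread works
- // on a slot in local_mem; for statically sized value types, a
- // stack-allocated local_value is used instead.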
- if constexpr (Analysis::StaticValueSize == 0) {
- reference_type update =
- final_reducer.init(&local_mem[local_id * value_count]);
- const member_type team_member(
- team_scratch_memory_L0.get_pointer(), shmem_begin,
- scratch_size[0],
- global_scratch_ptr + item.get_group(1) * scratch_size[1],
- scratch_size[1], item);
- if constexpr (std::is_void<WorkTag>::value)
- functor(team_member, update);
- else
- functor(WorkTag(), team_member, update);
- item.barrier(sycl::access::fence_space::local_space);
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count,
- selected_reducer, false,
- std::min<std::size_t>(size,
- item.get_local_range()[0] *
- item.get_local_range()[1]));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done = ++scratch_flags_ref;
- }
- sycl::group_barrier(item.get_group());
- if (num_teams_done == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_mem[local_id * value_count]);
- else {
- final_reducer.copy(&local_mem[local_id * value_count],
- &results_ptr[local_id * value_count]);
- for (unsigned int id = local_id + wgroup_size;
- id < n_wgroups; id += wgroup_size) {
- final_reducer.join(&local_mem[local_id * value_count],
- &results_ptr[id * value_count]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), results_ptr,
- device_accessible_result_ptr, value_count,
- selected_reducer, true,
- std::min(n_wgroups, item.get_local_range()[0] *
- item.get_local_range()[1]));
- }
- } else {
- value_type local_value;
- reference_type update = final_reducer.init(&local_value);
- const member_type team_member(
- team_scratch_memory_L0.get_pointer(), shmem_begin,
- scratch_size[0],
- global_scratch_ptr + item.get_group(1) * scratch_size[1],
- scratch_size[1], item);
- if constexpr (std::is_void<WorkTag>::value)
- functor(team_member, update);
- else
- functor(WorkTag(), team_member, update);
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, false,
- std::min<std::size_t>(size,
- item.get_local_range()[0] *
- item.get_local_range()[1]));
-
- if (local_id == 0) {
- sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
- sycl::memory_scope::device,
- sycl::access::address_space::global_space>
- scratch_flags_ref(*scratch_flags);
- num_teams_done = ++scratch_flags_ref;
- }
- item.barrier(sycl::access::fence_space::local_space);
- if (num_teams_done == n_wgroups) {
- if (local_id >= n_wgroups)
- final_reducer.init(&local_value);
- else {
- local_value = results_ptr[local_id];
- for (unsigned int id = local_id + wgroup_size;
- id < n_wgroups; id += wgroup_size) {
- final_reducer.join(&local_value, &results_ptr[id]);
- }
- }
-
- SYCLReduction::workgroup_reduction<>(
- item, local_mem.get_pointer(), local_value, results_ptr,
- device_accessible_result_ptr, final_reducer, true,
- std::min(n_wgroups, item.get_local_range()[0] *
- item.get_local_range()[1]));
- }
- }
- };
- return lambda;
- };
-
- auto dummy_reduction_lambda = team_reduction_factory({1, cgh}, nullptr);
-
- static sycl::kernel kernel = [&] {
- sycl::kernel_id functor_kernel_id =
- sycl::get_kernel_id<decltype(dummy_reduction_lambda)>();
- auto kernel_bundle =
- sycl::get_kernel_bundle<sycl::bundle_state::executable>(
- q.get_context(), std::vector{functor_kernel_id});
- return kernel_bundle.get_kernel(functor_kernel_id);
- }();
- auto max_sg_size = kernel.get_info<
- sycl::info::kernel_device_specific::max_sub_group_size>(
- q.get_device(), sycl::range<3>(m_team_size, m_vector_size, 1));
- auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
- // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle to
- // be used gives a runtime error.
- // cgh.use_kernel_bundle(kernel_bundle);
-
- auto wgroup_size = m_team_size * final_vector_size;
- std::size_t size = std::size_t(m_league_size) * wgroup_size;
- sycl::accessor<value_type, 1, sycl::access::mode::read_write,
- sycl::access::target::local>
- local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u) +
- (sizeof(unsigned int) + sizeof(value_type) - 1) /
- sizeof(value_type),
- cgh);
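- // local_mem layout: wgroup_size * value_count slots for the per-thread
- // partial results, followed by enough value_type slots to hold the
- // unsigned int that counts the workgroups that have finished.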
-
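- // One result slot (per value) is allocated for each workgroup; the last
- // workgroup to finish combines these partial results.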
- const auto init_size =
- std::max<std::size_t>((size + wgroup_size - 1) / wgroup_size, 1);
- results_ptr =
- static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
- sizeof(value_type) * std::max(value_count, 1u) * init_size));
-
- auto reduction_lambda = team_reduction_factory(local_mem, results_ptr);
-
- cgh.depends_on(memcpy_events);
-
- cgh.parallel_for(
- sycl::nd_range<2>(
- sycl::range<2>(m_team_size, m_league_size * m_vector_size),
- sycl::range<2>(m_team_size, m_vector_size)),
- reduction_lambda);
- });
- last_reduction_event = q.ext_oneapi_submit_barrier(
- std::vector<sycl::event>{parallel_reduce_event});
- }
-
- // At this point, the reduced value is written to the entry in results_ptr
- // and all that is left is to copy it back to the given result pointer if
- // necessary.
- if (m_result_ptr && !m_result_ptr_device_accessible) {
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
- Kokkos::Experimental::SYCLDeviceUSMSpace>(
- space, m_result_ptr, results_ptr,
- sizeof(*m_result_ptr) * value_count);
- }
-
- return last_reduction_event;
- }
-
- public:
- inline void execute() {
- Kokkos::Experimental::Impl::SYCLInternal& instance =
- *m_policy.space().impl_internal_space_instance();
- using IndirectKernelMem =
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
- IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
- IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
- auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_reducer, indirectReducerMem);
-
- sycl::event event = sycl_direct_launch(
- m_policy, functor_wrapper, reducer_wrapper,
- {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
- functor_wrapper.register_event(event);
- reducer_wrapper.register_event(event);
- }
-
- private:
- void initialize() {
- // FIXME_SYCL optimize
- if (m_team_size < 0)
- m_team_size =
- m_policy.team_size_recommended(m_functor, ParallelReduceTag{});
- // The team size must be a power of two and at least two; if the requested
- // size does not satisfy this, use the largest power of two that does not
- // exceed it.
- if ((m_team_size & m_team_size - 1) || m_team_size < 2) {
- int temp_team_size = 2;
- while ((temp_team_size << 1) < m_team_size) temp_team_size <<= 1;
- m_team_size = temp_team_size;
- }
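- // Example: a requested team size of 100 is reduced to 64 here.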
-
- m_shmem_begin = (sizeof(double) * (m_team_size + 2));
- m_shmem_size =
- (m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
- auto& space = *m_policy.space().impl_internal_space_instance();
- m_global_scratch_ptr =
- static_cast<sycl::device_ptr<char>>(space.resize_team_scratch_space(
- static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
-
- if (static_cast<int>(space.m_maxShmemPerBlock) <
- m_shmem_size - m_shmem_begin) {
- std::stringstream out;
- out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
- "Requested "
- << m_shmem_size - m_shmem_begin << " bytes but maximum is "
- << space.m_maxShmemPerBlock << '\n';
- Kokkos::Impl::throw_runtime_exception(out.str());
- }
-
- if (m_team_size > m_policy.team_size_max(m_functor, ParallelReduceTag{}))
- Kokkos::Impl::throw_runtime_exception(
- "Kokkos::Impl::ParallelFor<SYCL> requested too large team size.");
- }
-
- public:
- template <class ViewType>
- ParallelReduce(
- FunctorType const& arg_functor, Policy const& arg_policy,
- ViewType const& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename ViewType::memory_space>::accessible),
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock(arg_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- initialize();
- }
-
- ParallelReduce(FunctorType const& arg_functor, Policy const& arg_policy,
- ReducerType const& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()),
- m_scratch_lock(arg_policy.space()
- .impl_internal_space_instance()
- ->m_team_scratch_mutex) {
- initialize();
- }
-};
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <Kokkos_HostSpace.hpp>
-#include <Kokkos_SYCL.hpp>
-#include <Kokkos_SYCL_Space.hpp>
-#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
-#include <SYCL/Kokkos_SYCL_Instance.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <impl/Kokkos_Profiling.hpp>
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-namespace Kokkos {
-namespace Impl {
-
-void DeepCopySYCL(void* dst, const void* src, size_t n) {
- Experimental::Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
-}
-
-void DeepCopyAsyncSYCL(const Kokkos::Experimental::SYCL& instance, void* dst,
- const void* src, size_t n) {
- // FIXME_SYCL memcpy doesn't respect submit_barrier which means that we need
- // to actually fence the execution space to make sure the memcpy is properly
- // enqueued when using out-of-order queues.
- sycl::queue& q = *instance.impl_internal_space_instance()->m_queue;
- q.wait_and_throw();
- auto event = q.memcpy(dst, src, n);
- q.ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
-}
-
-void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n) {
- Experimental::Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
- Experimental::SYCL().fence(
- "Kokkos::Impl::DeepCopyAsyncSYCL: fence after memcpy");
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-
-SYCLDeviceUSMSpace::SYCLDeviceUSMSpace()
- : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
-SYCLDeviceUSMSpace::SYCLDeviceUSMSpace(sycl::queue queue)
- : m_queue(std::move(queue)) {}
-
-SYCLSharedUSMSpace::SYCLSharedUSMSpace()
- : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
-SYCLSharedUSMSpace::SYCLSharedUSMSpace(sycl::queue queue)
- : m_queue(std::move(queue)) {}
-
-SYCLHostUSMSpace::SYCLHostUSMSpace()
- : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
-SYCLHostUSMSpace::SYCLHostUSMSpace(sycl::queue queue)
- : m_queue(std::move(queue)) {}
-
-void* allocate_sycl(
- const char* arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size, const Kokkos::Tools::SpaceHandle arg_handle,
- const RawMemoryAllocationFailure::AllocationMechanism failure_tag,
- const sycl::usm::alloc allocation_kind, const sycl::queue& queue) {
- void* const hostPtr = sycl::malloc(arg_alloc_size, queue, allocation_kind);
-
- if (hostPtr == nullptr)
- throw RawMemoryAllocationFailure(
- arg_alloc_size, 1, RawMemoryAllocationFailure::FailureMode::Unknown,
- failure_tag);
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, hostPtr,
- reported_size);
- }
-
- return hostPtr;
-}
-
-void* SYCLDeviceUSMSpace::allocate(const Kokkos::Experimental::SYCL& exec_space,
- const size_t arg_alloc_size) const {
- return allocate(exec_space, "[unlabeled]", arg_alloc_size);
-}
-
-void* SYCLDeviceUSMSpace::allocate(const Kokkos::Experimental::SYCL& exec_space,
- const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocDevice,
- sycl::usm::alloc::device,
- *exec_space.impl_internal_space_instance()->m_queue);
-}
-
-void* SYCLDeviceUSMSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-
-void* SYCLDeviceUSMSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocDevice,
- sycl::usm::alloc::device, m_queue);
-}
-
-void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
- const size_t arg_alloc_size) const {
- return allocate(exec_space, "[unlabeled]", arg_alloc_size);
-}
-void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
- const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocShared,
- sycl::usm::alloc::shared,
- *exec_space.impl_internal_space_instance()->m_queue);
-}
-
-void* SYCLSharedUSMSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void* SYCLSharedUSMSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocShared,
- sycl::usm::alloc::shared, m_queue);
-}
-
-void* SYCLHostUSMSpace::allocate(const SYCL& exec_space,
- const size_t arg_alloc_size) const {
- return allocate(exec_space, "[unlabeled]", arg_alloc_size);
-}
-void* SYCLHostUSMSpace::allocate(const SYCL& exec_space, const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocHost,
- sycl::usm::alloc::host,
- *exec_space.impl_internal_space_instance()->m_queue);
-}
-
-void* SYCLHostUSMSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void* SYCLHostUSMSpace::allocate(const char* arg_label,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return allocate_sycl(
- arg_label, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()),
- RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocHost,
- sycl::usm::alloc::host, m_queue);
-}
-
-void sycl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle,
- const sycl::queue& queue) {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
-
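- // Fence all execution space instances so that no kernel possibly still
- // using the allocation is in flight when the memory is freed.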
- SYCL::impl_static_fence(
- "Kokkos::Impl::sycl_deallocate: fence before deallocate");
- sycl::free(arg_alloc_ptr, queue);
-}
-
-void SYCLDeviceUSMSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-void SYCLDeviceUSMSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()), m_queue);
-}
-
-void SYCLSharedUSMSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void SYCLSharedUSMSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()), m_queue);
-}
-
-void SYCLHostUSMSpace::deallocate(void* const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void SYCLHostUSMSpace::deallocate(const char* arg_label,
- void* const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
- Kokkos::Tools::make_space_handle(name()), m_queue);
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::SYCLDeviceUSMSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::SYCLSharedUSMSpace, void>::s_root_record;
-
-SharedAllocationRecord<void, void> SharedAllocationRecord<
- Kokkos::Experimental::SYCLHostUSMSpace, void>::s_root_record;
-#endif
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLDeviceUSMSpace& space,
- const std::string& label, const size_t size,
- const SharedAllocationRecord<void, void>::function_type dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(space, label, size),
- sizeof(SharedAllocationHeader) + size, dealloc, label),
- m_space(space) {
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, label);
-
- // Copy to device memory
- Kokkos::Experimental::SYCL exec;
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, HostSpace>(
- exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
- exec.fence(
- "SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, "
- "void>::SharedAllocationRecord(): fence after copying header from "
- "HostSpace");
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& arg_exec_space,
- const Kokkos::Experimental::SYCLDeviceUSMSpace& space,
- const std::string& label, const size_t size,
- const SharedAllocationRecord<void, void>::function_type dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
- void>::s_root_record,
-#endif
- Kokkos::Impl::checked_allocation_with_header(arg_exec_space, space,
- label, size),
- sizeof(SharedAllocationHeader) + size, dealloc, label),
- m_space(space) {
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, label);
-
- // Copy to device memory
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, HostSpace>(
- arg_exec_space, RecordBase::m_alloc_ptr, &header,
- sizeof(SharedAllocationHeader));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& exec_space,
- const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(exec_space, arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCL& exec_space,
- const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(exec_space, arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
-
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
- void>::~SharedAllocationRecord() {
- const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, alloc_size - sizeof(SharedAllocationHeader));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
- void>::~SharedAllocationRecord() {
- const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, alloc_size - sizeof(SharedAllocationHeader));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
- void>::~SharedAllocationRecord() {
- const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- alloc_size, alloc_size - sizeof(SharedAllocationHeader));
-}
-
-//----------------------------------------------------------------------------
-
-} // namespace Impl
-} // namespace Kokkos
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes
-// here, where we have access to the associated *_timpl.hpp header files.
-template class HostInaccessibleSharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLDeviceUSMSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLSharedUSMSpace>;
-template class SharedAllocationRecordCommon<
- Kokkos::Experimental::SYCLHostUSMSpace>;
-
-} // namespace Impl
-} // namespace Kokkos
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
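-// Illustrative sketch (not Kokkos code; 'WidgetBase' is a hypothetical name)
-// of the explicit-instantiation technique used above: an 'extern template'
-// declaration in the header suppresses implicit instantiation in client
-// translation units, and a single explicit instantiation in one .cpp file
-// provides the definition:
-//
-//   // widget.hpp
-//   template <class T>
-//   struct WidgetBase {
-//     T value{};
-//     T get() const { return value; }
-//   };
-//   extern template struct WidgetBase<int>;  // declared, not instantiated
-//
-//   // widget.cpp -- the one translation unit paying the compile cost
-//   template struct WidgetBase<int>;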
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
-#define KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
-
-#include <Kokkos_Parallel.hpp>
-#include <KokkosExp_MDRangePolicy.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Serial> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
-
- using iterate_type = typename Kokkos::Impl::HostIterateTile<
- MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy;
-
- void exec() const {
- const typename Policy::member_type e = m_policy.end();
- for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- iterate_type(m_mdr_policy, m_functor)(i);
- }
- }
-
- public:
- inline void execute() const { this->exec(); }
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- /**
-     * 1024 here is just our guess for a reasonable max tile size;
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
- inline ParallelFor(const FunctorType& arg_functor,
- const MDRangePolicy& arg_policy)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {}
-};
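-// Usage sketch for the specialization above (hedged: assumes an initialized
-// runtime; 'a' is a hypothetical 2-D host View and N, M are placeholder
-// extents). Each flat index i handed to exec() selects one tile, which
-// HostIterateTile expands into the nested loops:
-//
-//   Kokkos::parallel_for(
-//       "fill2d",
-//       Kokkos::MDRangePolicy<Kokkos::Serial, Kokkos::Rank<2>>({0, 0}, {N, M}),
-//       KOKKOS_LAMBDA(const int i, const int j) { a(i, j) = i + j; });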
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Serial> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
-
- using WorkTag = typename MDRangePolicy::work_tag;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
-
- using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
- MDRangePolicy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
-
- using iterate_type =
- typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
- WorkTag, reference_type>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- inline void exec(reference_type update) const {
- const typename Policy::member_type e = m_policy.end();
- for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- iterate_type(m_mdr_policy, m_functor, update)(i);
- }
- }
-
- public:
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy&, const Functor&) {
- /**
-     * 1024 here is just our guess for a reasonable max tile size;
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
- inline void execute() const {
- const size_t pool_reduce_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
- const size_t team_reduce_size = 0; // Never shrinks
- const size_t team_shared_size = 0; // Never shrinks
- const size_t thread_local_size = 0; // Never shrinks
-
- auto* internal_instance = m_policy.space().impl_internal_space_instance();
- // Need to lock resize_thread_team_data
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
- internal_instance->resize_thread_team_data(
- pool_reduce_size, team_reduce_size, team_shared_size,
- thread_local_size);
-
- pointer_type ptr =
- m_result_ptr
- ? m_result_ptr
- : pointer_type(
- internal_instance->m_thread_team_data.pool_reduce_local());
-
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- reference_type update = final_reducer.init(ptr);
-
- this->exec(update);
-
- final_reducer.final(ptr);
- }
-
- template <class HostViewType>
- ParallelReduce(const FunctorType& arg_functor,
- const MDRangePolicy& arg_policy,
- const HostViewType& arg_result_view,
- std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result_view.data()) {
- static_assert(Kokkos::is_view<HostViewType>::value,
- "Kokkos::Serial reduce result must be a View");
-
- static_assert(
- Kokkos::Impl::MemorySpaceAccess<typename HostViewType::memory_space,
- Kokkos::HostSpace>::accessible,
- "Kokkos::Serial reduce result must be a View in HostSpace");
- }
-
- inline ParallelReduce(const FunctorType& arg_functor,
- MDRangePolicy arg_policy, const ReducerType& reducer)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
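-// Usage sketch for the ParallelReduce specialization above (hedged: 'a' is a
-// hypothetical host View; N and M are placeholder extents):
-//
-//   double sum = 0.0;
-//   Kokkos::parallel_reduce(
-//       Kokkos::MDRangePolicy<Kokkos::Serial, Kokkos::Rank<2>>({0, 0}, {N, M}),
-//       KOKKOS_LAMBDA(const int i, const int j, double& lsum) {
-//         lsum += a(i, j);
-//       },
-//       sum);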
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <Kokkos_Core.hpp>
-
-#include <Serial/Kokkos_Serial_Task.hpp>
-#include <impl/Kokkos_TaskQueue_impl.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template class TaskQueue<Kokkos::Serial, typename Kokkos::Serial::memory_space>;
-
-}
-} // namespace Kokkos
-
-#else
-void KOKKOS_CORE_SRC_IMPL_SERIAL_TASK_PREVENT_LINK_ERROR() {}
-#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
-#define KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::Serial> {
- private:
- using Policy = Kokkos::WorkGraphPolicy<Traits...>;
-
- Policy m_policy;
- FunctorType m_functor;
-
- template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- m_functor(w);
- }
-
- template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- const TagType t{};
- m_functor(t, w);
- }
-
- public:
- inline void execute() const noexcept {
- // Spin until COMPLETED_TOKEN.
- // END_TOKEN indicates no work is currently available.
-
- for (std::int32_t w = Policy::END_TOKEN;
- Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
- if (Policy::END_TOKEN != w) {
- exec_one<typename Policy::work_tag>(w);
- m_policy.completed_work(w);
- }
- }
- }
-
- inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_policy(arg_policy), m_functor(arg_functor) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* #ifndef KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP */
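-// Control-flow sketch of ParallelFor<WorkGraphPolicy>::execute() above
-// (pseudocode in comments; COMPLETED_TOKEN and END_TOKEN come from
-// WorkGraphPolicy):
-//
-//   loop:
-//     w = pop_work()
-//     if w == COMPLETED_TOKEN: stop          // whole graph is finished
-//     else if w == END_TOKEN:  retry         // nothing ready yet, keep spinning
-//     else: exec_one(w); completed_work(w)   // may make successors available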
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <cstdint>
-#include <limits>
-#include <utility>
-#include <iostream>
-#include <sstream>
-#include <thread>
-#include <mutex>
-
-#include <Kokkos_Core.hpp>
-
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_CPUDiscovery.hpp>
-#include <impl/Kokkos_Tools.hpp>
-#include <impl/Kokkos_ExecSpaceManager.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-namespace {
-std::mutex host_internal_cppthread_mutex;
-
-// std::thread compatible driver.
-// Recovery from an exception would require constant intra-thread health
-// verification, which would negatively impact runtime. As such, simply
-// abort the process.
-void internal_cppthread_driver() {
- try {
- ThreadsExec::driver();
- } catch (const std::exception &x) {
- std::cerr << "Exception thrown from worker thread: " << x.what()
- << std::endl;
- std::cerr.flush();
- std::abort();
- } catch (...) {
- std::cerr << "Exception thrown from worker thread" << std::endl;
- std::cerr.flush();
- std::abort();
- }
-}
-
-ThreadsExec s_threads_process;
-ThreadsExec *s_threads_exec[ThreadsExec::MAX_THREAD_COUNT] = {nullptr};
-std::thread::id s_threads_pid[ThreadsExec::MAX_THREAD_COUNT];
-std::pair<unsigned, unsigned> s_threads_coord[ThreadsExec::MAX_THREAD_COUNT];
-
-int s_thread_pool_size[3] = {0, 0, 0};
-
-unsigned s_current_reduce_size = 0;
-unsigned s_current_shared_size = 0;
-
-void (*volatile s_current_function)(ThreadsExec &, const void *);
-const void *volatile s_current_function_arg = nullptr;
-
-struct Sentinel {
- ~Sentinel() {
- if (s_thread_pool_size[0] || s_thread_pool_size[1] ||
- s_thread_pool_size[2] || s_current_reduce_size ||
- s_current_shared_size || s_current_function || s_current_function_arg ||
- s_threads_exec[0]) {
- std::cerr << "ERROR : Process exiting while Kokkos::Threads is still "
- "initialized"
- << std::endl;
- }
- }
-};
-
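-// Number of fan-in partners of 'rank' in the binary-tree barrier: counting
-// in reversed rank order (rank_rev = size - rank - 1), a thread has one
-// partner per trailing zero bit of rank_rev, bounded by the pool size, i.e.
-// partners at rank_rev + 1, + 2, + 4, ... (see execute_sleep() below).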
-inline unsigned fan_size(const unsigned rank, const unsigned size) {
- const unsigned rank_rev = size - (rank + 1);
- unsigned count = 0;
- for (unsigned n = 1; (rank_rev + n < size) && !(rank_rev & n); n <<= 1) {
- ++count;
- }
- return count;
-}
-
-} // namespace
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-//----------------------------------------------------------------------------
-// Spawn a thread
-
-void ThreadsExec::spawn() {
- std::thread t(internal_cppthread_driver);
- t.detach();
-}
-
-//----------------------------------------------------------------------------
-
-bool ThreadsExec::is_process() {
- static const std::thread::id master_pid = std::this_thread::get_id();
-
- return master_pid == std::this_thread::get_id();
-}
-
-void ThreadsExec::global_lock() { host_internal_cppthread_mutex.lock(); }
-
-void ThreadsExec::global_unlock() { host_internal_cppthread_mutex.unlock(); }
-
-//----------------------------------------------------------------------------
-
-void ThreadsExec::wait_yield(volatile int &flag, const int value) {
- while (value == flag) {
- std::this_thread::yield();
- }
-}
-
-void execute_function_noop(ThreadsExec &, const void *) {}
-
-void ThreadsExec::driver() {
- SharedAllocationRecord<void, void>::tracking_enable();
-
- ThreadsExec this_thread;
-
- while (ThreadsExec::Active == this_thread.m_pool_state) {
- (*s_current_function)(this_thread, s_current_function_arg);
-
- // Deactivate thread and wait for reactivation
- this_thread.m_pool_state = ThreadsExec::Inactive;
-
- wait_yield(this_thread.m_pool_state, ThreadsExec::Inactive);
- }
-}
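-// Worker lifecycle sketch: spawn() detaches a std::thread running driver().
-// The loop above runs the posted function, parks the thread as Inactive, and
-// yields until the master flips the state back to Active (next launch) or to
-// Terminating, which exits the loop and tears down this ThreadsExec entry.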
-
-ThreadsExec::ThreadsExec()
- : m_pool_base(nullptr),
- m_scratch(nullptr),
- m_scratch_reduce_end(0),
- m_scratch_thread_end(0),
- m_numa_rank(0),
- m_numa_core_rank(0),
- m_pool_rank(0),
- m_pool_size(0),
- m_pool_fan_size(0),
- m_pool_state(ThreadsExec::Terminating) {
- if (&s_threads_process != this) {
- // A spawned thread
-
- ThreadsExec *const nil = nullptr;
-
- // Which entry in 's_threads_exec', possibly determined from hwloc binding
- const int entry = reinterpret_cast<size_t>(s_current_function_arg) <
- size_t(s_thread_pool_size[0])
- ? reinterpret_cast<size_t>(s_current_function_arg)
- : size_t(Kokkos::hwloc::bind_this_thread(
- s_thread_pool_size[0], s_threads_coord));
-
- // Given a good entry set this thread in the 's_threads_exec' array
- if (entry < s_thread_pool_size[0] &&
- nil == atomic_compare_exchange(s_threads_exec + entry, nil, this)) {
- const std::pair<unsigned, unsigned> coord =
- Kokkos::hwloc::get_this_thread_coordinate();
-
- m_numa_rank = coord.first;
- m_numa_core_rank = coord.second;
- m_pool_base = s_threads_exec;
- m_pool_rank = s_thread_pool_size[0] - (entry + 1);
- m_pool_rank_rev = s_thread_pool_size[0] - (pool_rank() + 1);
- m_pool_size = s_thread_pool_size[0];
- m_pool_fan_size = fan_size(m_pool_rank, m_pool_size);
- m_pool_state = ThreadsExec::Active;
-
- s_threads_pid[m_pool_rank] = std::this_thread::get_id();
-
- // Inform spawning process that the threads_exec entry has been set.
- s_threads_process.m_pool_state = ThreadsExec::Active;
- } else {
- // Inform spawning process that the threads_exec entry could not be set.
- s_threads_process.m_pool_state = ThreadsExec::Terminating;
- }
- } else {
-    // Enables 'parallel_for' to execute on an uninitialized Threads device
- m_pool_rank = 0;
- m_pool_size = 1;
- m_pool_state = ThreadsExec::Inactive;
-
- s_threads_pid[m_pool_rank] = std::this_thread::get_id();
- }
-}
-
-ThreadsExec::~ThreadsExec() {
- const unsigned entry = m_pool_size - (m_pool_rank + 1);
-
- using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
-
- if (m_scratch) {
- Record *const r = Record::get_record(m_scratch);
-
- m_scratch = nullptr;
-
- Record::decrement(r);
- }
-
- m_pool_base = nullptr;
- m_scratch_reduce_end = 0;
- m_scratch_thread_end = 0;
- m_numa_rank = 0;
- m_numa_core_rank = 0;
- m_pool_rank = 0;
- m_pool_size = 0;
- m_pool_fan_size = 0;
-
- m_pool_state = ThreadsExec::Terminating;
-
- if (&s_threads_process != this && entry < MAX_THREAD_COUNT) {
- ThreadsExec *const nil = nullptr;
-
- atomic_compare_exchange(s_threads_exec + entry, this, nil);
-
- s_threads_process.m_pool_state = ThreadsExec::Terminating;
- }
-}
-
-int ThreadsExec::get_thread_count() { return s_thread_pool_size[0]; }
-
-ThreadsExec *ThreadsExec::get_thread(const int init_thread_rank) {
- ThreadsExec *const th =
- init_thread_rank < s_thread_pool_size[0]
- ? s_threads_exec[s_thread_pool_size[0] - (init_thread_rank + 1)]
- : nullptr;
-
- if (nullptr == th || th->m_pool_rank != init_thread_rank) {
- std::ostringstream msg;
- msg << "Kokkos::Impl::ThreadsExec::get_thread ERROR : "
- << "thread " << init_thread_rank << " of " << s_thread_pool_size[0];
- if (nullptr == th) {
- msg << " does not exist";
- } else {
- msg << " has wrong thread_rank " << th->m_pool_rank;
- }
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
- return th;
-}
-
-//----------------------------------------------------------------------------
-
-void ThreadsExec::execute_sleep(ThreadsExec &exec, const void *) {
- ThreadsExec::global_lock();
- ThreadsExec::global_unlock();
-
- const int n = exec.m_pool_fan_size;
- const int rank_rev = exec.m_pool_size - (exec.m_pool_rank + 1);
-
- for (int i = 0; i < n; ++i) {
- Impl::spinwait_while_equal<int>(
- exec.m_pool_base[rank_rev + (1 << i)]->m_pool_state,
- ThreadsExec::Active);
- }
-
- exec.m_pool_state = ThreadsExec::Inactive;
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-void ThreadsExec::verify_is_process(const std::string &name,
- const bool initialized) {
- if (!is_process()) {
- std::string msg(name);
- msg.append(
- " FAILED : Called by a worker thread, can only be called by the master "
- "process.");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-
- if (initialized && 0 == s_thread_pool_size[0]) {
- std::string msg(name);
- msg.append(" FAILED : Threads not initialized.");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-}
-
-int ThreadsExec::in_parallel() {
-  // In parallel when: a thread function is executing, its argument is not
-  // the special process-thread sentinel, and either the master process is
-  // part of the worker pool or the caller is a worker thread.
- return s_current_function && (&s_threads_process != s_current_function_arg) &&
- (s_threads_process.m_pool_base || !is_process());
-}
-void ThreadsExec::fence() { internal_fence(Impl::fence_is_static::yes); }
-void ThreadsExec::fence(const std::string &name) {
- internal_fence(name, Impl::fence_is_static::yes);
-}
-
-void ThreadsExec::internal_fence(Impl::fence_is_static is_static) {
- internal_fence((is_static == Impl::fence_is_static::no)
- ? "Kokkos::ThreadsExec::fence: Unnamed Instance Fence"
- : "Kokkos::ThreadsExec::fence: Unnamed Static Fence",
- is_static);
-}
-
-// Wait for root thread to become inactive
-void ThreadsExec::internal_fence(const std::string &name,
- Impl::fence_is_static is_static) {
- const auto &fence_lam = [&]() {
- if (s_thread_pool_size[0]) {
- // Wait for the root thread to complete:
- Impl::spinwait_while_equal<int>(s_threads_exec[0]->m_pool_state,
- ThreadsExec::Active);
- }
-
- s_current_function = nullptr;
- s_current_function_arg = nullptr;
-
- // Make sure function and arguments are cleared before
- // potentially re-activating threads with a subsequent launch.
- memory_fence();
- };
- if (is_static == Impl::fence_is_static::yes) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
- name,
- Kokkos::Tools::Experimental::SpecialSynchronizationCases::
- GlobalDeviceSynchronization,
- fence_lam);
- } else {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
- name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
- fence_lam);
- }
-}
-
-/** \brief Begin execution of the asynchronous functor */
-void ThreadsExec::start(void (*func)(ThreadsExec &, const void *),
- const void *arg) {
- verify_is_process("ThreadsExec::start", true);
-
- if (s_current_function || s_current_function_arg) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("ThreadsExec::start() FAILED : already executing"));
- }
-
- s_current_function = func;
- s_current_function_arg = arg;
-
- // Make sure function and arguments are written before activating threads.
- memory_fence();
-
- // Activate threads:
- for (int i = s_thread_pool_size[0]; 0 < i--;) {
- s_threads_exec[i]->m_pool_state = ThreadsExec::Active;
- }
-
- if (s_threads_process.m_pool_size) {
- // Master process is the root thread, run it:
- (*func)(s_threads_process, arg);
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
- }
-}
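-// Launch-protocol sketch: callers pair start() with fence(), e.g. the
-// pattern used by the parallel dispatch classes:
-//
-//   ThreadsExec::start(&ParallelFor::exec, this);  // activate pool, run root
-//   ThreadsExec::fence();  // wait for the root thread, then clear function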
-
-//----------------------------------------------------------------------------
-
-bool ThreadsExec::sleep() {
- verify_is_process("ThreadsExec::sleep", true);
-
- if (&execute_sleep == s_current_function) return false;
-
- fence();
-
- ThreadsExec::global_lock();
-
- s_current_function = &execute_sleep;
-
- // Activate threads:
- for (unsigned i = s_thread_pool_size[0]; 0 < i;) {
- s_threads_exec[--i]->m_pool_state = ThreadsExec::Active;
- }
-
- return true;
-}
-
-bool ThreadsExec::wake() {
- verify_is_process("ThreadsExec::wake", true);
-
- if (&execute_sleep != s_current_function) return false;
-
- ThreadsExec::global_unlock();
-
- if (s_threads_process.m_pool_base) {
- execute_sleep(s_threads_process, nullptr);
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
- }
-
- fence();
-
- return true;
-}
-
-//----------------------------------------------------------------------------
-
-void ThreadsExec::execute_resize_scratch_in_serial() {
- const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
-
- auto deallocate_scratch_memory = [](ThreadsExec &exec) {
- if (exec.m_scratch) {
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
- Record *const r = Record::get_record(exec.m_scratch);
- exec.m_scratch = nullptr;
- Record::decrement(r);
- }
- };
- if (s_threads_process.m_pool_base) {
- for (unsigned i = s_thread_pool_size[0]; begin < i;) {
- deallocate_scratch_memory(*s_threads_exec[--i]);
- }
- }
-
- s_current_function = &first_touch_allocate_thread_private_scratch;
- s_current_function_arg = &s_threads_process;
-
- // Make sure function and arguments are written before activating threads.
- memory_fence();
-
- for (unsigned i = s_thread_pool_size[0]; begin < i;) {
- ThreadsExec &th = *s_threads_exec[--i];
-
- th.m_pool_state = ThreadsExec::Active;
-
- wait_yield(th.m_pool_state, ThreadsExec::Active);
- }
-
- if (s_threads_process.m_pool_base) {
- deallocate_scratch_memory(s_threads_process);
- s_threads_process.m_pool_state = ThreadsExec::Active;
- first_touch_allocate_thread_private_scratch(s_threads_process, nullptr);
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
- }
-
- s_current_function_arg = nullptr;
- s_current_function = nullptr;
-
- // Make sure function and arguments are cleared before proceeding.
- memory_fence();
-}
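-// Note the one-at-a-time activation above: each worker is woken and then
-// waited on individually, so the allocations proceed serially while each
-// thread still first-touches its own scratch (see
-// first_touch_allocate_thread_private_scratch below).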
-
-//----------------------------------------------------------------------------
-
-void *ThreadsExec::root_reduce_scratch() {
- return s_threads_process.reduce_memory();
-}
-
-void ThreadsExec::first_touch_allocate_thread_private_scratch(ThreadsExec &exec,
- const void *) {
- exec.m_scratch_reduce_end = s_threads_process.m_scratch_reduce_end;
- exec.m_scratch_thread_end = s_threads_process.m_scratch_thread_end;
-
- if (s_threads_process.m_scratch_thread_end) {
- // Allocate tracked memory:
- {
- using Record =
- Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
- Record *const r =
- Record::allocate(Kokkos::HostSpace(), "Kokkos::thread_scratch",
- s_threads_process.m_scratch_thread_end);
-
- Record::increment(r);
-
- exec.m_scratch = r->data();
- }
-
- unsigned *ptr = reinterpret_cast<unsigned *>(exec.m_scratch);
-
- unsigned *const end =
- ptr + s_threads_process.m_scratch_thread_end / sizeof(unsigned);
-
-    // First-touch the allocation from this thread so that, under a NUMA
-    // first-touch page-placement policy, its pages are mapped into this
-    // thread's local memory.
-    while (ptr < end) *ptr++ = 0;
- }
-}
-
-void *ThreadsExec::resize_scratch(size_t reduce_size, size_t thread_size) {
- enum { ALIGN_MASK = Kokkos::Impl::MEMORY_ALIGNMENT - 1 };
-
- fence();
-
- const size_t old_reduce_size = s_threads_process.m_scratch_reduce_end;
- const size_t old_thread_size = s_threads_process.m_scratch_thread_end -
- s_threads_process.m_scratch_reduce_end;
-
- reduce_size = (reduce_size + ALIGN_MASK) & ~ALIGN_MASK;
- thread_size = (thread_size + ALIGN_MASK) & ~ALIGN_MASK;
-
- // Increase size or deallocate completely.
-
- if ((old_reduce_size < reduce_size) || (old_thread_size < thread_size) ||
- ((reduce_size == 0 && thread_size == 0) &&
- (old_reduce_size != 0 || old_thread_size != 0))) {
- verify_is_process("ThreadsExec::resize_scratch", true);
-
- s_threads_process.m_scratch_reduce_end = reduce_size;
- s_threads_process.m_scratch_thread_end = reduce_size + thread_size;
-
- execute_resize_scratch_in_serial();
-
- s_threads_process.m_scratch = s_threads_exec[0]->m_scratch;
- }
-
- return s_threads_process.m_scratch;
-}
-
-//----------------------------------------------------------------------------
-
-void ThreadsExec::print_configuration(std::ostream &s, const bool detail) {
- verify_is_process("ThreadsExec::print_configuration", false);
-
- fence();
-
- const unsigned numa_count = Kokkos::hwloc::get_available_numa_count();
- const unsigned cores_per_numa = Kokkos::hwloc::get_available_cores_per_numa();
- const unsigned threads_per_core =
- Kokkos::hwloc::get_available_threads_per_core();
-
- // Forestall compiler warnings for unused variables.
- (void)numa_count;
- (void)cores_per_numa;
- (void)threads_per_core;
-
- s << "Kokkos::Threads";
-
-#if defined(KOKKOS_ENABLE_THREADS)
- s << " KOKKOS_ENABLE_THREADS";
-#endif
-#if defined(KOKKOS_ENABLE_HWLOC)
- s << " hwloc[" << numa_count << "x" << cores_per_numa << "x"
- << threads_per_core << "]";
-#endif
-
- if (s_thread_pool_size[0]) {
- s << " threads[" << s_thread_pool_size[0] << "]"
- << " threads_per_numa[" << s_thread_pool_size[1] << "]"
- << " threads_per_core[" << s_thread_pool_size[2] << "]";
- if (nullptr == s_threads_process.m_pool_base) {
- s << " Asynchronous";
- }
- s << " ReduceScratch[" << s_current_reduce_size << "]"
- << " SharedScratch[" << s_current_shared_size << "]";
- s << std::endl;
-
- if (detail) {
- for (int i = 0; i < s_thread_pool_size[0]; ++i) {
- ThreadsExec *const th = s_threads_exec[i];
-
- if (th) {
- const int rank_rev = th->m_pool_size - (th->m_pool_rank + 1);
-
- s << " Thread[ " << th->m_pool_rank << " : " << th->m_numa_rank << "."
- << th->m_numa_core_rank << " ]";
-
- s << " Fan{";
- for (int j = 0; j < th->m_pool_fan_size; ++j) {
- ThreadsExec *const thfan = th->m_pool_base[rank_rev + (1 << j)];
- s << " [ " << thfan->m_pool_rank << " : " << thfan->m_numa_rank
- << "." << thfan->m_numa_core_rank << " ]";
- }
- s << " }";
-
- if (th == &s_threads_process) {
- s << " is_process";
- }
- }
- s << std::endl;
- }
- }
- } else {
- s << " not initialized" << std::endl;
- }
-}
-
-//----------------------------------------------------------------------------
-
-int ThreadsExec::is_initialized() { return nullptr != s_threads_exec[0]; }
-
-void ThreadsExec::initialize(int thread_count_arg) {
- // legacy arguments
- unsigned thread_count = thread_count_arg == -1 ? 0 : thread_count_arg;
- unsigned use_numa_count = 0;
- unsigned use_cores_per_numa = 0;
- bool allow_asynchronous_threadpool = false;
- // need to provide an initializer for Intel compilers
- static const Sentinel sentinel = {};
-
- const bool is_initialized = 0 != s_thread_pool_size[0];
-
- unsigned thread_spawn_failed = 0;
-
- for (int i = 0; i < ThreadsExec::MAX_THREAD_COUNT; i++)
- s_threads_exec[i] = nullptr;
-
- if (!is_initialized) {
-    // If thread_count, use_numa_count, or use_cores_per_numa is zero
-    // then it is given a default value based upon hwloc detection
-    // and whether asynchronous execution is allowed.
-
- const bool hwloc_avail = Kokkos::hwloc::available();
- const bool hwloc_can_bind =
- hwloc_avail && Kokkos::hwloc::can_bind_threads();
-
- if (thread_count == 0) {
- thread_count = hwloc_avail
- ? Kokkos::hwloc::get_available_numa_count() *
- Kokkos::hwloc::get_available_cores_per_numa() *
- Kokkos::hwloc::get_available_threads_per_core()
- : 1;
- }
-
- const unsigned thread_spawn_begin = hwloc::thread_mapping(
- "Kokkos::Threads::initialize", allow_asynchronous_threadpool,
- thread_count, use_numa_count, use_cores_per_numa, s_threads_coord);
-
- const std::pair<unsigned, unsigned> proc_coord = s_threads_coord[0];
-
- if (thread_spawn_begin) {
- // Synchronous with s_threads_coord[0] as the process core
- // Claim entry #0 for binding the process core.
- s_threads_coord[0] = std::pair<unsigned, unsigned>(~0u, ~0u);
- }
-
- s_thread_pool_size[0] = thread_count;
- s_thread_pool_size[1] = s_thread_pool_size[0] / use_numa_count;
- s_thread_pool_size[2] = s_thread_pool_size[1] / use_cores_per_numa;
- s_current_function =
- &execute_function_noop; // Initialization work function
-
- for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
-
-      // If hwloc is available, the spawned thread will
-      // choose its own entry in 's_threads_coord';
-      // otherwise specify the entry here.
- s_current_function_arg =
- reinterpret_cast<void *>(hwloc_can_bind ? ~0u : ith);
-
- // Make sure all outstanding memory writes are complete
- // before spawning the new thread.
- memory_fence();
-
- // Spawn thread executing the 'driver()' function.
- // Wait until spawned thread has attempted to initialize.
- // If spawning and initialization is successful then
- // an entry in 's_threads_exec' will be assigned.
- ThreadsExec::spawn();
- wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
- if (s_threads_process.m_pool_state == ThreadsExec::Terminating) break;
- }
-
- // Wait for all spawned threads to deactivate before zeroing the function.
-
- for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
- // Try to protect against cache coherency failure by casting to volatile.
- ThreadsExec *const th = ((ThreadsExec * volatile *)s_threads_exec)[ith];
- if (th) {
- wait_yield(th->m_pool_state, ThreadsExec::Active);
- } else {
- ++thread_spawn_failed;
- }
- }
-
- s_current_function = nullptr;
- s_current_function_arg = nullptr;
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
-
- memory_fence();
-
- if (!thread_spawn_failed) {
- // Bind process to the core on which it was located before spawning
- // occurred
- if (hwloc_can_bind) {
- Kokkos::hwloc::bind_this_thread(proc_coord);
- }
-
- if (thread_spawn_begin) { // Include process in pool.
- const std::pair<unsigned, unsigned> coord =
- Kokkos::hwloc::get_this_thread_coordinate();
-
- s_threads_exec[0] = &s_threads_process;
- s_threads_process.m_numa_rank = coord.first;
- s_threads_process.m_numa_core_rank = coord.second;
- s_threads_process.m_pool_base = s_threads_exec;
- s_threads_process.m_pool_rank =
- thread_count - 1; // Reversed for scan-compatible reductions
- s_threads_process.m_pool_size = thread_count;
- s_threads_process.m_pool_fan_size = fan_size(
- s_threads_process.m_pool_rank, s_threads_process.m_pool_size);
- s_threads_pid[s_threads_process.m_pool_rank] =
- std::this_thread::get_id();
- } else {
- s_threads_process.m_pool_base = nullptr;
- s_threads_process.m_pool_rank = 0;
- s_threads_process.m_pool_size = 0;
- s_threads_process.m_pool_fan_size = 0;
- }
-
- // Initial allocations:
- ThreadsExec::resize_scratch(1024, 1024);
- } else {
- s_thread_pool_size[0] = 0;
- s_thread_pool_size[1] = 0;
- s_thread_pool_size[2] = 0;
- }
- }
-
- if (is_initialized || thread_spawn_failed) {
- std::ostringstream msg;
-
- msg << "Kokkos::Threads::initialize ERROR";
-
- if (is_initialized) {
- msg << " : already initialized";
- }
- if (thread_spawn_failed) {
- msg << " : failed to spawn " << thread_spawn_failed << " threads";
- }
-
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
-
- // Check for over-subscription
- if (Kokkos::show_warnings() &&
- (Impl::mpi_ranks_per_node() * long(thread_count) >
- Impl::processors_per_node())) {
- std::cerr << "Kokkos::Threads::initialize WARNING: You are likely "
- "oversubscribing your CPU cores."
- << std::endl;
- std::cerr << " Detected: "
- << Impl::processors_per_node() << " cores per node." << std::endl;
- std::cerr << " Detected: "
- << Impl::mpi_ranks_per_node() << " MPI_ranks per node."
- << std::endl;
- std::cerr << " Requested: "
- << thread_count << " threads per process." << std::endl;
- }
-
-  // Init the array used for arbitrarily sized atomics
- Impl::init_lock_array_host_space();
-
- Impl::SharedAllocationRecord<void, void>::tracking_enable();
-}
-
-//----------------------------------------------------------------------------
-
-void ThreadsExec::finalize() {
- verify_is_process("ThreadsExec::finalize", false);
-
- fence();
-
- resize_scratch(0, 0);
-
- const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
-
- for (unsigned i = s_thread_pool_size[0]; begin < i--;) {
- if (s_threads_exec[i]) {
- s_threads_exec[i]->m_pool_state = ThreadsExec::Terminating;
-
- wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
-
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
- }
-
- s_threads_pid[i] = std::thread::id();
- }
-
- if (s_threads_process.m_pool_base) {
- (&s_threads_process)->~ThreadsExec();
- s_threads_exec[0] = nullptr;
- }
-
- if (Kokkos::hwloc::can_bind_threads()) {
- Kokkos::hwloc::unbind_this_thread();
- }
-
- s_thread_pool_size[0] = 0;
- s_thread_pool_size[1] = 0;
- s_thread_pool_size[2] = 0;
-
- // Reset master thread to run solo.
- s_threads_process.m_numa_rank = 0;
- s_threads_process.m_numa_core_rank = 0;
- s_threads_process.m_pool_base = nullptr;
- s_threads_process.m_pool_rank = 0;
- s_threads_process.m_pool_size = 1;
- s_threads_process.m_pool_fan_size = 0;
- s_threads_process.m_pool_state = ThreadsExec::Inactive;
-
- Kokkos::Profiling::finalize();
-}
-
-//----------------------------------------------------------------------------
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-int Threads::concurrency() { return impl_thread_pool_size(0); }
-void Threads::fence(const std::string &name) const {
- Impl::ThreadsExec::internal_fence(name, Impl::fence_is_static::no);
-}
-
-Threads &Threads::impl_instance(int) {
- static Threads t;
- return t;
-}
-
-int Threads::impl_thread_pool_rank_host() {
- const std::thread::id pid = std::this_thread::get_id();
- int i = 0;
- while ((i < Impl::s_thread_pool_size[0]) && (pid != Impl::s_threads_pid[i])) {
- ++i;
- }
- return i;
-}
-
-int Threads::impl_thread_pool_size(int depth) {
- return Impl::s_thread_pool_size[depth];
-}
-
-const char *Threads::name() { return "Threads"; }
-
-namespace Impl {
-
-int g_threads_space_factory_initialized =
- initialize_space_factory<Threads>("050_Threads");
-
-} // namespace Impl
-
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<Threads>::id;
-}
-} // namespace Tools
-#endif
-
-} /* namespace Kokkos */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_THREADS_PARALLEL_MDRANGE_HPP
-#define KOKKOS_THREADS_PARALLEL_MDRANGE_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Threads> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
-
- using WorkTag = typename MDRangePolicy::work_tag;
-
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using iterate_type = typename Kokkos::Impl::HostIterateTile<
- MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy; // construct as RangePolicy( 0, num_tiles
- // ).set_chunk_size(1) in ctor
-
- inline static void exec_range(const MDRangePolicy &mdr_policy,
- const FunctorType &functor, const Member ibeg,
- const Member iend) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- iterate_type(mdr_policy, functor)(i);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
- exec_schedule<typename Policy::schedule_type::type>(exec, arg);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
- const ParallelFor &self = *((const ParallelFor *)arg);
-
- WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- ParallelFor::exec_range(self.m_mdr_policy, self.m_functor, range.begin(),
- range.end());
-
- exec.fan_in();
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
- const ParallelFor &self = *((const ParallelFor *)arg);
-
- WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- exec.set_work_range(range.begin(), range.end(), self.m_policy.chunk_size());
- exec.reset_steal_target();
- exec.barrier();
-
- long work_index = exec.get_work_index();
-
- while (work_index != -1) {
- const Member begin =
- static_cast<Member>(work_index) * self.m_policy.chunk_size();
- const Member end =
- begin + self.m_policy.chunk_size() < self.m_policy.end()
- ? begin + self.m_policy.chunk_size()
- : self.m_policy.end();
-
- ParallelFor::exec_range(self.m_mdr_policy, self.m_functor, begin, end);
- work_index = exec.get_work_index();
- }
-
- exec.fan_in();
- }
-
- public:
- inline void execute() const {
- ThreadsExec::start(&ParallelFor::exec, this);
- ThreadsExec::fence();
- }
-
- ParallelFor(const FunctorType &arg_functor, const MDRangePolicy &arg_policy)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {}
-
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy &, const Functor &) {
- /**
-     * 1024 here is just our guess for a reasonable max tile size;
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Threads> {
- private:
- using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
- using Policy = typename MDRangePolicy::impl_range_policy;
-
- using WorkTag = typename MDRangePolicy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- MDRangePolicy, ReducerTypeFwd>;
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
-
- using iterate_type =
- typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
- WorkTag, reference_type>;
-
- const FunctorType m_functor;
- const MDRangePolicy m_mdr_policy;
- const Policy m_policy; // construct as RangePolicy( 0, num_tiles
- // ).set_chunk_size(1) in ctor
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- inline static void exec_range(const MDRangePolicy &mdr_policy,
- const FunctorType &functor, const Member &ibeg,
- const Member &iend, reference_type update) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- iterate_type(mdr_policy, functor, update)(i);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
- exec_schedule<typename Policy::schedule_type::type>(exec, arg);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
- const ParallelReduce &self = *((const ParallelReduce *)arg);
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- typename Analysis::Reducer reducer(
- &ReducerConditional::select(self.m_functor, self.m_reducer));
-
- ParallelReduce::exec_range(
- self.m_mdr_policy, self.m_functor, range.begin(), range.end(),
- reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
-
- exec.fan_in_reduce(reducer);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
- const ParallelReduce &self = *((const ParallelReduce *)arg);
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- exec.set_work_range(range.begin(), range.end(), self.m_policy.chunk_size());
- exec.reset_steal_target();
- exec.barrier();
-
- long work_index = exec.get_work_index();
- typename Analysis::Reducer reducer(
- &ReducerConditional::select(self.m_functor, self.m_reducer));
-
- reference_type update =
- reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
- while (work_index != -1) {
- const Member begin =
- static_cast<Member>(work_index) * self.m_policy.chunk_size();
- const Member end =
- begin + self.m_policy.chunk_size() < self.m_policy.end()
- ? begin + self.m_policy.chunk_size()
- : self.m_policy.end();
- ParallelReduce::exec_range(self.m_mdr_policy, self.m_functor, begin, end,
- update);
- work_index = exec.get_work_index();
- }
-
- exec.fan_in_reduce(reducer);
- }
-
- public:
- inline void execute() const {
- ThreadsExec::resize_scratch(
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer)),
- 0);
-
- ThreadsExec::start(&ParallelReduce::exec, this);
-
- ThreadsExec::fence();
-
- if (m_result_ptr) {
- const pointer_type data =
- (pointer_type)ThreadsExec::root_reduce_scratch();
-
- const unsigned n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
- for (unsigned i = 0; i < n; ++i) {
- m_result_ptr[i] = data[i];
- }
- }
- }
-
- template <class HostViewType>
- ParallelReduce(const FunctorType &arg_functor,
- const MDRangePolicy &arg_policy,
- const HostViewType &arg_result_view,
- std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result_view.data()) {
- static_assert(Kokkos::is_view<HostViewType>::value,
- "Kokkos::Threads reduce result must be a View");
-
- static_assert(
- std::is_same<typename HostViewType::memory_space, HostSpace>::value,
- "Kokkos::Threads reduce result must be a View in HostSpace");
- }
-
- inline ParallelReduce(const FunctorType &arg_functor,
- MDRangePolicy arg_policy, const ReducerType &reducer)
- : m_functor(arg_functor),
- m_mdr_policy(arg_policy),
- m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
-
- template <typename Policy, typename Functor>
- static int max_tile_size_product(const Policy &, const Functor &) {
- /**
-     * 1024 here is just our guess for a reasonable max tile size;
- * it isn't a hardware constraint. If people see a use for larger
- * tile size products, we're happy to change this.
- */
- return 1024;
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
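-// Usage sketch for the specializations above (hedged: 'a' is a hypothetical
-// 2-D host View; N and M are placeholder extents). Requesting
-// Kokkos::Schedule<Kokkos::Dynamic> in the policy routes execution through
-// the work-stealing exec_schedule overloads:
-//
-//   Kokkos::parallel_for(
-//       Kokkos::MDRangePolicy<Kokkos::Threads, Kokkos::Rank<2>,
-//                             Kokkos::Schedule<Kokkos::Dynamic>>({0, 0},
-//                                                                {N, M}),
-//       KOKKOS_LAMBDA(const int i, const int j) { a(i, j) = i + j; });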
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_THREADS_PARALLEL_RANGE_HPP
-#define KOKKOS_THREADS_PARALLEL_RANGE_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Threads> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member ibeg, const Member iend) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(i);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member ibeg, const Member iend) {
- const TagType t{};
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(t, i);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
- exec_schedule<typename Policy::schedule_type::type>(exec, arg);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
-    const ParallelFor &self = *static_cast<const ParallelFor *>(arg);
-
- WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- ParallelFor::template exec_range<WorkTag>(self.m_functor, range.begin(),
- range.end());
-
- exec.fan_in();
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
-    const ParallelFor &self = *static_cast<const ParallelFor *>(arg);
-
- WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- exec.set_work_range(range.begin() - self.m_policy.begin(),
- range.end() - self.m_policy.begin(),
- self.m_policy.chunk_size());
- exec.reset_steal_target();
- exec.barrier();
-
- long work_index = exec.get_work_index();
-
- while (work_index != -1) {
- const Member begin =
- static_cast<Member>(work_index) * self.m_policy.chunk_size() +
- self.m_policy.begin();
- const Member end =
- begin + self.m_policy.chunk_size() < self.m_policy.end()
- ? begin + self.m_policy.chunk_size()
- : self.m_policy.end();
- ParallelFor::template exec_range<WorkTag>(self.m_functor, begin, end);
- work_index = exec.get_work_index();
- }
-
- exec.fan_in();
- }
-
- public:
- inline void execute() const {
- ThreadsExec::start(&ParallelFor::exec, this);
- ThreadsExec::fence();
- }
-
- ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
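// Sketch (editorial, stand-alone form of the arithmetic above): under a
// Dynamic schedule each stolen work_index addresses one chunk_size-wide
// window of the iteration range, clamped at the policy's end(), exactly as
// the ternary inside the work-stealing loop computes.
#include <algorithm>

inline long chunk_begin(long work_index, long policy_begin, long chunk_size) {
  return work_index * chunk_size + policy_begin;
}

inline long chunk_end(long begin, long chunk_size, long policy_end) {
  return std::min(begin + chunk_size, policy_end);  // never run past end()
}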
-
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
- Kokkos::Threads> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- using WorkTag = typename Policy::work_tag;
- using WorkRange = typename Policy::WorkRange;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(i, update);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update) {
- const TagType t{};
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(t, i, update);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
- exec_schedule<typename Policy::schedule_type::type>(exec, arg);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
-    const ParallelReduce &self = *static_cast<const ParallelReduce *>(arg);
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- typename Analysis::Reducer reducer(
- &ReducerConditional::select(self.m_functor, self.m_reducer));
-
- ParallelReduce::template exec_range<WorkTag>(
- self.m_functor, range.begin(), range.end(),
- reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
-
- exec.fan_in_reduce(reducer);
- }
-
- template <class Schedule>
- static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_schedule(ThreadsExec &exec, const void *arg) {
-    const ParallelReduce &self = *static_cast<const ParallelReduce *>(arg);
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- exec.set_work_range(range.begin() - self.m_policy.begin(),
- range.end() - self.m_policy.begin(),
- self.m_policy.chunk_size());
- exec.reset_steal_target();
- exec.barrier();
-
- long work_index = exec.get_work_index();
- typename Analysis::Reducer reducer(
- &ReducerConditional::select(self.m_functor, self.m_reducer));
-
- reference_type update =
- reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
- while (work_index != -1) {
- const Member begin =
- static_cast<Member>(work_index) * self.m_policy.chunk_size() +
- self.m_policy.begin();
- const Member end =
- begin + self.m_policy.chunk_size() < self.m_policy.end()
- ? begin + self.m_policy.chunk_size()
- : self.m_policy.end();
- ParallelReduce::template exec_range<WorkTag>(self.m_functor, begin, end,
- update);
- work_index = exec.get_work_index();
- }
-
- exec.fan_in_reduce(reducer);
- }
-
- public:
- inline void execute() const {
- if (m_policy.end() <= m_policy.begin()) {
- if (m_result_ptr) {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- } else {
- ThreadsExec::resize_scratch(
- Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)),
- 0);
-
- ThreadsExec::start(&ParallelReduce::exec, this);
-
- ThreadsExec::fence();
-
- if (m_result_ptr) {
-        const pointer_type data =
-            static_cast<pointer_type>(ThreadsExec::root_reduce_scratch());
-
- const unsigned n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
- for (unsigned i = 0; i < n; ++i) {
- m_result_ptr[i] = data[i];
- }
- }
- }
- }
-
- template <class HostViewType>
- ParallelReduce(const FunctorType &arg_functor, const Policy &arg_policy,
- const HostViewType &arg_result_view,
- std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result_view.data()) {
- static_assert(Kokkos::is_view<HostViewType>::value,
- "Kokkos::Threads reduce result must be a View");
-
- static_assert(
- std::is_same<typename HostViewType::memory_space, HostSpace>::value,
- "Kokkos::Threads reduce result must be a View in HostSpace");
- }
-
- inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
- const ReducerType &reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
-    /*static_assert( std::is_same< typename ViewType::memory_space
-                     , Kokkos::HostSpace >::value
-                     , "Reduction result on Kokkos::Threads must be a Kokkos::View in HostSpace"
-                     );*/
- }
-};
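// Sketch (editorial, not from the deleted sources; assumes the Threads
// backend is enabled): a range reduction into a plain scalar. Note that
// execute() above still init()s and final()izes the result for an empty
// range, so n == 0 correctly yields the reduction identity, 0.0.
#include <Kokkos_Core.hpp>

inline double sum_first_n(const int n) {
  double total = 0.0;
  Kokkos::parallel_reduce(
      Kokkos::RangePolicy<Kokkos::Threads>(0, n),
      KOKKOS_LAMBDA(const int i, double& acc) { acc += double(i); }, total);
  return total;
}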
-
-template <class FunctorType, class... Traits>
-class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Threads> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkRange = typename Policy::WorkRange;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update, const bool final) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(i, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update, const bool final) {
- const TagType t{};
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(t, i, update, final);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
-    const ParallelScan &self = *static_cast<const ParallelScan *>(arg);
-
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- typename Analysis::Reducer final_reducer(&self.m_functor);
-
- reference_type update =
- final_reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
-
- ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
- range.end(), update, false);
-
- // exec.template scan_large( final_reducer );
- exec.scan_small(final_reducer);
-
- ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
- range.end(), update, true);
-
- exec.fan_in();
- }
-
- public:
- inline void execute() const {
- ThreadsExec::resize_scratch(2 * Analysis::value_size(m_functor), 0);
- ThreadsExec::start(&ParallelScan::exec, this);
- ThreadsExec::fence();
- }
-
- ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
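// Sketch (editorial; assumes the Threads backend is enabled): the functor
// shape ParallelScan drives. exec() above invokes it twice per thread, first
// with final == false to accumulate per-thread partials, then, after
// scan_small() merges the thread totals, with final == true so results may
// be written out.
#include <Kokkos_Core.hpp>

inline void exclusive_prefix_sum(Kokkos::View<int*, Kokkos::HostSpace> in,
                                 Kokkos::View<int*, Kokkos::HostSpace> out) {
  Kokkos::parallel_scan(
      Kokkos::RangePolicy<Kokkos::Threads>(0, in.extent(0)),
      KOKKOS_LAMBDA(const int i, int& update, const bool final) {
        if (final) out(i) = update;  // write only on the second pass
        update += in(i);             // accumulate on both passes
      });
}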
-
-template <class FunctorType, class ReturnType, class... Traits>
-class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
- ReturnType, Kokkos::Threads> {
- private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkRange = typename Policy::WorkRange;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
-
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- ReturnType &m_returnvalue;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update, const bool final) {
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(i, update, final);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
- const FunctorType &functor, const Member &ibeg, const Member &iend,
- reference_type update, const bool final) {
- const TagType t{};
-#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
- defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
-#pragma ivdep
-#endif
- for (Member i = ibeg; i < iend; ++i) {
- functor(t, i, update, final);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
-    const ParallelScanWithTotal &self =
-        *static_cast<const ParallelScanWithTotal *>(arg);
-
- const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
-
- typename Analysis::Reducer final_reducer(&self.m_functor);
-
- reference_type update =
- final_reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
-
- ParallelScanWithTotal::template exec_range<WorkTag>(
- self.m_functor, range.begin(), range.end(), update, false);
-
- // exec.template scan_large(final_reducer);
- exec.scan_small(final_reducer);
-
- ParallelScanWithTotal::template exec_range<WorkTag>(
- self.m_functor, range.begin(), range.end(), update, true);
-
- exec.fan_in();
-
- if (exec.pool_rank() == exec.pool_size() - 1) {
- self.m_returnvalue = update;
- }
- }
-
- public:
- inline void execute() const {
- ThreadsExec::resize_scratch(2 * Analysis::value_size(m_functor), 0);
- ThreadsExec::start(&ParallelScanWithTotal::exec, this);
- ThreadsExec::fence();
- }
-
- ParallelScanWithTotal(const FunctorType &arg_functor,
- const Policy &arg_policy, ReturnType &arg_returnvalue)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_returnvalue(arg_returnvalue) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
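// Sketch (editorial; assumes the Threads backend is enabled): the
// "with total" variant surfaces as a trailing output argument, filled from
// the last pool member's final update value as in exec() above.
#include <Kokkos_Core.hpp>

inline long count_with_scan(Kokkos::View<long*, Kokkos::HostSpace> v) {
  long total = 0;
  Kokkos::parallel_scan(
      Kokkos::RangePolicy<Kokkos::Threads>(0, v.extent(0)),
      KOKKOS_LAMBDA(const int i, long& update, const bool final) {
        if (final) v(i) = update;  // exclusive prefix of the constant 1
        update += 1;
      },
      total);
  return total;  // equals v.extent(0)
}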
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_THREADS_PARALLEL_TEAM_HPP
-#define KOKKOS_THREADS_PARALLEL_TEAM_HPP
-
-#include <Kokkos_Parallel.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Properties>
-class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
- Kokkos::Threads> {
- private:
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const size_t m_shared;
-
- template <class TagType, class Schedule>
- inline static std::enable_if_t<std::is_void<TagType>::value &&
- std::is_same<Schedule, Kokkos::Static>::value>
- exec_team(const FunctorType &functor, Member member) {
- for (; member.valid_static(); member.next_static()) {
- functor(member);
- }
- }
-
- template <class TagType, class Schedule>
- inline static std::enable_if_t<!std::is_void<TagType>::value &&
- std::is_same<Schedule, Kokkos::Static>::value>
- exec_team(const FunctorType &functor, Member member) {
- const TagType t{};
- for (; member.valid_static(); member.next_static()) {
- functor(t, member);
- }
- }
-
- template <class TagType, class Schedule>
- inline static std::enable_if_t<std::is_void<TagType>::value &&
- std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_team(const FunctorType &functor, Member member) {
- for (; member.valid_dynamic(); member.next_dynamic()) {
- functor(member);
- }
- }
-
- template <class TagType, class Schedule>
- inline static std::enable_if_t<!std::is_void<TagType>::value &&
- std::is_same<Schedule, Kokkos::Dynamic>::value>
- exec_team(const FunctorType &functor, Member member) {
- const TagType t{};
- for (; member.valid_dynamic(); member.next_dynamic()) {
- functor(t, member);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
-    const ParallelFor &self = *static_cast<const ParallelFor *>(arg);
-
- ParallelFor::exec_team<WorkTag, typename Policy::schedule_type::type>(
- self.m_functor, Member(&exec, self.m_policy, self.m_shared));
-
- exec.barrier();
- exec.fan_in();
- }
-
-  template <typename Policy>
- Policy fix_policy(Policy policy) {
- if (policy.impl_vector_length() < 0) {
- policy.impl_set_vector_length(1);
- }
- if (policy.team_size() < 0) {
- policy.impl_set_team_size(
- policy.team_size_recommended(m_functor, ParallelForTag{}));
- }
- return policy;
- }
-
- public:
- inline void execute() const {
- ThreadsExec::resize_scratch(
- 0, Policy::member_type::team_reduce_size() + m_shared);
-
- ThreadsExec::start(&ParallelFor::exec, this);
-
- ThreadsExec::fence();
- }
-
- ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
- : m_functor(arg_functor),
- m_policy(fix_policy(arg_policy)),
- m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, m_policy.team_size())) {}
-};
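// Sketch (editorial; assumes the Threads backend is enabled): passing
// Kokkos::AUTO leaves team_size() negative, which fix_policy() above
// replaces with team_size_recommended(); the vector length likewise
// defaults to 1.
#include <Kokkos_Core.hpp>

inline void team_for_sketch(const int leagues) {
  using TP = Kokkos::TeamPolicy<Kokkos::Threads>;
  Kokkos::parallel_for(
      TP(leagues, Kokkos::AUTO),  // team size resolved by fix_policy()
      KOKKOS_LAMBDA(const TP::member_type& member) {
        Kokkos::single(Kokkos::PerTeam(member), [&]() {
          // one thread per team runs in this trivial example
        });
      });
}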
-
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Threads> {
- private:
- using Policy =
- Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
- using WorkTag = typename Policy::work_tag;
- using Member = typename Policy::member_type;
-
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- Policy, ReducerTypeFwd>;
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
- const Policy m_policy;
- const ReducerType m_reducer;
- const pointer_type m_result_ptr;
- const size_t m_shared;
-
- template <class TagType>
- inline static std::enable_if_t<std::is_void<TagType>::value> exec_team(
- const FunctorType &functor, Member member, reference_type update) {
- for (; member.valid_static(); member.next_static()) {
- functor(member, update);
- }
- }
-
- template <class TagType>
- inline static std::enable_if_t<!std::is_void<TagType>::value> exec_team(
- const FunctorType &functor, Member member, reference_type update) {
- const TagType t{};
- for (; member.valid_static(); member.next_static()) {
- functor(t, member, update);
- }
- }
-
- static void exec(ThreadsExec &exec, const void *arg) {
-    const ParallelReduce &self = *static_cast<const ParallelReduce *>(arg);
-
- typename Analysis::Reducer reducer(
- &ReducerConditional::select(self.m_functor, self.m_reducer));
-
- ParallelReduce::template exec_team<WorkTag>(
- self.m_functor, Member(&exec, self.m_policy, self.m_shared),
- reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
-
- exec.fan_in_reduce(reducer);
- }
-
- public:
- inline void execute() const {
- if (m_policy.league_size() * m_policy.team_size() == 0) {
- if (m_result_ptr) {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
- final_reducer.init(m_result_ptr);
- final_reducer.final(m_result_ptr);
- }
- } else {
- ThreadsExec::resize_scratch(
- Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)),
- Policy::member_type::team_reduce_size() + m_shared);
-
- ThreadsExec::start(&ParallelReduce::exec, this);
-
- ThreadsExec::fence();
-
- if (m_result_ptr) {
-        const pointer_type data =
-            static_cast<pointer_type>(ThreadsExec::root_reduce_scratch());
-
- const unsigned n = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
- for (unsigned i = 0; i < n; ++i) {
- m_result_ptr[i] = data[i];
- }
- }
- }
- }
-
- template <typename Policy>
- Policy fix_policy(Policy policy) {
- if (policy.impl_vector_length() < 0) {
- policy.impl_set_vector_length(1);
- }
- if (policy.team_size() < 0) {
- policy.impl_set_team_size(policy.team_size_recommended(
- m_functor, m_reducer, ParallelReduceTag{}));
- }
- return policy;
- }
-
- template <class ViewType>
- inline ParallelReduce(
- const FunctorType &arg_functor, const Policy &arg_policy,
- const ViewType &arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void *> = nullptr)
- : m_functor(arg_functor),
- m_policy(fix_policy(arg_policy)),
- m_reducer(InvalidType()),
- m_result_ptr(arg_result.data()),
- m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, m_policy.team_size())) {}
-
- inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
- const ReducerType &reducer)
- : m_functor(arg_functor),
- m_policy(fix_policy(arg_policy)),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(
- arg_functor, m_policy.team_size())) {
-    /*static_assert( std::is_same< typename ViewType::memory_space
-                     , Kokkos::HostSpace >::value
-                     , "Reduction result on Kokkos::Threads must be a Kokkos::View in HostSpace"
-                     );*/
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
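// Sketch (editorial; assumes the Threads backend is enabled): the
// reducer-taking constructor above in action. A Kokkos::Max reducer supplies
// both the result location and the init/join/final operations that
// ReducerConditional selects in place of the functor's.
#include <Kokkos_Core.hpp>

inline int max_league_rank(const int leagues) {
  using TP = Kokkos::TeamPolicy<Kokkos::Threads>;
  int result = 0;
  Kokkos::parallel_reduce(
      TP(leagues, Kokkos::AUTO),
      KOKKOS_LAMBDA(const TP::member_type& member, int& m) {
        if (member.league_rank() > m) m = member.league_rank();
      },
      Kokkos::Max<int>(result));
  return result;  // leagues - 1 for leagues > 0
}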
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
-#define KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_Threads.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class FunctorType, class... Traits>
-class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::Threads> {
- private:
- using Policy = Kokkos::WorkGraphPolicy<Traits...>;
-
- using Self = ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
- Kokkos::Threads>;
-
- Policy m_policy;
- FunctorType m_functor;
-
- template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- m_functor(w);
- }
-
- template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> exec_one(
- const std::int32_t w) const noexcept {
- const TagType t{};
- m_functor(t, w);
- }
-
- inline void exec_one_thread() const noexcept {
- // Spin until COMPLETED_TOKEN.
- // END_TOKEN indicates no work is currently available.
-
- for (std::int32_t w = Policy::END_TOKEN;
- Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
- if (Policy::END_TOKEN != w) {
- exec_one<typename Policy::work_tag>(w);
- m_policy.completed_work(w);
- }
- }
- }
-
- static inline void thread_main(ThreadsExec& exec, const void* arg) noexcept {
- const Self& self = *(static_cast<const Self*>(arg));
- self.exec_one_thread();
- exec.fan_in();
- }
-
- public:
- inline void execute() {
- ThreadsExec::start(&Self::thread_main, this);
- ThreadsExec::fence();
- }
-
- inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_policy(arg_policy), m_functor(arg_functor) {}
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif /* KOKKOS_THREADS_WORKGRAPHPOLICY_HPP */
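// Schematic (editorial; Queue is a hypothetical stand-in for the policy
// type) of the token protocol exec_one_thread() implements above: END_TOKEN
// means "no node is ready yet, keep spinning", COMPLETED_TOKEN means "the
// whole graph is done, exit".
#include <cstdint>

template <class Queue, class Work>
void drain_work_graph(Queue& queue, Work&& work) {
  for (;;) {
    const std::int32_t w = queue.pop_work();
    if (w == Queue::COMPLETED_TOKEN) break;  // graph fully processed
    if (w == Queue::END_TOKEN) continue;     // nothing ready; spin
    work(w);                                 // run node w ...
    queue.completed_work(w);                 // ... then release its dependents
  }
}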
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_CUDA_HPP
-#define KOKKOS_DECLARE_CUDA_HPP
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Kokkos_Cuda.hpp>
-#include <Cuda/Kokkos_Cuda_Half_Impl_Type.hpp>
-#include <Cuda/Kokkos_Cuda_Half_Conversion.hpp>
-#include <Cuda/Kokkos_Cuda_Parallel_MDRange.hpp>
-#include <Cuda/Kokkos_Cuda_Parallel_Range.hpp>
-#include <Cuda/Kokkos_Cuda_Parallel_Team.hpp>
-#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
-#include <Cuda/Kokkos_Cuda_Instance.hpp>
-#include <Cuda/Kokkos_Cuda_View.hpp>
-#include <Cuda/Kokkos_Cuda_Team.hpp>
-#include <Cuda/Kokkos_Cuda_Task.hpp>
-#include <Cuda/Kokkos_Cuda_MDRangePolicy.hpp>
-#include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_HBWSPACE_HPP
-#define KOKKOS_DECLARE_HBWSPACE_HPP
-
-#ifdef KOKKOS_ENABLE_HBWSPACE
-#include <Kokkos_HBWSpace.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_HIP_HPP
-#define KOKKOS_DECLARE_HIP_HPP
-
-#if defined(KOKKOS_ENABLE_HIP)
-#include <Kokkos_HIP.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_HPX_HPP
-#define KOKKOS_DECLARE_HPX_HPP
-
-#if defined(KOKKOS_ENABLE_HPX)
-#include <Kokkos_HPX.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_OPENACC_HPP
-#define KOKKOS_DECLARE_OPENACC_HPP
-
-#if defined(KOKKOS_ENABLE_OPENACC)
-#include <OpenACC/Kokkos_OpenACC.hpp>
-#include <OpenACC/Kokkos_OpenACCSpace.hpp>
-#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_OPENMP_HPP
-#define KOKKOS_DECLARE_OPENMP_HPP
-
-#if defined(KOKKOS_ENABLE_OPENMP)
-#include <Kokkos_OpenMP.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_OPENMPTARGET_HPP
-#define KOKKOS_DECLARE_OPENMPTARGET_HPP
-
-#if defined(KOKKOS_ENABLE_OPENMPTARGET)
-#include <Kokkos_OpenMPTarget.hpp>
-#include <Kokkos_OpenMPTargetSpace.hpp>
-#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_SERIAL_HPP
-#define KOKKOS_DECLARE_SERIAL_HPP
-
-#if defined(KOKKOS_ENABLE_SERIAL)
-#include <Kokkos_Serial.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_SYCL_HPP
-#define KOKKOS_DECLARE_SYCL_HPP
-
-#if defined(KOKKOS_ENABLE_SYCL)
-#include <Kokkos_SYCL.hpp>
-#include <SYCL/Kokkos_SYCL_Half_Impl_Type.hpp>
-#include <SYCL/Kokkos_SYCL_Half_Conversion.hpp>
-#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
-#include <SYCL/Kokkos_SYCL_MDRangePolicy.hpp>
-#include <SYCL/Kokkos_SYCL_Parallel_Range.hpp>
-#include <SYCL/Kokkos_SYCL_Parallel_Reduce.hpp>
-#include <SYCL/Kokkos_SYCL_Parallel_Scan.hpp>
-#include <SYCL/Kokkos_SYCL_Parallel_Team.hpp>
-#include <SYCL/Kokkos_SYCL_UniqueToken.hpp>
-#endif
-
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_DECLARE_THREADS_HPP
-#define KOKKOS_DECLARE_THREADS_HPP
-
-#if defined(KOKKOS_ENABLE_THREADS)
-#include <Kokkos_Threads.hpp>
-#endif
-
-#endif
+++ /dev/null
-
-
-namespace Kokkos {
-namespace AvoidCompilerWarnings {
-int dontComplain() {
- // keep the compiler from complaining about emptiness
- return 0;
-}
-} // namespace AvoidCompilerWarnings
-} // namespace Kokkos
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_CUDA_FWD_HPP_
-#define KOKKOS_CUDA_FWD_HPP_
-#if defined(KOKKOS_ENABLE_CUDA)
-namespace Kokkos {
-
-class CudaSpace; ///< Memory space on Cuda GPU
-class CudaUVMSpace; ///< Memory space on Cuda GPU with UVM
-class CudaHostPinnedSpace; ///< Memory space on Host accessible to Cuda GPU
-class Cuda; ///< Execution space for Cuda GPU
-
-namespace Impl {
-
-template <class ExecSpace>
-void cuda_prefetch_pointer(const ExecSpace& /*space*/, const void* /*ptr*/,
- size_t /*bytes*/, bool /*to_device*/) {}
-
-void cuda_prefetch_pointer(const Cuda& space, const void* ptr, size_t bytes,
- bool to_device);
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
-#endif
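// Sketch (editorial): the two cuda_prefetch_pointer declarations above form
// a standard overload pattern: the generic function template is a no-op for
// arbitrary execution spaces, while the more specialized non-template Cuda
// overload (defined elsewhere) wins overload resolution when a Cuda space is
// passed. A minimal analogue with hypothetical types:
#include <cstddef>

struct GenericSpace {};
struct GpuSpace {};

template <class ExecSpace>
void prefetch(const ExecSpace&, const void*, std::size_t) {}  // no-op

inline void prefetch(const GpuSpace&, const void* /*ptr*/,
                     std::size_t /*bytes*/) {
  // a real backend would issue the prefetch here
}
// prefetch(GenericSpace{}, p, n) picks the template no-op;
// prefetch(GpuSpace{}, p, n) picks the non-template overload.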
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_HBWSPACE_FWD_HPP_
-#define KOKKOS_HBWSPACE_FWD_HPP_
-
-#ifdef KOKKOS_ENABLE_HBWSPACE
-namespace Kokkos {
-
-namespace Experimental {
-class HBWSpace; ///< Memory space for hbw_malloc from memkind
-                ///< (e.g. for the KNL processor)
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_HIP_FWD_HPP_
-#define KOKKOS_HIP_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_HIP)
-namespace Kokkos {
-namespace Experimental {
-class HIPSpace; ///< Memory space on HIP GPU
-class HIPHostPinnedSpace; ///< Memory space on Host accessible to HIP GPU
-class HIPManagedSpace; ///< Memory migratable between Host and HIP GPU
-class HIP; ///< Execution space for HIP GPU
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/* Kokkos v. 3.0 NTESS BSD 3-clause license header (identical to the one above) */
-
-#ifndef KOKKOS_HPX_FWD_HPP_
-#define KOKKOS_HPX_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_HPX)
-namespace Kokkos {
-namespace Experimental {
-class HPX; ///< Execution space with HPX back-end.
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENACC_FWD_HPP_
-#define KOKKOS_OPENACC_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_OPENACC)
-namespace Kokkos {
-namespace Experimental {
-class OpenACC; ///< OpenACC execution space.
-class OpenACCSpace;
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENMP_FWD_HPP_
-#define KOKKOS_OPENMP_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_OPENMP)
-namespace Kokkos {
-class OpenMP; ///< OpenMP execution space.
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_OPENMPTARGET_FWD_HPP_
-#define KOKKOS_OPENMPTARGET_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_OPENMPTARGET)
-namespace Kokkos {
-namespace Experimental {
-class OpenMPTarget; ///< OpenMPTarget execution space.
-class OpenMPTargetSpace;
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SERIAL_FWD_HPP_
-#define KOKKOS_SERIAL_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_SERIAL)
-namespace Kokkos {
-class Serial; ///< Serial execution space running in the main process on the CPU.
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SYCL_FWD_HPP_
-#define KOKKOS_SYCL_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_SYCL)
-namespace Kokkos {
-namespace Experimental {
-class SYCLDeviceUSMSpace; ///< Memory space on SYCL device, not accessible from
- ///< the host
-class SYCLSharedUSMSpace; ///< Memory space accessible from both the SYCL
- ///< device and the host
-class SYCLHostUSMSpace; ///< Memory space accessible from both the SYCL
- ///< device and the host (host pinned)
-class SYCL; ///< Execution space for SYCL
-} // namespace Experimental
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_THREADS_FWD_HPP_
-#define KOKKOS_THREADS_FWD_HPP_
-
-#if defined(KOKKOS_ENABLE_THREADS)
-namespace Kokkos {
-class Threads; ///< Execution space with C++11 threads back-end.
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-// Deprecated file for backward compatibility
-
-#include <impl/Kokkos_ViewMapping.hpp>
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_ASSEMBLY_HPP)
-#define KOKKOS_ATOMIC_ASSEMBLY_HPP
-namespace Kokkos {
-
-namespace Impl {
-
-#if !defined(_WIN32)
-struct cas128_t {
- uint64_t lower;
- uint64_t upper;
-
- KOKKOS_INLINE_FUNCTION
- cas128_t() {
- lower = 0;
- upper = 0;
- }
-
- KOKKOS_INLINE_FUNCTION
- cas128_t(const cas128_t& a) {
- lower = a.lower;
- upper = a.upper;
- }
- KOKKOS_INLINE_FUNCTION
- cas128_t(volatile cas128_t* a) {
- lower = a->lower;
- upper = a->upper;
- }
-
- KOKKOS_INLINE_FUNCTION
- bool operator!=(const cas128_t& a) const {
-    return (lower != a.lower) || (upper != a.upper);
- }
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const cas128_t& a) {
- lower = a.lower;
- upper = a.upper;
- }
- KOKKOS_INLINE_FUNCTION
- void operator=(const cas128_t& a) volatile {
- lower = a.lower;
- upper = a.upper;
- }
-} __attribute__((__aligned__(16)));
-#endif
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-inline cas128_t cas128(volatile cas128_t* ptr, cas128_t cmp, cas128_t swap) {
- bool swapped = false;
- __asm__ __volatile__(
- "lock cmpxchg16b %1\n\t"
- "setz %0"
- : "=q"(swapped), "+m"(*ptr), "+d"(cmp.upper), "+a"(cmp.lower)
- : "c"(swap.upper), "b"(swap.lower), "q"(swapped));
- return cmp;
-}
-#endif
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
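// Illustrative sketch of the 16-byte CAS semantics that cas128()/cmpxchg16b
// provide, written with the GCC/Clang __atomic builtins on unsigned __int128
// (assumes a 64-bit GCC/Clang host; may need -mcx16 or linking libatomic).
#include <cassert>

int main() {
  unsigned __int128 v = 1;
  unsigned __int128 expected = 1;
  unsigned __int128 desired = 2;
  // Like cas128(): compares *ptr with 'expected' and, on match, stores
  // 'desired'; on failure, 'expected' is updated with the observed value.
  bool ok = __atomic_compare_exchange_n(&v, &expected, desired,
                                        /* weak = */ false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  assert(ok && v == 2);
}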
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && \
- !defined(KOKKOS_ATOMIC_COMPARE_EXCHANGE_STRONG_HPP)
-#define KOKKOS_ATOMIC_COMPARE_EXCHANGE_STRONG_HPP
-
-#include <impl/Kokkos_Atomic_Memory_Order.hpp>
-#include <impl/Kokkos_Memory_Fence.hpp>
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
-#endif
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-// Cuda native CAS supports int, unsigned int, and unsigned long long int
-// (non-standard type). Must cast away 'volatile' for the CAS call.
-
-#if defined(KOKKOS_ENABLE_CUDA)
-
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-__inline__ __device__ int atomic_compare_exchange(volatile int* const dest,
- const int compare,
- const int val) {
- return atomicCAS((int*)dest, compare, val);
-}
-
-__inline__ __device__ unsigned int atomic_compare_exchange(
- volatile unsigned int* const dest, const unsigned int compare,
- const unsigned int val) {
- return atomicCAS((unsigned int*)dest, compare, val);
-}
-
-__inline__ __device__ unsigned long long int atomic_compare_exchange(
- volatile unsigned long long int* const dest,
- const unsigned long long int compare, const unsigned long long int val) {
- return atomicCAS((unsigned long long int*)dest, compare, val);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
- const int tmp = atomicCAS((int*)dest, *((int*)&compare), *((int*)&val));
- return *((T*)&tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T&>
- val) {
- using type = unsigned long long int;
- const type tmp = atomicCAS((type*)dest, *((type*)&compare), *((type*)&val));
- return *((T*)&tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
- T return_val;
-  // This is a way to (hopefully) avoid deadlock in a warp
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda_space((void*)dest)) {
- Kokkos::memory_fence();
- return_val = *dest;
- if (return_val == compare) *dest = val;
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-}
-#endif
-#endif
-
-//----------------------------------------------------------------------------
-// GCC native CAS supports int, long, unsigned int, unsigned long.
-// Intel native CAS support int and long with the same interface as GCC.
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
-// atomic_compare_exchange are already defined in Kokkos_Atomic_Windows.hpp
-#elif defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-inline int atomic_compare_exchange(volatile int* const dest, const int compare,
- const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline long atomic_compare_exchange(volatile long* const dest,
- const long compare, const long val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-// GCC supports unsigned
-
-inline unsigned int atomic_compare_exchange(volatile unsigned int* const dest,
- const unsigned int compare,
- const unsigned int val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline unsigned long atomic_compare_exchange(volatile unsigned long* const dest,
- const unsigned long compare,
- const unsigned long val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline unsigned long long atomic_compare_exchange(
- volatile unsigned long long* const dest, const unsigned long long compare,
- const unsigned long long val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-#endif
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i =
- __sync_val_compare_and_swap((int*)dest, *((int*)&compare), *((int*)&val));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
- const T&>
- val) {
- union U {
- long i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i = __sync_val_compare_and_swap((long*)dest, *((long*)&compare),
- *((long*)&val));
- return tmp.t;
-}
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
- sizeof(T) == sizeof(Impl::cas128_t),
- const T&>
- val) {
- union U {
- Impl::cas128_t i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i = Impl::cas128((Impl::cas128_t*)dest, *((Impl::cas128_t*)&compare),
- *((Impl::cas128_t*)&val));
- return tmp.t;
-}
-#endif
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T compare,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
- if (return_val == compare) {
- // Don't use the following line of code here:
- //
- // const T tmp = *dest = val;
- //
- // Instead, put each assignment in its own statement. This is
- // because the overload of T::operator= for volatile *this should
- // return void, not volatile T&. See Kokkos #177:
- //
- // https://github.com/kokkos/kokkos/issues/177
- *dest = val;
- const T tmp = *dest;
-#ifndef KOKKOS_COMPILER_CLANG
- (void)tmp;
-#endif
- Kokkos::memory_fence();
- }
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-}
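// Minimal host-side sketch of the lock-based fallback above: for types with
// no hardware CAS width, serialize on a lock associated with the address,
// then read-compare-write under that lock. Here one std::mutex stands in for
// Kokkos' internal per-address lock table (Impl::lock_address_host_space).
#include <mutex>

template <class T>
T locked_compare_exchange(T* dest, const T& compare, const T& val) {
  static std::mutex m;  // stand-in for the per-address lock table
  std::lock_guard<std::mutex> g(m);
  T old = *dest;
  if (old == compare) *dest = val;
  return old;  // old value, whether or not the swap happened
}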
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest,
- const T compare, const T val) {
- T retval;
-#pragma omp critical
- {
- retval = dest[0];
- if (retval == compare) dest[0] = val;
- }
- return retval;
-}
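// Standalone rendering of the critical-section CAS above (compile with an
// OpenMP-enabled compiler, e.g. -fopenmp; without OpenMP the pragma is
// ignored and the function degrades to a plain read-compare-write). Correct
// but fully serializing: every CAS contends on the one critical region.
int omp_compare_exchange(int* dest, int compare, int val) {
  int ret;
#pragma omp critical
  {
    ret = *dest;
    if (ret == compare) *dest = val;
  }
  return ret;
}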
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest_v,
- const T compare, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- if (retval == compare) *dest = val;
- return retval;
-}
-
-#endif
-#endif
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T
-atomic_compare_exchange(volatile T* const, const Kokkos::Impl::identity_t<T>,
- const Kokkos::Impl::identity_t<T>) {
- return T();
-}
-#endif
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION bool atomic_compare_exchange_strong(
- volatile T* const dest, const T compare, const T val) {
- return compare == atomic_compare_exchange(dest, compare, val);
-}
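// The boolean "strong" form above is just a value comparison against the
// result of the value-returning CAS. A std::atomic sketch of the same
// equivalence:
#include <atomic>
#include <cassert>

template <class T>
bool cas_strong(std::atomic<T>& a, T compare, T val) {
  T old = compare;
  a.compare_exchange_strong(old, val);
  return compare == old;  // succeeded iff the observed value equaled 'compare'
}

int main() {
  std::atomic<int> x{5};
  assert(cas_strong(x, 5, 9) && x.load() == 9);   // match: swap happens
  assert(!cas_strong(x, 5, 1) && x.load() == 9);  // mismatch: value unchanged
}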
-//----------------------------------------------------------------------------
-
-namespace Impl {
-// memory-ordered versions are in the Impl namespace
-
-template <class T, class MemoryOrderFailure>
-KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
- T* dest, T compare, T val, memory_order_seq_cst_t, MemoryOrderFailure) {
- Kokkos::memory_fence();
- auto rv = Kokkos::atomic_compare_exchange_strong(dest, compare, val);
- Kokkos::memory_fence();
- return rv;
-}
-
-template <class T, class MemoryOrderFailure>
-KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
- T* dest, T compare, T val, memory_order_acquire_t, MemoryOrderFailure) {
- auto rv = Kokkos::atomic_compare_exchange_strong(dest, compare, val);
- Kokkos::memory_fence();
- return rv;
-}
-
-template <class T, class MemoryOrderFailure>
-KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
- T* dest, T compare, T val, memory_order_release_t, MemoryOrderFailure) {
- Kokkos::memory_fence();
- return Kokkos::atomic_compare_exchange_strong(dest, compare, val);
-}
-
-template <class T, class MemoryOrderFailure>
-KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
- T* dest, T compare, T val, memory_order_relaxed_t, MemoryOrderFailure) {
- return Kokkos::atomic_compare_exchange_strong(dest, compare, val);
-}
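// The four fallbacks above bracket a plain CAS with fences according to the
// requested order: seq_cst = fence before and after, acquire = fence after,
// release = fence before, relaxed = no fences. A std::atomic sketch of that
// same fence placement (the header's own approximation, shown here around a
// relaxed CAS primitive):
#include <atomic>

template <class T>
bool cas_seq_cst_via_fences(std::atomic<T>& a, T& expected, T desired) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // "before" fence
  bool ok = a.compare_exchange_strong(expected, desired,
                                      std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // "after" fence
  return ok;
}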
-
-#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-
-#if defined(__CUDA_ARCH__)
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
-#else
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
-#endif
-
-template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH bool _atomic_compare_exchange_strong(
- T* dest, T compare, T val, MemoryOrderSuccess, MemoryOrderFailure,
- std::enable_if_t<
- (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8 ||
- sizeof(T) == 16) &&
- std::is_same<typename MemoryOrderSuccess::memory_order,
- std::remove_cv_t<MemoryOrderSuccess>>::value &&
- std::is_same<typename MemoryOrderFailure::memory_order,
- std::remove_cv_t<MemoryOrderFailure>>::value,
- void const**> = nullptr) {
- return __atomic_compare_exchange_n(dest, &compare, val, /* weak = */ false,
- MemoryOrderSuccess::gnu_constant,
- MemoryOrderFailure::gnu_constant);
-}
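// The overload above forwards directly to the GCC/Clang __atomic builtin.
// Minimal isolated usage of that builtin (any GCC/Clang host compiler):
#include <cassert>

int main() {
  int v = 1;
  int expected = 1;
  bool ok = __atomic_compare_exchange_n(&v, &expected, 2, /* weak = */ false,
                                        __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  assert(ok && v == 2);
  // On failure the builtin writes the observed value back into 'expected'.
  ok = __atomic_compare_exchange_n(&v, &expected, 3, /* weak = */ false,
                                   __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  assert(!ok && expected == 2);
}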
-
-template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH bool _atomic_compare_exchange_strong(
- T* dest, T compare, T val, MemoryOrderSuccess order_success,
- MemoryOrderFailure order_failure,
- std::enable_if_t<
- !(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8 || sizeof(T) == 16) &&
- std::is_same<typename MemoryOrderSuccess::memory_order,
- std::remove_cv_t<MemoryOrderSuccess>>::value &&
- std::is_same<typename MemoryOrderFailure::memory_order,
- std::remove_cv_t<MemoryOrderFailure>>::value,
- void const**> = nullptr) {
-  return _atomic_compare_exchange_strong_fallback(dest, compare, val,
-                                                  order_success, order_failure);
-}
-
-#else
-
-template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
-KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong(
- T* dest, T compare, T val, MemoryOrderSuccess order_success,
- MemoryOrderFailure order_failure) {
- return _atomic_compare_exchange_strong_fallback(dest, compare, val,
- order_success, order_failure);
-}
-
-#endif
-
-// TODO static asserts in overloads that don't make sense (as listed in
-// https://gcc.gnu.org/onlinedocs/gcc-5.2.0/gcc/_005f_005fatomic-Builtins.html)
-template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
-KOKKOS_FORCEINLINE_FUNCTION bool atomic_compare_exchange_strong(
- T* dest, T compare, T val, MemoryOrderSuccess order_success,
- MemoryOrderFailure order_failure) {
- return _atomic_compare_exchange_strong(dest, compare, val, order_success,
- order_failure);
-}
-
-} // end namespace Impl
-
-} // namespace Kokkos
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
-#endif
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#include <Kokkos_Atomic.hpp>
-#ifndef KOKKOS_ATOMIC_COMPARE_EXCHANGE_WEAK_HPP
-#define KOKKOS_ATOMIC_COMPARE_EXCHANGE_WEAK_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-// Cuda sm_70 or greater supports C++-like semantics directly
-
-#if defined(KOKKOS_ENABLE_CUDA)
-
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-#if __CUDA_ARCH__ >= 700
-// See: https://github.com/ogiroux/freestanding
-#define kokkos_cuda_internal_cas_release_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.release.sys.b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define kokkos_cuda_internal_cas_acquire_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.acquire.sys.b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define kokkos_cuda_internal_cas_acq_rel_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.acq_rel.sys.b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define kokkos_cuda_internal_cas_relaxed_32(ptr, old, expected, desired) \
- asm volatile("atom.cas.relaxed.sys.b32 %0, [%1], %2, %3;" \
- : "=r"(old) \
- : "l"(ptr), "r"(expected), "r"(desired) \
- : "memory")
-#define kokkos_cuda_internal_fence_seq_cst() \
- asm volatile("fence.sc.sys;" : : : "memory")
-#define kokkos_cuda_internal_fence_acq_rel() \
- asm volatile("fence.acq_rel.sys;" : : : "memory")
-#else
-#define kokkos_cuda_internal_fence_acq_rel() \
- asm volatile("membar.sys;" : : : "memory")
-#define kokkos_cuda_internal_fence_seq_cst() \
- asm volatile("membar.sys;" : : : "memory")
-#endif
-
-// 32-bit version
-template <class T, std::enable_if_t<sizeof(T) == 4, int> = 0>
-__inline__ __device__ bool atomic_compare_exchange_weak(
- T volatile* const dest, T* const expected, T const desired,
- std::memory_order success_order = std::memory_order_seq_cst,
- std::memory_order failure_order = std::memory_order_seq_cst) {
- // TODO assert that success_order >= failure_order
- // See: https://github.com/ogiroux/freestanding
- int32_t tmp = 0;
- int32_t old = 0;
- memcpy(&tmp, &desired, sizeof(T));
- memcpy(&old, expected, sizeof(T));
- int32_t old_tmp = old;
-#if __CUDA_ARCH__ >= 700
- switch (success_order) {
- case std::memory_order_seq_cst:
- // sequentially consistent is just an acquire with a seq_cst fence
- kokkos_cuda_internal_fence_seq_cst();
- kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_acquire:
- kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_consume:
- // same as acquire on PTX compatible platforms
- kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_acq_rel:
- kokkos_cuda_internal_cas_acq_rel_32((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_release:
- kokkos_cuda_internal_cas_release_32((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_relaxed:
- kokkos_cuda_internal_cas_relaxed_32((T*)dest, old, old_tmp, tmp);
- break;
-  }
-#else
- // All of the orders that require a fence before the relaxed atomic operation:
- if (success_order == std::memory_order_release ||
- success_order == std::memory_order_acq_rel) {
- kokkos_cuda_internal_fence_acq_rel();
- } else if (success_order == std::memory_order_seq_cst) {
- kokkos_cuda_internal_fence_seq_cst();
- }
- // This is relaxed:
- // Cuda API requires casting away volatile
- atomicCAS((T*)dest, old_tmp, tmp);
-#endif
- bool const rv = (old == old_tmp);
-#if __CUDA_ARCH__ < 700
- if (rv) {
- if (success_order == std::memory_order_acquire ||
- success_order == std::memory_order_consume ||
- success_order == std::memory_order_acq_rel) {
- kokkos_cuda_internal_fence_acq_rel();
- } else if (success_order == std::memory_order_seq_cst) {
- kokkos_cuda_internal_fence_seq_cst();
- }
- } else {
- if (failure_order == std::memory_order_acquire ||
- failure_order == std::memory_order_consume ||
- failure_order == std::memory_order_acq_rel) {
- kokkos_cuda_internal_fence_acq_rel();
- } else if (failure_order == std::memory_order_seq_cst) {
- kokkos_cuda_internal_fence_seq_cst();
- }
- }
-#endif
- memcpy(expected, &old, sizeof(T));
- return rv;
-}
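// A "weak" CAS may fail spuriously (report failure even though the observed
// value matched), so it is intended for retry loops, where a spurious failure
// only costs one extra iteration. std::atomic sketch:
#include <atomic>

int fetch_add_via_weak_cas(std::atomic<int>& a, int delta) {
  int old = a.load(std::memory_order_relaxed);
  while (!a.compare_exchange_weak(old, old + delta,
                                  std::memory_order_acq_rel,
                                  std::memory_order_relaxed)) {
    // on failure 'old' is refreshed with the current value; just retry
  }
  return old;
}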
-
-// 64-bit version
-template <class T, std::enable_if_t<sizeof(T) == 8, int> = 0>
-__inline__ __device__ bool atomic_compare_exchange_weak(
- T volatile* const dest, T* const expected, T const desired,
- std::memory_order success_order = std::memory_order_seq_cst,
- std::memory_order failure_order = std::memory_order_seq_cst) {
- // TODO assert that success_order >= failure_order
- // See: https://github.com/ogiroux/freestanding
- int64_t tmp = 0;
- int64_t old = 0;
- memcpy(&tmp, &desired, sizeof(T));
- memcpy(&old, expected, sizeof(T));
- int64_t old_tmp = old;
-#if __CUDA_ARCH__ >= 700
- switch (success_order) {
- case std::memory_order_seq_cst:
- // sequentially consistent is just an acquire with a seq_cst fence
- kokkos_cuda_internal_fence_seq_cst();
- kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_acquire:
- kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_consume:
- // same as acquire on PTX compatible platforms
- kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_acq_rel:
- kokkos_cuda_internal_cas_acq_rel_64((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_release:
- kokkos_cuda_internal_cas_release_64((T*)dest, old, old_tmp, tmp);
- break;
- case std::memory_order_relaxed:
- kokkos_cuda_internal_cas_relaxed_64((T*)dest, old, old_tmp, tmp);
- break;
-  }
-#else
- // Cuda API requires casting away volatile
- atomicCAS((T*)dest, old_tmp, tmp);
-#endif
- bool const rv = (old == old_tmp);
- memcpy(expected, &old, sizeof(T));
- return rv;
-}
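// The memcpy dance above is standard-conforming type punning: it lets an
// arbitrary 8-byte T (say, double) ride through an integer CAS without
// violating aliasing rules. Host-side sketch of the same idea:
#include <atomic>
#include <cstdint>
#include <cstring>

bool cas_double_via_bits(std::atomic<std::uint64_t>& bits, double& expected,
                         double desired) {
  std::uint64_t e, d;
  std::memcpy(&e, &expected, sizeof e);
  std::memcpy(&d, &desired, sizeof d);
  bool ok = bits.compare_exchange_strong(e, d);
  std::memcpy(&expected, &e, sizeof e);  // report the observed value back
  return ok;
}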
-
-#endif // defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-#endif // defined( KOKKOS_ENABLE_CUDA )
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-// GCC native CAS supports int, long, unsigned int, unsigned long.
-// Intel native CAS support int and long with the same interface as GCC.
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-inline int atomic_compare_exchange(volatile int* const dest, const int compare,
- const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline long atomic_compare_exchange(volatile long* const dest,
- const long compare, const long val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-// GCC supports unsigned
-
-inline unsigned int atomic_compare_exchange(volatile unsigned int* const dest,
- const unsigned int compare,
- const unsigned int val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline unsigned long atomic_compare_exchange(volatile unsigned long* const dest,
- const unsigned long compare,
- const unsigned long val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-inline unsigned long long atomic_compare_exchange(
- volatile unsigned long long* const dest, const unsigned long long compare,
- const unsigned long long val) {
- return __sync_val_compare_and_swap(dest, compare, val);
-}
-
-#endif
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i =
- __sync_val_compare_and_swap((int*)dest, *((int*)&compare), *((int*)&val));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
- const T&>
- val) {
- union U {
- long i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i = __sync_val_compare_and_swap((long*)dest, *((long*)&compare),
- *((long*)&val));
- return tmp.t;
-}
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
- sizeof(T) == sizeof(Impl::cas128_t),
- const T&>
- val) {
- union U {
- Impl::cas128_t i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- tmp.i = Impl::cas128((Impl::cas128_t*)dest, *((Impl::cas128_t*)&compare),
- *((Impl::cas128_t*)&val));
- return tmp.t;
-}
-#endif
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T compare,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
- if (return_val == compare) {
- // Don't use the following line of code here:
- //
- // const T tmp = *dest = val;
- //
- // Instead, put each assignment in its own statement. This is
- // because the overload of T::operator= for volatile *this should
- // return void, not volatile T&. See Kokkos #177:
- //
- // https://github.com/kokkos/kokkos/issues/177
- *dest = val;
- const T tmp = *dest;
-#ifndef KOKKOS_COMPILER_CLANG
- (void)tmp;
-#endif
- Kokkos::memory_fence();
- }
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-}
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest,
- const T compare, const T val) {
- T retval;
-#pragma omp critical
- {
- retval = dest[0];
- if (retval == compare) dest[0] = val;
- }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest_v,
- const T compare, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- if (retval == compare) *dest = val;
- return retval;
-}
-
-#endif
-#endif
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION bool atomic_compare_exchange_strong(
- volatile T* const dest, const T compare, const T val) {
- return compare == atomic_compare_exchange(dest, compare, val);
-}
-//----------------------------------------------------------------------------
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_DECREMENT_HPP)
-#define KOKKOS_ATOMIC_DECREMENT_HPP
-
-#include "impl/Kokkos_Atomic_Fetch_Sub.hpp"
-
-namespace Kokkos {
-
-// Atomic decrement
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_decrement<char>(volatile char* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock decb %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- char* a_nv = const_cast<char*>(a);
- --(*a_nv);
-#else
- Kokkos::atomic_fetch_sub(a, char(1));
-#endif
-}
-
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_decrement<short>(volatile short* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock decw %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- short* a_nv = const_cast<short*>(a);
- --(*a_nv);
-#else
- Kokkos::atomic_fetch_sub(a, short(1));
-#endif
-}
-
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_decrement<int>(volatile int* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock decl %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- int* a_nv = const_cast<int*>(a);
- --(*a_nv);
-#else
- Kokkos::atomic_fetch_sub(a, int(1));
-#endif
-}
-
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_decrement<long long int>(
- volatile long long int* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock decq %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- long long int* a_nv = const_cast<long long int*>(a);
- --(*a_nv);
-#else
- using T = long long int;
- Kokkos::atomic_fetch_sub(a, T(1));
-#endif
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_decrement(volatile T* a) {
-#if defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- T* a_nv = const_cast<T*>(a);
- --(*a_nv);
-#else
- Kokkos::atomic_fetch_sub(a, T(1));
-#endif
-}
-
-} // End of namespace Kokkos
-#endif
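// Sketch of the two paths taken by atomic_decrement above: the x86-64 fast
// path is a single lock-prefixed decrement instruction (note the "+m" output
// constraint here; the original writes the operand as input-only and leans on
// the "memory" clobber instead), and the portable path is an atomic
// fetch-subtract of 1 (GCC/Clang builtin assumed for the fallback).
#include <cassert>

inline void decrement(int* a) {
#if defined(__x86_64__) && !defined(_WIN32)
  __asm__ __volatile__("lock decl %0" : "+m"(a[0]) : : "memory");
#else
  __atomic_fetch_sub(a, 1, __ATOMIC_SEQ_CST);
#endif
}

int main() {
  int x = 2;
  decrement(&x);
  assert(x == 1);
}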
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_EXCHANGE_HPP)
-#define KOKKOS_ATOMIC_EXCHANGE_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-__inline__ __device__ int atomic_exchange(volatile int* const dest,
- const int val) {
- // return __iAtomicExch( (int*) dest , val );
- return atomicExch((int*)dest, val);
-}
-
-__inline__ __device__ unsigned int atomic_exchange(
- volatile unsigned int* const dest, const unsigned int val) {
- // return __uAtomicExch( (unsigned int*) dest , val );
- return atomicExch((unsigned int*)dest, val);
-}
-
-__inline__ __device__ unsigned long long int atomic_exchange(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- // return __ullAtomicExch( (unsigned long long*) dest , val );
- return atomicExch((unsigned long long*)dest, val);
-}
-
-/** \brief Atomic exchange for any type with compatible size */
-template <typename T>
-__inline__ __device__ T
-atomic_exchange(volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
- // int tmp = __iAtomicExch( (int*) dest , *((int*)&val) );
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- int tmp = atomicExch(((int*)dest), *((int*)&val));
- return *((T*)&tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_exchange(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T&>
- val) {
- using type = unsigned long long int;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- // type tmp = __ullAtomicExch( (type*) dest , *((type*)&val) );
- type tmp = atomicExch(((type*)dest), *((type*)&val));
- return *((T*)&tmp);
-}
-
-template <typename T>
-__inline__ __device__ T atomic_exchange(
- volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
- T return_val;
- // This is a way to (hopefully) avoid deadlock in a warp
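- // Threads of a warp execute in lockstep, so a lane that fails to acquire
- // the lock must not spin while the lane that holds it is stalled by branch
- // divergence. The ballot protocol below keeps every lane executing the
- // loop; each lane retires (done = 1) as soon as its own update completes,
- // and the loop exits once all initially active lanes report done.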
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda_space((void*)dest)) {
- Kokkos::memory_fence();
- return_val = *dest;
- *dest = val;
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-}
-/** \brief Atomic assignment for any type with compatible size */
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
- // (void) __iAtomicExch( (int*) dest , *((int*)&val) );
- (void)atomicExch(((int*)dest), *((int*)&val));
-}
-
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T&>
- val) {
- using type = unsigned long long int;
- // (void) __ullAtomicExch( (type*) dest , *((type*)&val) );
- (void)atomicExch(((type*)dest), *((type*)&val));
-}
-
-template <typename T>
-__inline__ __device__ void atomic_assign(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) != sizeof(unsigned long long int),
- const T&>
- val) {
- (void)atomic_exchange(dest, val);
-}
-
-#endif
-#endif
-
-//----------------------------------------------------------------------------
-
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-template <typename T>
-inline T atomic_exchange(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int) || sizeof(T) == sizeof(long),
- const T&>
- val) {
- using type = std::conditional_t<sizeof(T) == sizeof(int), int, long>;
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- const type v = *((type*)&val); // Extract to be sure the value doesn't change
-
- type assumed;
-
- union U {
- T val_T;
- type val_type;
- inline U() {}
- } old;
-
- old.val_T = *dest;
-
- do {
- assumed = old.val_type;
- old.val_type =
- __sync_val_compare_and_swap((volatile type*)dest, assumed, v);
- } while (assumed != old.val_type);
-
- return old.val_T;
-}
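-
-// The loop above builds exchange out of compare-and-swap: snapshot the
-// current value, then CAS the desired value in until the snapshot still
-// matches, using a union to reinterpret T as a same-sized integer. A
-// minimal stand-alone sketch of the same idiom (illustrative only, for a
-// hypothetical float target, using the same __sync builtin as above):
-//
-// inline float exchange_float(volatile float* dest, float val) {
-// union { int i; float f; } oldv, newv;
-// newv.f = val;
-// oldv.f = *dest; // snapshot of the current value
-// int assumed;
-// do {
-// assumed = oldv.i;
-// oldv.i = __sync_val_compare_and_swap((volatile int*)dest, assumed, newv.i);
-// } while (assumed != oldv.i);
-// return oldv.f; // the value that was replaced
-// }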
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-template <typename T>
-inline T atomic_exchange(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- union U {
- Impl::cas128_t i;
- T t;
- inline U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
- newval.t = val;
-
- do {
- assume.i = oldval.i;
- oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-#endif
-
-//----------------------------------------------------------------------------
-
-template <typename T>
-inline T atomic_exchange(volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
- // Don't use the following line of code here:
- //
- // const T tmp = *dest = val;
- //
- // Instead, put each assignment in its own statement. This is
- // because the overload of T::operator= for volatile *this should
- // return void, not volatile T&. See Kokkos #177:
- //
- // https://github.com/kokkos/kokkos/issues/177
- *dest = val;
- const T tmp = *dest;
-#ifndef KOKKOS_COMPILER_CLANG
- (void)tmp;
-#endif
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-}
-
-template <typename T>
-inline void atomic_assign(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int) || sizeof(T) == sizeof(long),
- const T&>
- val) {
- using type = std::conditional_t<sizeof(T) == sizeof(int), int, long>;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- const type v = *((type*)&val); // Extract to be sure the value doesn't change
-
- type assumed;
-
- union U {
- T val_T;
- type val_type;
- inline U() {}
- } old;
-
- old.val_T = *dest;
-
- do {
- assumed = old.val_type;
- old.val_type =
- __sync_val_compare_and_swap((volatile type*)dest, assumed, v);
- } while (assumed != old.val_type);
-}
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-template <typename T>
-inline void atomic_assign(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- union U {
- Impl::cas128_t i;
- T t;
- inline U() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
- newval.t = val;
- do {
- assume.i = oldval.i;
- oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-}
-#endif
-
-template <typename T>
-inline void atomic_assign(volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- // This is likely an aggregate type with a defined
- // 'volatile T & operator = ( const T & ) volatile'
- // member. The volatile return value implicitly defines a
- // dereference that some compilers (gcc 4.7.2) warn is being ignored.
- // Suppress warning by casting return to void.
- //(void)( *dest = val );
- *dest = val;
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
-}
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-inline T atomic_exchange(volatile T* const dest, const T val) {
- T retval;
- //#pragma omp atomic capture
-#pragma omp critical
- {
- retval = dest[0];
- dest[0] = val;
- }
- return retval;
-}
-
-template <typename T>
-inline void atomic_assign(volatile T* const dest, const T val) {
- //#pragma omp atomic
-#pragma omp critical
- { dest[0] = val; }
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-inline T atomic_exchange(volatile T* const dest_v, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- *dest = val;
- return retval;
-}
-
-template <typename T>
-inline void atomic_assign(volatile T* const dest_v, const T val) {
- T* dest = const_cast<T*>(dest_v);
- *dest = val;
-}
-
-#endif
-#endif
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T atomic_exchange(volatile T* const,
- const Kokkos::Impl::identity_t<T>) {
- return T();
-}
-
-template <typename T>
-__inline__ __device__ void atomic_assign(volatile T* const,
- const Kokkos::Impl::identity_t<T>) {}
-#endif
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_ADD_HPP)
-#define KOKKOS_ATOMIC_FETCH_ADD_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-// Support for int, unsigned int, unsigned long long int, float, and (on
-// sm_60+) double
-
-__inline__ __device__ int atomic_fetch_add(volatile int* const dest,
- const int val) {
- return atomicAdd((int*)dest, val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_add(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicAdd((unsigned int*)dest, val);
-}
-
-__inline__ __device__ unsigned long long int atomic_fetch_add(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return atomicAdd((unsigned long long int*)dest, val);
-}
-
-__inline__ __device__ float atomic_fetch_add(volatile float* const dest,
- const float val) {
- return atomicAdd((float*)dest, val);
-}
-
-#if (600 <= __CUDA_ARCH__)
-__inline__ __device__ double atomic_fetch_add(volatile double* const dest,
- const double val) {
- return atomicAdd((double*)dest, val);
-}
-#endif
-
-template <typename T>
-__inline__ __device__ T
-atomic_fetch_add(volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- // To work around a bug in the clang CUDA compiler, the union's name here
- // must differ from the names used inside the other overloads.
- union U1 {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U1() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = atomicCAS((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <typename T>
-__inline__ __device__ T atomic_fetch_add(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T>
- val) {
- // To work around a bug in the clang CUDA compiler, the union's name here
- // must differ from the names used inside the other overloads.
- union U2 {
- unsigned long long int i;
- T t;
- KOKKOS_INLINE_FUNCTION U2() {}
- } assume, oldval, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = atomicCAS((unsigned long long int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-//----------------------------------------------------------------------------
-
-template <typename T>
-__inline__ __device__ T atomic_fetch_add(
- volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
- T return_val;
- // This is a way to (hopefully) avoid deadlock in a warp
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- bool locked = Impl::lock_address_cuda_space((void*)dest);
- if (locked) {
- Kokkos::memory_fence();
- return_val = *dest;
- *dest = return_val + val;
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
-
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-}
-#endif
-#endif
-//----------------------------------------------------------------------------
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-#if defined(KOKKOS_ENABLE_ASM) && (defined(KOKKOS_ENABLE_ISA_X86_64) || \
- defined(KOKKOS_KNL_USE_ASM_WORKAROUND))
-inline int atomic_fetch_add(volatile int* dest, const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- int original = val;
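- // `lock xadd %1, %0` atomically adds the register into *dest and loads
- // the previous *dest back into the register, so `original` ends up
- // holding the pre-add value that is returned below.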
-
- __asm__ __volatile__("lock xadd %1, %0"
- : "+m"(*dest), "+r"(original)
- : "m"(*dest), "r"(original)
- : "memory");
-
- return original;
-}
-#else
-inline int atomic_fetch_add(volatile int* const dest, const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_add(dest, val);
-}
-#endif
-
-inline long int atomic_fetch_add(volatile long int* const dest,
- const long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_add(dest, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-inline unsigned int atomic_fetch_add(volatile unsigned int* const dest,
- const unsigned int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_add(dest, val);
-}
-
-inline unsigned long int atomic_fetch_add(
- volatile unsigned long int* const dest, const unsigned long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_add(dest, val);
-}
-
-inline unsigned long long int atomic_fetch_add(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_add(dest, val);
-}
-
-#endif
-
-template <typename T>
-inline T atomic_fetch_add(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- union U {
- int i;
- T t;
- inline U() {}
- } assume, oldval, newval;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = __sync_val_compare_and_swap((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <typename T>
-inline T atomic_fetch_add(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
- const T>
- val) {
- union U {
- long i;
- T t;
- inline U() {}
- } assume, oldval, newval;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = __sync_val_compare_and_swap((long*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
-template <typename T>
-inline T atomic_fetch_add(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
- sizeof(T) == sizeof(Impl::cas128_t),
- const T>
- val) {
- union U {
- Impl::cas128_t i;
- T t;
- inline U() {}
- } assume, oldval, newval;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t + val;
- oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-#endif
-
-//----------------------------------------------------------------------------
-
-template <typename T>
-inline T atomic_fetch_add(volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
-
- // Don't use the following line of code here:
- //
- // const T tmp = *dest = return_val + val;
- //
- // Instead, put each assignment in its own statement. This is
- // because the overload of T::operator= for volatile *this should
- // return void, not volatile T&. See Kokkos #177:
- //
- // https://github.com/kokkos/kokkos/issues/177
- *dest = return_val + val;
- const T tmp = *dest;
- (void)tmp;
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
-
- return return_val;
-}
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-T atomic_fetch_add(volatile T* const dest, const T val) {
- T retval;
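- // The `capture` clause makes the read of the old value and the update of
- // dest[0] a single atomic operation; a plain `#pragma omp atomic` could
- // update dest[0] but not also return its previous value.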
-#pragma omp atomic capture
- {
- retval = dest[0];
- dest[0] += val;
- }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-T atomic_fetch_add(volatile T* const dest_v, std::add_const_t<T> val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- *dest += val;
- return retval;
-}
-
-#endif
-#endif
-//----------------------------------------------------------------------------
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T atomic_fetch_add(volatile T* const,
- Kokkos::Impl::identity_t<T>) {
- return T();
-}
-#endif
-
-} // namespace Kokkos
-#endif
+++ /dev/null
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_AND_HPP)
-#define KOKKOS_ATOMIC_FETCH_AND_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-// Support for int, unsigned int, and (on sm_35+) unsigned long long int
-
-__inline__ __device__ int atomic_fetch_and(volatile int* const dest,
- const int val) {
- return atomicAnd((int*)dest, val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_and(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicAnd((unsigned int*)dest, val);
-}
-
-#if defined(__CUDA_ARCH__) && (350 <= __CUDA_ARCH__)
-__inline__ __device__ unsigned long long int atomic_fetch_and(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return atomicAnd((unsigned long long int*)dest, val);
-}
-#endif
-#endif
-#endif
-
-// 08/05/20 Overload to work around https://bugs.llvm.org/show_bug.cgi?id=46922
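-// Supplying the template argument explicitly makes these wrappers forward
-// to the generic sized implementation (Kokkos_Atomic_Generic.hpp) rather
-// than recursively selecting the non-template overloads defined here.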
-
-#if (defined(KOKKOS_ENABLE_CUDA) && \
- (defined(__CUDA_ARCH__) || \
- defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND))) || \
- (defined(KOKKOS_ENABLE_HIP))
-__inline__ __device__ unsigned long atomic_fetch_and(
- volatile unsigned long* const dest, const unsigned long val) {
- return atomic_fetch_and<unsigned long>(dest, val);
-}
-__inline__ __device__ long atomic_fetch_and(volatile long* const dest,
- long val) {
- return atomic_fetch_and<long>(dest, val);
-}
-#endif
-
-//----------------------------------------------------------------------------
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-inline int atomic_fetch_and(volatile int* const dest, const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_and(dest, val);
-}
-
-inline long int atomic_fetch_and(volatile long int* const dest,
- const long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_and(dest, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-inline unsigned int atomic_fetch_and(volatile unsigned int* const dest,
- const unsigned int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_and(dest, val);
-}
-
-inline unsigned long int atomic_fetch_and(
- volatile unsigned long int* const dest, const unsigned long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_and(dest, val);
-}
-
-inline unsigned long long int atomic_fetch_and(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_and(dest, val);
-}
-
-#endif
-
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-T atomic_fetch_and(volatile T* const dest, const T val) {
- T retval;
-#pragma omp atomic capture
- {
- retval = dest[0];
- dest[0] &= val;
- }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-T atomic_fetch_and(volatile T* const dest_v, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- *dest &= val;
- return retval;
-}
-
-#endif
-#endif
-//----------------------------------------------------------------------------
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T atomic_fetch_and(volatile T* const,
- Kokkos::Impl::identity_t<T>) {
- return T();
-}
-#endif
-
-// Simpler version of atomic_fetch_and without the fetch
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_and(volatile T* const dest, const T src) {
- (void)atomic_fetch_and(dest, src);
-}
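-
-// Illustrative usage (FLAG_DIRTY is a hypothetical mask): clear bits by
-// AND-ing with the complement of a mask, e.g.
-// Kokkos::atomic_and(&flags, ~FLAG_DIRTY);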
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_OR_HPP)
-#define KOKKOS_ATOMIC_FETCH_OR_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-// Support for int, unsigned int, and (on sm_35+) unsigned long long int
-
-__inline__ __device__ int atomic_fetch_or(volatile int* const dest,
- const int val) {
- return atomicOr((int*)dest, val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_or(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicOr((unsigned int*)dest, val);
-}
-
-#if defined(__CUDA_ARCH__) && (350 <= __CUDA_ARCH__)
-__inline__ __device__ unsigned long long int atomic_fetch_or(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return atomicOr((unsigned long long int*)dest, val);
-}
-#endif
-#endif
-#endif
-
-// 08/05/20 Overload to work around https://bugs.llvm.org/show_bug.cgi?id=46922
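-// Here, too, the explicit template argument forwards to the generic sized
-// implementation instead of recursing into these non-template overloads.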
-
-#if (defined(KOKKOS_ENABLE_CUDA) && \
- (defined(__CUDA_ARCH__) || \
- defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND))) || \
- (defined(KOKKOS_ENABLE_HIP))
-__inline__ __device__ unsigned long atomic_fetch_or(
- volatile unsigned long* const dest, const unsigned long val) {
- return atomic_fetch_or<unsigned long>(dest, val);
-}
-
-__inline__ __device__ long atomic_fetch_or(volatile long* const dest,
- long val) {
- return atomic_fetch_or<long>(dest, val);
-}
-#endif
-
-//----------------------------------------------------------------------------
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-inline int atomic_fetch_or(volatile int* const dest, const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_or(dest, val);
-}
-
-inline long int atomic_fetch_or(volatile long int* const dest,
- const long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_or(dest, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-inline unsigned int atomic_fetch_or(volatile unsigned int* const dest,
- const unsigned int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_or(dest, val);
-}
-
-inline unsigned long int atomic_fetch_or(volatile unsigned long int* const dest,
- const unsigned long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_or(dest, val);
-}
-
-inline unsigned long long int atomic_fetch_or(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_or(dest, val);
-}
-
-#endif
-
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-T atomic_fetch_or(volatile T* const dest, const T val) {
- T retval;
-#pragma omp atomic capture
- {
- retval = dest[0];
- dest[0] |= val;
- }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-T atomic_fetch_or(volatile T* const dest_v, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- *dest |= val;
- return retval;
-}
-
-#endif
-#endif
-//----------------------------------------------------------------------------
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T atomic_fetch_or(volatile T* const,
- Kokkos::Impl::identity_t<T>) {
- return T();
-}
-#endif
-
-// Simpler version of atomic_fetch_or without the fetch
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_or(volatile T* const dest, const T src) {
- (void)atomic_fetch_or(dest, src);
-}
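-
-// Illustrative usage (FLAG_READY is a hypothetical mask): set bits by
-// OR-ing in a mask, e.g.
-// Kokkos::atomic_or(&flags, FLAG_READY);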
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_SUB_HPP)
-#define KOKKOS_ATOMIC_FETCH_SUB_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-// Support for int, unsigned int, int64_t, float, and (on sm_60+) double
-
-__inline__ __device__ int atomic_fetch_sub(volatile int* const dest,
- const int val) {
- return atomicSub((int*)dest, val);
-}
-
-__inline__ __device__ unsigned int atomic_fetch_sub(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicSub((unsigned int*)dest, val);
-}
-
-__inline__ __device__ int64_t atomic_fetch_sub(volatile int64_t* const dest,
- const int64_t val) {
- return atomic_fetch_add(dest, -val);
-}
-
-__inline__ __device__ float atomic_fetch_sub(volatile float* const dest,
- const float val) {
- return atomicAdd((float*)dest, -val);
-}
-
-#if (600 <= __CUDA_ARCH__)
-__inline__ __device__ double atomic_fetch_sub(volatile double* const dest,
- const double val) {
- return atomicAdd((double*)dest, -val);
-}
-#endif
-
-template <typename T>
-__inline__ __device__ T
-atomic_fetch_sub(volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = atomicCAS((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <typename T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T>
- val) {
- union U {
- unsigned long long int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = atomicCAS((unsigned long long int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-//----------------------------------------------------------------------------
-
-template <typename T>
-__inline__ __device__ T atomic_fetch_sub(
- volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
- T return_val;
- // This is a way to (hopefully) avoid deadlock in a warp
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda_space((void*)dest)) {
- Kokkos::memory_fence();
- return_val = *dest;
- *dest = return_val - val;
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-}
-#endif
-#endif
-//----------------------------------------------------------------------------
-#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
-
-inline int atomic_fetch_sub(volatile int* const dest, const int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_sub(dest, val);
-}
-
-inline long int atomic_fetch_sub(volatile long int* const dest,
- const long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_sub(dest, val);
-}
-
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
-
-inline unsigned int atomic_fetch_sub(volatile unsigned int* const dest,
- const unsigned int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_sub(dest, val);
-}
-
-inline unsigned long int atomic_fetch_sub(
- volatile unsigned long int* const dest, const unsigned long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_sub(dest, val);
-}
-
-inline unsigned long long int atomic_fetch_sub(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
- return __sync_fetch_and_sub(dest, val);
-}
-
-#endif
-
-template <typename T>
-inline T atomic_fetch_sub(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = __sync_val_compare_and_swap((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <typename T>
-inline T atomic_fetch_sub(
- volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
- const T>
- val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- union U {
- long i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- assume.i = oldval.i;
- newval.t = assume.t - val;
- oldval.i = __sync_val_compare_and_swap((long*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-//----------------------------------------------------------------------------
-
-template <typename T>
-inline T atomic_fetch_sub(
- volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)dest, _MM_HINT_ET0);
-#endif
-
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
- *dest = return_val - val;
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-}
-
-//----------------------------------------------------------------------------
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <typename T>
-T atomic_fetch_sub(volatile T* const dest, const T val) {
- T retval;
-#pragma omp atomic capture
- {
- retval = dest[0];
- dest[0] -= val;
- }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-T atomic_fetch_sub(volatile T* const dest_v, const T val) {
- T* dest = const_cast<T*>(dest_v);
- T retval = *dest;
- *dest -= val;
- return retval;
-}
-
-#endif
-#endif
-
-// dummy for non-CUDA Kokkos headers being processed by NVCC
-#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
-template <typename T>
-__inline__ __device__ T atomic_fetch_sub(volatile T* const,
- Kokkos::Impl::identity_t<T>) {
- return T();
-}
-#endif
-
-} // namespace Kokkos
-
-#include <impl/Kokkos_Atomic_Assembly.hpp>
-#endif
+++ /dev/null
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_GENERIC_HPP)
-#define KOKKOS_ATOMIC_GENERIC_HPP
-
-// Combination operators to be used in a compare-and-exchange based atomic
-// operation
-namespace Kokkos {
-namespace Impl {
-
-template <class Op, class Scalar1, class Scalar2, class Enable = bool>
-struct _check_early_exit_impl {
- KOKKOS_FORCEINLINE_FUNCTION
- static constexpr bool check(Op const&, Scalar1 const&,
- Scalar2 const&) noexcept {
- return false;
- }
-};
-
-template <class Op, class Scalar1, class Scalar2>
-struct _check_early_exit_impl<
- Op, Scalar1, Scalar2,
- decltype(std::declval<Op const&>().check_early_exit(
- std::declval<Scalar1 const&>(), std::declval<Scalar2 const&>()))> {
- KOKKOS_FORCEINLINE_FUNCTION
- static constexpr bool check(Op const& op, Scalar1 const& v1,
- Scalar2 const& v2) {
- return op.check_early_exit(v1, v2);
- }
-};
-
-template <class Op, class Scalar1, class Scalar2>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool check_early_exit(
- Op const& op, Scalar1 const& v1, Scalar2 const& v2) noexcept {
- return _check_early_exit_impl<Op, Scalar1, Scalar2>::check(op, v1, v2);
-}
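-
-// The two templates above form a detection idiom: the partial
-// specialization is viable only when Op has a member
-// check_early_exit(Scalar1, Scalar2) whose return type matches the default
-// Enable = bool; otherwise the primary template's constant `false` is
-// selected. Operators can therefore opt in to skipping the CAS loop, as
-// MaxOper and MinOper below do when the stored value already wins the
-// comparison.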
-
-template <class Scalar1, class Scalar2>
-struct MaxOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return (val1 > val2 ? val1 : val2);
- }
- KOKKOS_FORCEINLINE_FUNCTION
- static constexpr bool check_early_exit(Scalar1 const& val1,
- Scalar2 const& val2) noexcept {
- return (val1 > val2);
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct MinOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return (val1 < val2 ? val1 : val2);
- }
- KOKKOS_FORCEINLINE_FUNCTION
- static constexpr bool check_early_exit(Scalar1 const& val1,
- Scalar2 const& val2) noexcept {
- return (val1 < val2);
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct AddOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 + val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct SubOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 - val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct MulOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 * val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct DivOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 / val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct ModOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 % val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct AndOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 & val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct OrOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 | val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct XorOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 ^ val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct LShiftOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 << val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct RShiftOper {
- KOKKOS_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 >> val2;
- }
-};
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_oper(
- const Oper& op, volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T>
- val) {
- union U {
- unsigned long long int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- if (check_early_exit(op, oldval.t, val)) return oldval.t;
- assume.i = oldval.i;
- newval.t = op.apply(assume.t, val);
- oldval.i = Kokkos::atomic_compare_exchange((unsigned long long int*)dest,
- assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T atomic_oper_fetch(
- const Oper& op, volatile T* const dest,
- std::enable_if_t<sizeof(T) != sizeof(int) &&
- sizeof(T) == sizeof(unsigned long long int),
- const T>
- val) {
- union U {
- unsigned long long int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- if (check_early_exit(op, oldval.t, val)) return oldval.t;
- assume.i = oldval.i;
- newval.t = op.apply(assume.t, val);
- oldval.i = Kokkos::atomic_compare_exchange((unsigned long long int*)dest,
- assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return newval.t;
-}
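-
-// atomic_fetch_oper and atomic_oper_fetch differ only in what they return:
-// oldval.t, the value observed before op.apply, versus newval.t, the value
-// written by the successful compare-exchange.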
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T
-atomic_fetch_oper(const Oper& op, volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- if (check_early_exit(op, oldval.t, val)) return oldval.t;
- assume.i = oldval.i;
- newval.t = op.apply(assume.t, val);
- oldval.i = Kokkos::atomic_compare_exchange((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return oldval.t;
-}
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T
-atomic_oper_fetch(const Oper& op, volatile T* const dest,
- std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
- union U {
- int i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } oldval, assume, newval;
-
- oldval.t = *dest;
-
- do {
- if (check_early_exit(op, oldval.t, val)) return oldval.t;
- assume.i = oldval.i;
- newval.t = op.apply(assume.t, val);
- oldval.i = Kokkos::atomic_compare_exchange((int*)dest, assume.i, newval.i);
- } while (assume.i != oldval.i);
-
- return newval.t;
-}
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_oper(
- const Oper& op, volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T> val) {
-#ifdef KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = *dest;
- *dest = op.apply(return_val, val);
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA)
- // This is a way to (hopefully) avoid deadlock in a warp
- T return_val;
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda_space((void*)dest)) {
- Kokkos::memory_fence();
- return_val = *dest;
- *dest = op.apply(return_val, val);
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-#elif defined(__HIP_DEVICE_COMPILE__)
- T return_val = *dest;
- int done = 0;
- unsigned int active = __ballot(1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip_space((void*)dest)) {
- return_val = *dest;
- *dest = op.apply(return_val, val);
- Impl::unlock_address_hip_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
- // FIXME_SYCL
- Kokkos::abort("Not implemented!");
- (void)op;
- (void)dest;
- (void)val;
- return 0;
-#endif
-}
-
-template <class Oper, typename T>
-KOKKOS_INLINE_FUNCTION T
-atomic_oper_fetch(const Oper& op, volatile T* const dest,
- std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
-#if defined(KOKKOS_ENABLE_ASM) && \
- defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST)
- && (sizeof(T) != 16)
-#endif
- ,
- const T>& val) {
-#ifdef KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
- while (!Impl::lock_address_host_space((void*)dest))
- ;
- Kokkos::memory_fence();
- T return_val = op.apply(*dest, val);
- *dest = return_val;
- Kokkos::memory_fence();
- Impl::unlock_address_host_space((void*)dest);
- return return_val;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA)
- T return_val;
- // This is a way to (hopefully) avoid deadlock in a warp
- int done = 0;
- unsigned int mask = __activemask();
- unsigned int active = __ballot_sync(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda_space((void*)dest)) {
- Kokkos::memory_fence();
- return_val = op.apply(*dest, val);
- *dest = return_val;
- Kokkos::memory_fence();
- Impl::unlock_address_cuda_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot_sync(mask, done);
- }
- return return_val;
-#elif defined(__HIP_DEVICE_COMPILE__)
- T return_val;
- int done = 0;
- unsigned int active = __ballot(1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip_space((void*)dest)) {
- return_val = op.apply(*dest, val);
- *dest = return_val;
- Impl::unlock_address_hip_space((void*)dest);
- done = 1;
- }
- }
- done_active = __ballot(done);
- }
- return return_val;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
- // FIXME_SYCL
- Kokkos::abort("Not implemented!");
- (void)op;
- (void)dest;
- (void)val;
- return 0;
-#endif
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-
-// Fetch_Oper atomics: return value before operation
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_max(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_min(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::MinOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_mul(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::MulOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_div(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::DivOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_mod(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::ModOper<T, const T>(), dest, val);
-}
-
-#if !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_and(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::AndOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_or(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::OrOper<T, const T>(), dest, val);
-}
-
-#endif
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_xor(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::XorOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_lshift(volatile T* const dest,
- const unsigned int val) {
- return Impl::atomic_fetch_oper(Impl::LShiftOper<T, const unsigned int>(),
- dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_rshift(volatile T* const dest,
- const unsigned int val) {
- return Impl::atomic_fetch_oper(Impl::RShiftOper<T, const unsigned int>(),
- dest, val);
-}
-
-// Oper Fetch atomics: return value after operation
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_max_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_min_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::MinOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_mul_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::MulOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_div_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::DivOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_mod_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::ModOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_and_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::AndOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_or_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::OrOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_xor_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::XorOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_lshift_fetch(volatile T* const dest,
- const unsigned int val) {
- return Impl::atomic_oper_fetch(Impl::LShiftOper<T, const unsigned int>(),
- dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_rshift_fetch(volatile T* const dest,
- const unsigned int val) {
- return Impl::atomic_oper_fetch(Impl::RShiftOper<T, const unsigned int>(),
- dest, val);
-}
-
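-// Illustrative contrast between the two families, starting from int x = 5:
-//   int old = atomic_fetch_max(&x, 7);  // returns the old value 5; x == 7
-//   int cur = atomic_max_fetch(&x, 9);  // returns the new value 9; x == 9
-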
-#ifdef _WIN32
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_add_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::AddOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_sub_fetch(volatile T* const dest, const T val) {
- return Impl::atomic_oper_fetch(Impl::SubOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_add(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::AddOper<T, const T>(), dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_fetch_sub(volatile T* const dest, const T val) {
- return Impl::atomic_fetch_oper(Impl::SubOper<T, const T>(), dest, val);
-}
-#endif
-
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_GENERIC_SECONDARY_HPP)
-#define KOKKOS_ATOMIC_GENERIC_SECONDARY_HPP
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-
-#ifndef KOKKOS_ENABLE_SERIAL_ATOMICS
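-// Generic exchange built from compare-and-swap: keep re-reading the current
-// value and attempting to install `val` until the CAS observes exactly the
-// value we assumed; the value observed last is the previous contents.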
-template <typename T>
-KOKKOS_INLINE_FUNCTION T atomic_exchange(volatile T* const dest, const T val) {
- T oldval = *dest;
- T assume;
- do {
- assume = oldval;
- oldval = atomic_compare_exchange(dest, assume, val);
- } while (assume != oldval);
-
- return oldval;
-}
-#endif
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_add(volatile T* const dest, const T val) {
- (void)atomic_fetch_add(dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_sub(volatile T* const dest, const T val) {
- (void)atomic_fetch_sub(dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_mul(volatile T* const dest, const T val) {
- (void)atomic_fetch_mul(dest, val);
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_div(volatile T* const dest, const T val) {
- (void)atomic_fetch_div(dest, val);
-}
-
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
-#include <xmmintrin.h>
-#endif
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_INCREMENT_HPP)
-#define KOKKOS_ATOMIC_INCREMENT_HPP
-
-namespace Kokkos {
-
-// Atomic increment
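-// Each specialization below prefers a single `lock inc` instruction on
-// x86-64 hosts, degenerates to a plain increment under serial atomics,
-// and otherwise falls back to atomic_fetch_add(a, 1).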
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_increment<char>(volatile char* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock incb %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- char* a_nv = const_cast<char*>(a);
- ++(*a_nv);
-#else
- Kokkos::atomic_fetch_add(a, char(1));
-#endif
-}
-
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_increment<short>(volatile short* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock incw %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- short* a_nv = const_cast<short*>(a);
- ++(*a_nv);
-#else
- Kokkos::atomic_fetch_add(a, short(1));
-#endif
-}
-
-#ifndef _WIN32
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_increment<int>(volatile int* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock incl %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- int* a_nv = const_cast<int*>(a);
- ++(*a_nv);
-#else
- Kokkos::atomic_fetch_add(a, int(1));
-#endif
-}
-#endif
-
-template <>
-KOKKOS_INLINE_FUNCTION void atomic_increment<long long int>(
- volatile long long int* a) {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
- !defined(_WIN32) && !defined(__CUDA_ARCH__)
-#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
- _mm_prefetch((const char*)a, _MM_HINT_ET0);
-#endif
- __asm__ __volatile__("lock incq %0"
- : /* no output registers */
- : "m"(a[0])
- : "memory");
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- long long int* a_nv = const_cast<long long int*>(a);
- ++(*a_nv);
-#else
- using T = long long int;
- Kokkos::atomic_fetch_add(a, T(1));
-#endif
-}
-
-template <typename T>
-KOKKOS_INLINE_FUNCTION void atomic_increment(volatile T* a) {
-#if defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
- T* a_nv = const_cast<T*>(a);
- ++(*a_nv);
-#else
- Kokkos::atomic_fetch_add(a, T(1));
-#endif
-}
-
-} // End of namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
-#define KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP)
-
-#include <impl/Kokkos_Atomic_Memory_Order.hpp>
-#include <impl/Kokkos_Atomic_Generic.hpp>
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-// Olivier's implementation helpfully binds to the same builtins as GNU, so
-// we make this code common across multiple options
-#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-
-#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
-#else
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
-#endif
-
-template <class T, class MemoryOrder>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH T _atomic_load(
- T* ptr, MemoryOrder,
- std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8) &&
- std::is_same<typename MemoryOrder::memory_order,
- std::remove_cv_t<MemoryOrder>>::value,
- void const**> = nullptr) {
- return __atomic_load_n(ptr, MemoryOrder::gnu_constant);
-}
-
-template <class T, class MemoryOrder>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH T _atomic_load(
- T* ptr, MemoryOrder,
- std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8) &&
- std::is_default_constructible<T>::value &&
- std::is_same<typename MemoryOrder::memory_order,
- std::remove_cv_t<MemoryOrder>>::value,
- void const**> = nullptr) {
- T rv{};
- __atomic_load(ptr, &rv, MemoryOrder::gnu_constant);
- return rv;
-}
-
-#undef KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH
-
-#elif defined(__CUDA_ARCH__)
-
-// Not compiling for Volta or later, or Cuda ASM atomics were manually disabled
-
-template <class T>
-__device__ __inline__ T _relaxed_atomic_load_impl(
- T* ptr, std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 ||
- sizeof(T) == 4 || sizeof(T) == 8),
- void const**> = nullptr) {
- return *ptr;
-}
-
-template <class T>
-struct NoOpOper {
- __device__ __inline__ static constexpr T apply(T const& t,
- T const&) noexcept {
- return t;
- }
-};
-
-template <class T>
-__device__ __inline__ T _relaxed_atomic_load_impl(
- T* ptr, std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 ||
- sizeof(T) == 4 || sizeof(T) == 8),
- void const**> = nullptr) {
- T rv{};
- // TODO remove a copy operation here?
- return Kokkos::Impl::atomic_oper_fetch(NoOpOper<T>{}, ptr, rv);
-}
-
-template <class T>
-__device__ __inline__ T _atomic_load(T* ptr, memory_order_seq_cst_t) {
- Kokkos::memory_fence();
- T rv = Impl::_relaxed_atomic_load_impl(ptr);
- Kokkos::memory_fence();
- return rv;
-}
-
-template <class T>
-__device__ __inline__ T _atomic_load(T* ptr, memory_order_acquire_t) {
- T rv = Impl::_relaxed_atomic_load_impl(ptr);
- Kokkos::memory_fence();
- return rv;
-}
-
-template <class T>
-__device__ __inline__ T _atomic_load(T* ptr, memory_order_relaxed_t) {
- return _relaxed_atomic_load_impl(ptr);
-}
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline T _atomic_load(T* ptr, MemoryOrder) {
- // AFAICT, all OpenMP atomics are sequentially consistent, so memory order
- // doesn't matter
- T retval{};
-#pragma omp atomic read
- { retval = *ptr; }
- return retval;
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline T _atomic_load(T* ptr, MemoryOrder) {
- return *ptr;
-}
-
-#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline T _atomic_load(T* ptr, MemoryOrder) {
- atomic_compare_exchange(ptr, 0, 0);
- return *ptr;
-}
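-// The no-op compare-exchange above is a conservative way to obtain a full
-// barrier from the Windows Interlocked intrinsics before the plain read.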
-
-#endif // end of all atomic implementations
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
- Impl::memory_order_seq_cst_t) {
- return _atomic_load(ptr, Impl::memory_order_seq_cst);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
- Impl::memory_order_acquire_t) {
- return _atomic_load(ptr, Impl::memory_order_acquire);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
- Impl::memory_order_relaxed_t) {
- return _atomic_load(ptr, Impl::memory_order_relaxed);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* /*ptr*/,
- Impl::memory_order_release_t) {
- static_assert(
- sizeof(T) == 0, // just something that will always be false, but only on
- // instantiation
- "atomic_load with memory order release doesn't make any sense!");
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* /*ptr*/,
- Impl::memory_order_acq_rel_t) {
- static_assert(
- sizeof(T) == 0, // just something that will always be false, but only on
- // instantiation
- "atomic_load with memory order acq_rel doesn't make any sense!");
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr) {
- // relaxed by default!
- return _atomic_load(ptr, Impl::memory_order_relaxed);
-}
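-// Note the default: an untagged atomic_load is relaxed, unlike std::atomic,
-// whose untagged loads are sequentially consistent.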
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
-#endif
-
-#endif // defined(KOKKOS_ATOMIC_HPP)
-#endif // KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
-#define KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <atomic>
-
-namespace Kokkos {
-namespace Impl {
-
-/** @file
- * Provides strongly-typed analogs of the standard memory order enumerators.
- * In addition to (very slightly) reducing the constant propagation burden on
- * the compiler, this allows us to give compile-time errors for things that
- * don't make sense, like atomic_load with memory order release.
- */
-
-struct memory_order_seq_cst_t {
- using memory_order = memory_order_seq_cst_t;
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
- static constexpr auto gnu_constant = __ATOMIC_SEQ_CST;
-#endif
- static constexpr auto std_constant = std::memory_order_seq_cst;
-};
-constexpr memory_order_seq_cst_t memory_order_seq_cst = {};
-
-struct memory_order_relaxed_t {
- using memory_order = memory_order_relaxed_t;
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
- static constexpr auto gnu_constant = __ATOMIC_RELAXED;
-#endif
- static constexpr auto std_constant = std::memory_order_relaxed;
-};
-constexpr memory_order_relaxed_t memory_order_relaxed = {};
-
-struct memory_order_acquire_t {
- using memory_order = memory_order_acquire_t;
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
- static constexpr auto gnu_constant = __ATOMIC_ACQUIRE;
-#endif
- static constexpr auto std_constant = std::memory_order_acquire;
-};
-constexpr memory_order_acquire_t memory_order_acquire = {};
-
-struct memory_order_release_t {
- using memory_order = memory_order_release_t;
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
- static constexpr auto gnu_constant = __ATOMIC_RELEASE;
-#endif
- static constexpr auto std_constant = std::memory_order_release;
-};
-constexpr memory_order_release_t memory_order_release = {};
-
-struct memory_order_acq_rel_t {
- using memory_order = memory_order_acq_rel_t;
-#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
- static constexpr auto gnu_constant = __ATOMIC_ACQ_REL;
-#endif
- static constexpr auto std_constant = std::memory_order_acq_rel;
-};
-constexpr memory_order_acq_rel_t memory_order_acq_rel = {};
-
-// Intentionally omit consume (for now)
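-
-// Illustrative tag dispatch: overload resolution selects the ordering at
-// compile time, e.g.
-//   T v = Kokkos::Impl::atomic_load(p, Kokkos::Impl::memory_order_acquire);
-//   Kokkos::Impl::atomic_store(p, v, Kokkos::Impl::memory_order_release);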
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_MINMAX_HPP)
-#define KOKKOS_ATOMIC_MINMAX_HPP
-
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
-
-// Support for int, unsigned int, and unsigned long long int
-
-// Atomic_fetch_{min,max}
-
-#ifdef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND
-
-// Host implementations for CLANG compiler
-
-inline __host__ int atomic_fetch_min(volatile int* const dest, const int val) {
- return Impl::atomic_fetch_oper(Impl::MinOper<const int, const int>(), dest,
- val);
-}
-
-inline __host__ unsigned int atomic_fetch_min(volatile unsigned int* const dest,
- const unsigned int val) {
- return Impl::atomic_fetch_oper(
- Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __host__ unsigned long long int atomic_fetch_min(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_fetch_oper(Impl::MinOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-inline __host__ int atomic_fetch_max(volatile int* const dest, const int val) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<const int, const int>(), dest,
- val);
-}
-
-inline __host__ unsigned int atomic_fetch_max(volatile unsigned int* const dest,
- const unsigned int val) {
- return Impl::atomic_fetch_oper(
- Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __host__ unsigned long long int atomic_fetch_max(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-#endif
-
-#if (350 > __CUDA_ARCH__)
-
-// Fallback for atomic{Min,Max} on devices below compute capability 3.5
-
-inline __device__ int atomic_fetch_min(volatile int* const dest,
- const int val) {
- return Impl::atomic_fetch_oper(Impl::MinOper<const int, const int>(), dest,
- val);
-}
-
-inline __device__ unsigned int atomic_fetch_min(
- volatile unsigned int* const dest, const unsigned int val) {
- return Impl::atomic_fetch_oper(
- Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __device__ unsigned long long int atomic_fetch_min(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_fetch_oper(Impl::MinOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-inline __device__ int atomic_fetch_max(volatile int* const dest,
- const int val) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<const int, const int>(), dest,
- val);
-}
-
-inline __device__ unsigned int atomic_fetch_max(
- volatile unsigned int* const dest, const unsigned int val) {
- return Impl::atomic_fetch_oper(
- Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __device__ unsigned long long int atomic_fetch_max(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-#else // Supported by devices of compute capability 3.5 and higher
-
-inline __device__ int atomic_fetch_min(volatile int* const dest,
- const int val) {
- return atomicMin((int*)dest, val);
-}
-
-inline __device__ unsigned int atomic_fetch_min(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicMin((unsigned int*)dest, val);
-}
-
-inline __device__ unsigned long long int atomic_fetch_min(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return atomicMin((unsigned long long int*)dest, val);
-}
-
-inline __device__ int atomic_fetch_max(volatile int* const dest,
- const int val) {
- return atomicMax((int*)dest, val);
-}
-
-inline __device__ unsigned int atomic_fetch_max(
- volatile unsigned int* const dest, const unsigned int val) {
- return atomicMax((unsigned int*)dest, val);
-}
-
-inline __device__ unsigned long long int atomic_fetch_max(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return atomicMax((unsigned long long int*)dest, val);
-}
-
-#endif
-
-// Atomic_{min,max}_fetch
-
-#ifdef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND
-
-// Host implementations for CLANG compiler
-
-inline __host__ int atomic_min_fetch(volatile int* const dest, const int val) {
- return Impl::atomic_oper_fetch(Impl::MinOper<const int, const int>(), dest,
- val);
-}
-
-inline __host__ unsigned int atomic_min_fetch(volatile unsigned int* const dest,
- const unsigned int val) {
- return Impl::atomic_oper_fetch(
- Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __host__ unsigned long long int atomic_min_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_oper_fetch(Impl::MinOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-inline __host__ int atomic_max_fetch(volatile int* const dest, const int val) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<const int, const int>(), dest,
- val);
-}
-
-inline __host__ unsigned int atomic_max_fetch(volatile unsigned int* const dest,
- const unsigned int val) {
- return Impl::atomic_oper_fetch(
- Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __host__ unsigned long long int atomic_max_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-#endif
-
-#if (350 > __CUDA_ARCH__)
-
-// Fallback for atomic{Min,Max} on devices below compute capability 3.5
-
-inline __device__ int atomic_min_fetch(volatile int* const dest,
- const int val) {
- return Impl::atomic_oper_fetch(Impl::MinOper<const int, const int>(), dest,
- val);
-}
-
-inline __device__ unsigned int atomic_min_fetch(
- volatile unsigned int* const dest, const unsigned int val) {
- return Impl::atomic_oper_fetch(
- Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __device__ unsigned long long int atomic_min_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_oper_fetch(Impl::MinOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-inline __device__ int atomic_max_fetch(volatile int* const dest,
- const int val) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<const int, const int>(), dest,
- val);
-}
-
-inline __device__ unsigned int atomic_max_fetch(
- volatile unsigned int* const dest, const unsigned int val) {
- return Impl::atomic_oper_fetch(
- Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
-}
-
-inline __device__ unsigned long long int atomic_max_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<const unsigned long long int,
- const unsigned long long int>(),
- dest, val);
-}
-
-#else // Supported by devices of compute capability 3.5 and higher
-
-inline __device__ int atomic_min_fetch(volatile int* const dest,
- const int val) {
- const int old = atomicMin((int*)dest, val);
- return old < val ? old : val;
-}
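-// (atomicMin/atomicMax return the value previously stored, so the
-// post-operation result is recovered as the smaller/larger of old and val.)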
-
-inline __device__ unsigned int atomic_min_fetch(
- volatile unsigned int* const dest, const unsigned int val) {
- const unsigned int old = atomicMin((unsigned int*)dest, val);
- return old < val ? old : val;
-}
-
-inline __device__ unsigned long long int atomic_min_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- const unsigned long long old = atomicMin((unsigned long long*)dest, val);
- return old < val ? old : val;
-}
-
-inline __device__ int atomic_max_fetch(volatile int* const dest,
- const int val) {
- const int old = atomicMax((int*)dest, val);
- return old >= val ? old : val;
-}
-
-inline __device__ unsigned int atomic_max_fetch(
- volatile unsigned int* const dest, const unsigned int val) {
- const unsigned int old = atomicMax((unsigned int*)dest, val);
- return old >= val ? old : val;
-}
-
-inline __device__ unsigned long long int atomic_max_fetch(
- volatile unsigned long long int* const dest,
- const unsigned long long int val) {
- const unsigned long long old = atomicMax((unsigned long long*)dest, val);
- return old >= val ? old : val;
-}
-
-#endif
-
-#endif
-#endif
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
-#define KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP)
-
-#include <impl/Kokkos_Atomic_Memory_Order.hpp>
-#include <impl/Kokkos_Atomic_Generic.hpp>
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-// Olivier's implementation helpfully binds to the same builtins as GNU, so
-// we make this code common across multiple options
-#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
- defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-
-#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
-#else
-#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
-#endif
-
-template <class T, class MemoryOrder>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH void _atomic_store(
- T* ptr, T val, MemoryOrder,
- std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8) &&
- std::is_same<typename MemoryOrder::memory_order,
- std::remove_cv_t<MemoryOrder>>::value,
- void const**> = nullptr) {
- __atomic_store_n(ptr, val, MemoryOrder::gnu_constant);
-}
-
-template <class T, class MemoryOrder>
-KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH void _atomic_store(
- T* ptr, T val, MemoryOrder,
- std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8) &&
- std::is_default_constructible<T>::value &&
- std::is_same<typename MemoryOrder::memory_order,
- std::remove_cv_t<MemoryOrder>>::value,
- void const**> = nullptr) {
- __atomic_store(ptr, &val, MemoryOrder::gnu_constant);
-}
-
-#undef KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH
-
-#elif defined(__CUDA_ARCH__)
-
-// Not compiling for Volta or later, or Cuda ASM atomics were manually disabled
-
-template <class T>
-__device__ __inline__ void _relaxed_atomic_store_impl(
- T* ptr, T val,
- std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8),
- void const**> = nullptr) {
- *ptr = val;
-}
-
-template <class T>
-struct StoreOper {
- __device__ __inline__ static constexpr T apply(T const&,
- T const& val) noexcept {
- return val;
- }
-};
-
-template <class T>
-__device__ __inline__ void _relaxed_atomic_store_impl(
- T* ptr, T val,
- std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
- sizeof(T) == 8),
- void const**> = nullptr) {
- Kokkos::Impl::atomic_oper_fetch(StoreOper<T>{}, ptr, (T &&) val);
-}
-
-template <class T>
-__device__ __inline__ void _atomic_store(T* ptr, T val,
- memory_order_seq_cst_t) {
- Kokkos::memory_fence();
- Impl::_relaxed_atomic_store_impl(ptr, val);
- Kokkos::memory_fence();
-}
-
-template <class T>
-__device__ __inline__ void _atomic_store(T* ptr, T val,
- memory_order_release_t) {
- Kokkos::memory_fence();
- _relaxed_atomic_store_impl(ptr, val);
-}
-
-template <class T>
-__device__ __inline__ void _atomic_store(T* ptr, T val,
- memory_order_relaxed_t) {
- _relaxed_atomic_store_impl(ptr, val);
-}
-
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline void _atomic_store(T* ptr, T val, MemoryOrder) {
- // AFAICT, all OpenMP atomics are sequentially consistent, so memory order
- // doesn't matter
-#pragma omp atomic write
- { *ptr = val; }
-}
-
-#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline void _atomic_store(T* ptr, T val, MemoryOrder) {
- *ptr = val;
-}
-
-#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
-
-template <class T, class MemoryOrder>
-inline void _atomic_store(T* ptr, T val, MemoryOrder) {
- atomic_exchange(ptr, val);
-}
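-// (On Windows the store is realized as an exchange, which is at least as
-// strong as any ordering accepted here.)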
-
-#endif // end of all atomic implementations
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
- Impl::memory_order_seq_cst_t) {
- _atomic_store(ptr, val, Impl::memory_order_seq_cst);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
- Impl::memory_order_release_t) {
- _atomic_store(ptr, val, Impl::memory_order_release);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
- Impl::memory_order_relaxed_t) {
- _atomic_store(ptr, val, Impl::memory_order_relaxed);
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* /*ptr*/, T /*val*/,
- Impl::memory_order_acquire_t) {
- static_assert(
- sizeof(T) == 0, // just something that will always be false, but only on
- // instantiation
- "atomic_store with memory order acquire doesn't make any sense!");
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* /*ptr*/, T /*val*/,
- Impl::memory_order_acq_rel_t) {
- static_assert(
- sizeof(T) == 0, // just something that will always be false, but only on
- // instantiation
- "atomic_store with memory order acq_rel doesn't make any sense!");
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val) {
- // relaxed by default!
- _atomic_store(ptr, val, Impl::memory_order_relaxed);
-}
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#if defined(KOKKOS_ENABLE_CUDA)
-#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
-#endif
-
-#endif // defined(KOKKOS_ATOMIC_HPP)
-#endif // KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-#ifndef KOKKOS_ATOMIC_VIEW_HPP
-#define KOKKOS_ATOMIC_VIEW_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <Kokkos_Atomic.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// The following tag is used to prevent an implicit call of the constructor
-// when trying to assign a literal 0 (an int), as in " = 0;".
-struct AtomicViewConstTag {};
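-// Without the tag, an assignment such as `element = 0` could implicitly
-// construct a temporary AtomicDataElement from a null pointer instead of
-// atomically storing the value 0.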
-
-template <class ViewTraits>
-class AtomicDataElement {
- public:
- using value_type = typename ViewTraits::value_type;
- using const_value_type = typename ViewTraits::const_value_type;
- using non_const_value_type = typename ViewTraits::non_const_value_type;
- volatile value_type* const ptr;
-
- KOKKOS_INLINE_FUNCTION
- AtomicDataElement(value_type* ptr_, AtomicViewConstTag) : ptr(ptr_) {}
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator=(const_value_type& val) const {
- *ptr = val;
- return val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator=(volatile const_value_type& val) const {
- *ptr = val;
- return val;
- }
-
- KOKKOS_INLINE_FUNCTION
- void inc() const { Kokkos::atomic_increment(ptr); }
-
- KOKKOS_INLINE_FUNCTION
- void dec() const { Kokkos::atomic_decrement(ptr); }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator++() const {
- const_value_type tmp =
- Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
- return tmp + 1;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator--() const {
- const_value_type tmp =
- Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
- return tmp - 1;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator++(int) const {
- return Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator--(int) const {
- return Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator+=(const_value_type& val) const {
- const_value_type tmp = Kokkos::atomic_fetch_add(ptr, val);
- return tmp + val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator+=(volatile const_value_type& val) const {
- const_value_type tmp = Kokkos::atomic_fetch_add(ptr, val);
- return tmp + val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator-=(const_value_type& val) const {
- const_value_type tmp = Kokkos::atomic_fetch_sub(ptr, val);
- return tmp - val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator-=(volatile const_value_type& val) const {
- const_value_type tmp = Kokkos::atomic_fetch_sub(ptr, val);
- return tmp - val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator*=(const_value_type& val) const {
- return Kokkos::atomic_mul_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator*=(volatile const_value_type& val) const {
- return Kokkos::atomic_mul_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator/=(const_value_type& val) const {
- return Kokkos::atomic_div_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator/=(volatile const_value_type& val) const {
- return Kokkos::atomic_div_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator%=(const_value_type& val) const {
- return Kokkos::atomic_mod_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator%=(volatile const_value_type& val) const {
- return Kokkos::atomic_mod_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&=(const_value_type& val) const {
- return Kokkos::atomic_and_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&=(volatile const_value_type& val) const {
- return Kokkos::atomic_and_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator^=(const_value_type& val) const {
- return Kokkos::atomic_xor_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator^=(volatile const_value_type& val) const {
- return Kokkos::atomic_xor_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator|=(const_value_type& val) const {
- return Kokkos::atomic_or_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator|=(volatile const_value_type& val) const {
- return Kokkos::atomic_or_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator<<=(const_value_type& val) const {
- return Kokkos::atomic_lshift_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator<<=(volatile const_value_type& val) const {
- return Kokkos::atomic_lshift_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator>>=(const_value_type& val) const {
- return Kokkos::atomic_rshift_fetch(ptr, val);
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator>>=(volatile const_value_type& val) const {
- return Kokkos::atomic_rshift_fetch(ptr, val);
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator+(const_value_type& val) const { return *ptr + val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator+(volatile const_value_type& val) const {
- return *ptr + val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator-(const_value_type& val) const { return *ptr - val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator-(volatile const_value_type& val) const {
- return *ptr - val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator*(const_value_type& val) const { return *ptr * val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator*(volatile const_value_type& val) const {
- return *ptr * val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator/(const_value_type& val) const { return *ptr / val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator/(volatile const_value_type& val) const {
- return *ptr / val;
- }
-
- KOKKOS_INLINE_FUNCTION
-  const_value_type operator%(const_value_type& val) const { return *ptr % val; }
-  KOKKOS_INLINE_FUNCTION
-  const_value_type operator%(volatile const_value_type& val) const {
-    return *ptr % val;
-  }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator!() const { return !*ptr; }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&&(const_value_type& val) const {
- return *ptr && val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&&(volatile const_value_type& val) const {
- return *ptr && val;
- }
-
- KOKKOS_INLINE_FUNCTION
-  const_value_type operator||(const_value_type& val) const {
-    return *ptr || val;
-  }
-  KOKKOS_INLINE_FUNCTION
-  const_value_type operator||(volatile const_value_type& val) const {
-    return *ptr || val;
-  }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&(const_value_type& val) const { return *ptr & val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator&(volatile const_value_type& val) const {
- return *ptr & val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator|(const_value_type& val) const { return *ptr | val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator|(volatile const_value_type& val) const {
- return *ptr | val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator^(const_value_type& val) const { return *ptr ^ val; }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator^(volatile const_value_type& val) const {
- return *ptr ^ val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator~() const { return ~*ptr; }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator<<(const unsigned int& val) const {
- return *ptr << val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator<<(volatile const unsigned int& val) const {
- return *ptr << val;
- }
-
- KOKKOS_INLINE_FUNCTION
- const_value_type operator>>(const unsigned int& val) const {
- return *ptr >> val;
- }
- KOKKOS_INLINE_FUNCTION
- const_value_type operator>>(volatile const unsigned int& val) const {
- return *ptr >> val;
- }
-
- KOKKOS_INLINE_FUNCTION
- bool operator==(const AtomicDataElement& val) const { return *ptr == val; }
- KOKKOS_INLINE_FUNCTION
- bool operator==(volatile const AtomicDataElement& val) const {
- return *ptr == val;
- }
-
- KOKKOS_INLINE_FUNCTION
- bool operator!=(const AtomicDataElement& val) const { return *ptr != val; }
- KOKKOS_INLINE_FUNCTION
- bool operator!=(volatile const AtomicDataElement& val) const {
- return *ptr != val;
- }
-
- KOKKOS_INLINE_FUNCTION
- bool operator>=(const_value_type& val) const { return *ptr >= val; }
- KOKKOS_INLINE_FUNCTION
- bool operator>=(volatile const_value_type& val) const { return *ptr >= val; }
-
- KOKKOS_INLINE_FUNCTION
- bool operator<=(const_value_type& val) const { return *ptr <= val; }
- KOKKOS_INLINE_FUNCTION
- bool operator<=(volatile const_value_type& val) const { return *ptr <= val; }
-
- KOKKOS_INLINE_FUNCTION
- bool operator<(const_value_type& val) const { return *ptr < val; }
- KOKKOS_INLINE_FUNCTION
- bool operator<(volatile const_value_type& val) const { return *ptr < val; }
-
- KOKKOS_INLINE_FUNCTION
- bool operator>(const_value_type& val) const { return *ptr > val; }
- KOKKOS_INLINE_FUNCTION
- bool operator>(volatile const_value_type& val) const { return *ptr > val; }
-
- KOKKOS_INLINE_FUNCTION
- operator const_value_type() const {
- // return Kokkos::atomic_load(ptr);
- return *ptr;
- }
-
- KOKKOS_INLINE_FUNCTION
- operator non_const_value_type() volatile const {
- return Kokkos::Impl::atomic_load(ptr);
- }
-};
-
-template <class ViewTraits>
-class AtomicViewDataHandle {
- public:
- typename ViewTraits::value_type* ptr;
-
- KOKKOS_INLINE_FUNCTION
- AtomicViewDataHandle() : ptr(nullptr) {}
-
- KOKKOS_INLINE_FUNCTION
- AtomicViewDataHandle(typename ViewTraits::value_type* ptr_) : ptr(ptr_) {}
-
- template <class iType>
- KOKKOS_INLINE_FUNCTION AtomicDataElement<ViewTraits> operator[](
- const iType& i) const {
- return AtomicDataElement<ViewTraits>(ptr + i, AtomicViewConstTag());
- }
-
- KOKKOS_INLINE_FUNCTION
- operator typename ViewTraits::value_type*() const { return ptr; }
-};
-
-template <unsigned Size>
-struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars;
-
-template <>
-struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars<4> {
- using type = int;
-};
-
-template <>
-struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars<8> {
- using type = int64_t;
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-#ifndef KOKKOS_ATOMIC_WINDOWS_HPP
-#define KOKKOS_ATOMIC_WINDOWS_HPP
-
-#ifdef _WIN32
-
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <winsock2.h>
-#include <windows.h>
-
-namespace Kokkos {
-namespace Impl {
-#ifdef _MSC_VER
-__declspec(align(16))
-#endif
- struct cas128_t {
- LONGLONG lower;
- LONGLONG upper;
- KOKKOS_INLINE_FUNCTION
- bool operator!=(const cas128_t& a) const {
-    return (lower != a.lower) || (upper != a.upper);
- }
-}
-#if defined(__GNUC__) || defined(__clang__)
-__attribute__((aligned(16)))
-#endif
-;
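-// The 16-byte alignment is mandatory: _InterlockedCompareExchange128
-// compiles to cmpxchg16b, which faults on an unaligned destination.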
-} // namespace Impl
-
-#if !defined(__CUDA_ARCH__) || defined(__clang__)
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(CHAR), const T&> val) {
- union U {
- CHAR i;
- T t;
-    KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
- tmp.i = _InterlockedCompareExchange8((CHAR*)dest, *((CHAR*)&val),
- *((CHAR*)&compare));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(SHORT), const T&> val) {
- union U {
- SHORT i;
- T t;
-    KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
- tmp.i = _InterlockedCompareExchange16((SHORT*)dest, *((SHORT*)&val),
- *((SHORT*)&compare));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(LONG), const T&> val) {
- union U {
- LONG i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
- tmp.i = _InterlockedCompareExchange((LONG*)dest, *((LONG*)&val),
- *((LONG*)&compare));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(LONGLONG), const T&> val) {
- union U {
- LONGLONG i;
- T t;
- KOKKOS_INLINE_FUNCTION U() {}
- } tmp;
-
- tmp.i = _InterlockedCompareExchange64((LONGLONG*)dest, *((LONGLONG*)&val),
- *((LONGLONG*)&compare));
- return tmp.t;
-}
-
-template <typename T>
-inline T atomic_compare_exchange(
- volatile T* const dest, const T& compare,
- std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
- T compare_and_result(compare);
- union U {
- Impl::cas128_t i;
- T t;
-    KOKKOS_INLINE_FUNCTION U() {}
- } newval;
- newval.t = val;
- _InterlockedCompareExchange128((LONGLONG*)dest, newval.i.upper,
- newval.i.lower,
-                                 (LONGLONG*)&compare_and_result);
- return compare_and_result;
-}
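-// _InterlockedCompareExchange128 writes the destination's previous contents
-// back into the comparand buffer, so compare_and_result holds the old value
-// whether or not the exchange took place.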
-#endif
-
-} // namespace Kokkos
-#endif
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#ifdef _WIN32
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#elif defined(__APPLE__)
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#else
-#include <unistd.h>
-#endif
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <cerrno>
-#include <string>
-
-namespace Kokkos {
-namespace Impl {
-
-int processors_per_node() {
-#ifdef _SC_NPROCESSORS_ONLN
- int const num_procs = sysconf(_SC_NPROCESSORS_ONLN);
- int const num_procs_max = sysconf(_SC_NPROCESSORS_CONF);
- if ((num_procs < 1) || (num_procs_max < 1)) {
- return -1;
- }
- return num_procs;
-#elif defined(__APPLE__)
- int ncpu = -1;
- int activecpu = -1;
- size_t size = sizeof(int);
- sysctlbyname("hw.ncpu", &ncpu, &size, nullptr, 0);
- sysctlbyname("hw.activecpu", &activecpu, &size, nullptr, 0);
- if (ncpu < 1 || activecpu < 1)
- return -1;
- else
- return activecpu;
-#else
- return -1;
-#endif
-}
-
-int mpi_ranks_per_node() {
- char *str;
- int ppn = 1;
- // if ((str = getenv("SLURM_TASKS_PER_NODE"))) {
- // ppn = std::stoi(str);
- // if(ppn<=0) ppn = 1;
- //}
- if ((str = getenv("MV2_COMM_WORLD_LOCAL_SIZE"))) {
- ppn = std::stoi(str);
- if (ppn <= 0) ppn = 1;
- }
- if ((str = getenv("OMPI_COMM_WORLD_LOCAL_SIZE"))) {
- ppn = std::stoi(str);
- if (ppn <= 0) ppn = 1;
- }
- return ppn;
-}
-
-int mpi_local_rank_on_node() {
- char *str;
- int local_rank = 0;
- // if ((str = getenv("SLURM_LOCALID"))) {
- // local_rank = std::stoi(str);
- //}
- if ((str = getenv("MV2_COMM_WORLD_LOCAL_RANK"))) {
- local_rank = std::stoi(str);
- }
- if ((str = getenv("OMPI_COMM_WORLD_LOCAL_RANK"))) {
- local_rank = std::stoi(str);
- }
- return local_rank;
-}
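-
-// A minimal sketch of how these helpers are typically combined, e.g. to
-// assign one GPU per MPI rank on a node in round-robin fashion
-// (select_device is a hypothetical helper, not part of this file):
-//
-//   int select_device(int num_devices) {
-//     int const local_rank = mpi_local_rank_on_node();
-//     return num_devices > 0 ? local_rank % num_devices : -1;
-//   }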
-
-} // namespace Impl
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-#ifndef KOKKOS_CPUDISCOVERY_HPP
-#define KOKKOS_CPUDISCOVERY_HPP
-
-namespace Kokkos {
-namespace Impl {
-
-int processors_per_node();
-int mpi_ranks_per_node();
-int mpi_local_rank_on_node();
-
-} // namespace Impl
-} // namespace Kokkos
-
-#endif // KOKKOS_CPUDISCOVERY_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_COMMAND_LINE_PARSING_HPP
-#define KOKKOS_COMMAND_LINE_PARSING_HPP
-
-#include <string>
-#include <regex>
-
-namespace Kokkos {
-namespace Impl {
-bool is_unsigned_int(const char* str);
-bool check_arg(char const* arg, char const* expected);
-bool check_arg_bool(char const* arg, char const* name, bool& val);
-bool check_arg_int(char const* arg, char const* name, int& val);
-bool check_arg_str(char const* arg, char const* name, std::string& val);
-bool check_env_bool(char const* name, bool& val);
-bool check_env_int(char const* name, int& val);
-void warn_deprecated_environment_variable(std::string deprecated);
-void warn_deprecated_environment_variable(std::string deprecated,
- std::string use_instead);
-void warn_deprecated_command_line_argument(std::string deprecated);
-void warn_deprecated_command_line_argument(std::string deprecated,
- std::string use_instead);
-void warn_not_recognized_command_line_argument(std::string not_recognized);
-void do_not_warn_not_recognized_command_line_argument(std::regex ignore);
-} // namespace Impl
-} // namespace Kokkos
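-
-// A usage sketch under the assumed semantics that each check_arg_*
-// helper returns true when `arg` matches `name` and, on a match, parses
-// the value into `val`; an argv scan then reduces to a chain of calls
-// (parse is a hypothetical driver, not part of this interface):
-//
-//   void parse(int argc, char* argv[], int& num_threads, int& device_id) {
-//     for (int i = 1; i < argc; ++i) {
-//       if (Kokkos::Impl::check_arg_int(argv[i], "--kokkos-num-threads",
-//                                       num_threads))
-//         continue;
-//       (void)Kokkos::Impl::check_arg_int(argv[i], "--kokkos-device-id",
-//                                         device_id);
-//     }
-//   }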
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
-#define KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
-
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class ExecutionSpace>
-struct GraphNodeKernelDefaultImpl;
-
-template <class ExecutionSpace>
-struct GraphNodeAggregateKernelDefaultImpl;
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_DEVICE_MANAGEMENT_HPP
-#define KOKKOS_DEVICE_MANAGEMENT_HPP
-
-#include <vector>
-
-namespace Kokkos {
-class InitializationSettings;
-namespace Impl {
-int get_gpu(const Kokkos::InitializationSettings& settings);
-// This declaration is provided for testing purposes only
-int get_ctest_gpu(const char* local_rank_str);
-// likewise provided for testing purposes only
-std::vector<int> get_visible_devices(
- Kokkos::InitializationSettings const& settings, int device_count);
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <cstring>
-#include <cstdlib>
-
-#include <iostream>
-#include <sstream>
-#include <iomanip>
-#include <stdexcept>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_Stacktrace.hpp>
-#include <Cuda/Kokkos_Cuda_Error.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-void traceback_callstack(std::ostream &msg) {
-#ifdef KOKKOS_IMPL_ENABLE_STACKTRACE
- msg << "\nBacktrace:\n";
- save_stacktrace();
- print_demangled_saved_stacktrace(msg);
-#else
- msg << "\nTraceback functionality not available\n";
-#endif
-}
-
-void throw_runtime_exception(const std::string &msg) {
- throw std::runtime_error(msg);
-}
-
-void host_abort(const char *const message) {
- std::cerr << message;
- traceback_callstack(std::cerr);
- ::abort();
-}
-
-std::string human_memory_size(size_t arg_bytes) {
- double bytes = arg_bytes;
- const double K = 1024;
- const double M = K * 1024;
- const double G = M * 1024;
-
- std::ostringstream out;
- if (bytes < K) {
- out << std::setprecision(4) << bytes << " B";
- } else if (bytes < M) {
- bytes /= K;
- out << std::setprecision(4) << bytes << " K";
- } else if (bytes < G) {
- bytes /= M;
- out << std::setprecision(4) << bytes << " M";
- } else {
- bytes /= G;
- out << std::setprecision(4) << bytes << " G";
- }
- return out.str();
-}
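-
-// For illustration, a few sample mappings produced by human_memory_size
-// (the values follow directly from the thresholds above):
-//
-//   human_memory_size(512)        -> "512 B"
-//   human_memory_size(2048)       -> "2 K"
-//   human_memory_size(3 << 20)    -> "3 M"
-//   human_memory_size(5ull << 30) -> "5 G"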
-
-} // namespace Impl
-
-void Experimental::RawMemoryAllocationFailure::print_error_message(
- std::ostream &o) const {
- o << "Allocation of size " << Impl::human_memory_size(m_attempted_size);
- o << " failed";
- switch (m_failure_mode) {
- case FailureMode::OutOfMemoryError:
- o << ", likely due to insufficient memory.";
- break;
- case FailureMode::AllocationNotAligned:
- o << " because the allocation was improperly aligned.";
- break;
- case FailureMode::InvalidAllocationSize:
- o << " because the requested allocation size is not a valid size for the"
- " requested allocation mechanism (it's probably too large).";
- break;
- // TODO move this to the subclass for Cuda-related things
- case FailureMode::MaximumCudaUVMAllocationsExceeded:
- o << " because the maximum Cuda UVM allocations was exceeded.";
- break;
- case FailureMode::Unknown: o << " because of an unknown error."; break;
- }
- o << " (The allocation mechanism was ";
- switch (m_mechanism) {
- case AllocationMechanism::StdMalloc: o << "standard malloc()."; break;
- case AllocationMechanism::PosixMemAlign: o << "posix_memalign()."; break;
- case AllocationMechanism::PosixMMap: o << "POSIX mmap()."; break;
- case AllocationMechanism::IntelMMAlloc:
- o << "the Intel _mm_malloc() intrinsic.";
- break;
- case AllocationMechanism::CudaMalloc: o << "cudaMalloc()."; break;
- case AllocationMechanism::CudaMallocManaged:
- o << "cudaMallocManaged().";
- break;
- case AllocationMechanism::CudaHostAlloc: o << "cudaHostAlloc()."; break;
- case AllocationMechanism::HIPMalloc: o << "hipMalloc()."; break;
- case AllocationMechanism::HIPHostMalloc: o << "hipHostMalloc()."; break;
- case AllocationMechanism::HIPMallocManaged:
- o << "hipMallocManaged().";
- break;
- case AllocationMechanism::SYCLMallocDevice:
- o << "sycl::malloc_device().";
- break;
- case AllocationMechanism::SYCLMallocShared:
- o << "sycl::malloc_shared().";
- break;
- case AllocationMechanism::SYCLMallocHost:
- o << "sycl::malloc_host().";
- break;
- }
- append_additional_error_information(o);
- o << ")" << std::endl;
-}
-
-std::string Experimental::RawMemoryAllocationFailure::get_error_message()
- const {
- std::ostringstream out;
- print_error_message(out);
- return out.str();
-}
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-#ifdef KOKKOS_ENABLE_CUDA
-namespace Experimental {
-
-void CudaRawMemoryAllocationFailure::append_additional_error_information(
- std::ostream &o) const {
- if (m_error_code != cudaSuccess) {
- o << " The Cuda allocation returned the error code \"\""
- << cudaGetErrorName(m_error_code) << "\".";
- }
-}
-
-} // end namespace Experimental
-#endif
-
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_ERROR_HPP
-#define KOKKOS_IMPL_ERROR_HPP
-
-#include <string>
-#include <iosfwd>
-#include <new> // std::bad_alloc, the base class used below
-#include <Kokkos_Macros.hpp>
-#ifdef KOKKOS_ENABLE_CUDA
-#include <Cuda/Kokkos_Cuda_abort.hpp>
-#endif
-#ifdef KOKKOS_ENABLE_HIP
-#include <HIP/Kokkos_HIP_Abort.hpp>
-#endif
-#ifdef KOKKOS_ENABLE_SYCL
-#include <SYCL/Kokkos_SYCL_Abort.hpp>
-#endif
-
-#ifndef KOKKOS_ABORT_MESSAGE_BUFFER_SIZE
-#define KOKKOS_ABORT_MESSAGE_BUFFER_SIZE 2048
-#endif // ifndef KOKKOS_ABORT_MESSAGE_BUFFER_SIZE
-
-namespace Kokkos {
-namespace Impl {
-
-[[noreturn]] void host_abort(const char *const);
-
-#if defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)
-
-#if defined(__APPLE__) || defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
-// cuda_abort does not abort when building for macOS.
-// This is required to work around failures in the random number
-// generator unit tests on pre-Volta architectures.
-#define KOKKOS_IMPL_ABORT_NORETURN
-#else
-// cuda_abort aborts when building for platforms other than macOS
-#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
-#endif
-
-#elif defined(KOKKOS_COMPILER_NVHPC)
-
-#define KOKKOS_IMPL_ABORT_NORETURN
-
-#elif defined(KOKKOS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)
-// HIP aborts
-#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
-#elif defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
-// FIXME_SYCL SYCL doesn't abort
-#define KOKKOS_IMPL_ABORT_NORETURN
-#elif !defined(KOKKOS_ENABLE_OPENMPTARGET)
-// Host aborts
-#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
-#else
-// Everything else does not abort
-#define KOKKOS_IMPL_ABORT_NORETURN
-#endif
-
-#ifdef KOKKOS_ENABLE_SYCL // FIXME_SYCL
-#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE
-#else
-#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE KOKKOS_IMPL_ABORT_NORETURN
-#endif
-
-#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
- defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_OPENMPTARGET)
-KOKKOS_IMPL_ABORT_NORETURN_DEVICE inline KOKKOS_IMPL_DEVICE_FUNCTION void
-device_abort(const char *const msg) {
-#if defined(KOKKOS_ENABLE_CUDA)
- ::Kokkos::Impl::cuda_abort(msg);
-#elif defined(KOKKOS_ENABLE_HIP)
- ::Kokkos::Impl::hip_abort(msg);
-#elif defined(KOKKOS_ENABLE_SYCL)
- ::Kokkos::Impl::sycl_abort(msg);
-#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
- printf("%s", msg); // FIXME_OPENMPTARGET
-#else
-#error faulty logic
-#endif
-}
-#endif
-
-[[noreturn]] void throw_runtime_exception(const std::string &msg);
-
-void traceback_callstack(std::ostream &);
-
-std::string human_memory_size(size_t arg_bytes);
-
-} // namespace Impl
-
-namespace Experimental {
-
-class RawMemoryAllocationFailure : public std::bad_alloc {
- public:
- enum class FailureMode {
- OutOfMemoryError,
- AllocationNotAligned,
- InvalidAllocationSize,
- MaximumCudaUVMAllocationsExceeded,
- Unknown
- };
- enum class AllocationMechanism {
- StdMalloc,
- PosixMemAlign,
- PosixMMap,
- IntelMMAlloc,
- CudaMalloc,
- CudaMallocManaged,
- CudaHostAlloc,
- HIPMalloc,
- HIPHostMalloc,
- HIPMallocManaged,
- SYCLMallocDevice,
- SYCLMallocShared,
- SYCLMallocHost
- };
-
- private:
- size_t m_attempted_size;
- size_t m_attempted_alignment;
- FailureMode m_failure_mode;
- AllocationMechanism m_mechanism;
-
- public:
- RawMemoryAllocationFailure(
- size_t arg_attempted_size, size_t arg_attempted_alignment,
- FailureMode arg_failure_mode = FailureMode::OutOfMemoryError,
- AllocationMechanism arg_mechanism =
- AllocationMechanism::StdMalloc) noexcept
- : m_attempted_size(arg_attempted_size),
- m_attempted_alignment(arg_attempted_alignment),
- m_failure_mode(arg_failure_mode),
- m_mechanism(arg_mechanism) {}
-
- RawMemoryAllocationFailure() noexcept = delete;
-
- RawMemoryAllocationFailure(RawMemoryAllocationFailure const &) noexcept =
- default;
- RawMemoryAllocationFailure(RawMemoryAllocationFailure &&) noexcept = default;
-
- RawMemoryAllocationFailure &operator=(
- RawMemoryAllocationFailure const &) noexcept = default;
- RawMemoryAllocationFailure &operator=(
- RawMemoryAllocationFailure &&) noexcept = default;
-
- ~RawMemoryAllocationFailure() noexcept override = default;
-
- KOKKOS_ATTRIBUTE_NODISCARD
- const char *what() const noexcept override {
- if (m_failure_mode == FailureMode::OutOfMemoryError) {
- return "Memory allocation error: out of memory";
- } else if (m_failure_mode == FailureMode::AllocationNotAligned) {
- return "Memory allocation error: allocation result was under-aligned";
- }
-
- return "Memory allocation error"; // fallback: the remaining failure modes reach here
- }
-
- KOKKOS_ATTRIBUTE_NODISCARD
- size_t attempted_size() const noexcept { return m_attempted_size; }
-
- KOKKOS_ATTRIBUTE_NODISCARD
- size_t attempted_alignment() const noexcept { return m_attempted_alignment; }
-
- KOKKOS_ATTRIBUTE_NODISCARD
- AllocationMechanism allocation_mechanism() const noexcept {
- return m_mechanism;
- }
-
- KOKKOS_ATTRIBUTE_NODISCARD
- FailureMode failure_mode() const noexcept { return m_failure_mode; }
-
- void print_error_message(std::ostream &o) const;
- KOKKOS_ATTRIBUTE_NODISCARD
- std::string get_error_message() const;
-
- virtual void append_additional_error_information(std::ostream &) const {}
-};
-
-} // end namespace Experimental
-
-} // namespace Kokkos
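-
-// A usage sketch: since RawMemoryAllocationFailure derives from
-// std::bad_alloc, callers can catch it specifically to get the detailed
-// diagnostic before rethrowing (allocate_views is a placeholder for any
-// allocating call):
-//
-//   try {
-//     allocate_views();
-//   } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& e) {
-//     e.print_error_message(std::cerr);  // size, failure mode, mechanism
-//     throw;
-//   }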
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-KOKKOS_IMPL_ABORT_NORETURN KOKKOS_INLINE_FUNCTION void abort(
- const char *const message) {
- KOKKOS_IF_ON_HOST(::Kokkos::Impl::host_abort(message);)
- KOKKOS_IF_ON_DEVICE(::Kokkos::Impl::device_abort(message);)
-}
-
-#undef KOKKOS_IMPL_ABORT_NORETURN
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#if !defined(NDEBUG) || defined(KOKKOS_ENFORCE_CONTRACTS) || \
- defined(KOKKOS_ENABLE_DEBUG)
-#define KOKKOS_EXPECTS(...) \
- { \
- if (!bool(__VA_ARGS__)) { \
- ::Kokkos::abort( \
- "Kokkos contract violation:\n " \
- " Expected precondition `" #__VA_ARGS__ \
- "` evaluated false.\n" \
- "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
- __LINE__) " \n"); \
- } \
- }
-#define KOKKOS_ENSURES(...) \
- { \
- if (!bool(__VA_ARGS__)) { \
- ::Kokkos::abort( \
- "Kokkos contract violation:\n " \
- " Ensured postcondition `" #__VA_ARGS__ \
- "` evaluated false.\n" \
- "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
- __LINE__) " \n"); \
- } \
- }
-// some projects already define this for themselves, so don't mess
-// them up
-#ifndef KOKKOS_ASSERT
-#define KOKKOS_ASSERT(...) \
- { \
- if (!bool(__VA_ARGS__)) { \
- ::Kokkos::abort( \
- "Kokkos contract violation:\n " \
- " Asserted condition `" #__VA_ARGS__ \
- "` evaluated false.\n" \
- "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
- __LINE__) " \n"); \
- } \
- }
-#endif // ifndef KOKKOS_ASSERT
-#else // not debug mode
-#define KOKKOS_EXPECTS(...)
-#define KOKKOS_ENSURES(...)
-#ifndef KOKKOS_ASSERT
-#define KOKKOS_ASSERT(...)
-#endif // ifndef KOKKOS_ASSERT
-#endif // end debug mode ifdefs
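-
-// A usage sketch (inverse is a hypothetical function): the contract
-// macros compile to nothing outside debug builds, so they are meant for
-// cheap internal invariants rather than user-input validation:
-//
-//   KOKKOS_INLINE_FUNCTION
-//   double inverse(double x) {
-//     KOKKOS_EXPECTS(x != 0.0);  // aborts with file:line in debug builds
-//     return 1.0 / x;
-//   }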
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #ifndef KOKKOS_IMPL_ERROR_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Core.hpp>
-#include <sstream>
-
-namespace Kokkos {
-namespace Impl {
-PerTeamValue::PerTeamValue(size_t arg) : value(arg) {}
-
-PerThreadValue::PerThreadValue(size_t arg) : value(arg) {}
-} // namespace Impl
-
-Impl::PerTeamValue PerTeam(const size_t& arg) {
- return Impl::PerTeamValue(arg);
-}
-
-Impl::PerThreadValue PerThread(const size_t& arg) {
- return Impl::PerThreadValue(arg);
-}
-
-void team_policy_check_valid_storage_level_argument(int level) {
- if (!(level == 0 || level == 1)) {
- std::stringstream ss;
- ss << "TeamPolicy::set_scratch_size(/*level*/ " << level
- << ", ...) storage level argument must be 0 or 1 to be valid\n";
- Impl::throw_runtime_exception(ss.str());
- }
-}
-
-} // namespace Kokkos
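-
-// A usage sketch: PerTeam and PerThread wrap byte counts so that
-// TeamPolicy::set_scratch_size can tell the two apart; the storage level
-// validated above must be 0 or 1 (nteams is an assumed variable):
-//
-//   auto policy = Kokkos::TeamPolicy<>(nteams, Kokkos::AUTO)
-//                     .set_scratch_size(0, Kokkos::PerTeam(4096),
-//                                       Kokkos::PerThread(64));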
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_EXEC_SPACE_MANAGER_HPP
-#define KOKKOS_EXEC_SPACE_MANAGER_HPP
-
-#include <impl/Kokkos_InitializationSettings.hpp>
-#include <Kokkos_DetectionIdiom.hpp>
-#include <Kokkos_Concepts.hpp>
-
-#include <iosfwd>
-#include <map>
-#include <memory> // std::unique_ptr / std::make_unique used below
-#include <string>
-
-namespace {
-
-template <class T>
-using public_member_types_t = std::enable_if_t<
- Kokkos::is_execution_space<typename T::execution_space>::value &&
- Kokkos::is_memory_space<typename T::memory_space>::value &&
- Kokkos::is_device<typename T::device_type>::value &&
- Kokkos::is_array_layout<typename T::array_layout>::value &&
- std::is_integral<typename T::size_type>::value &&
- Kokkos::is_memory_space<typename T::scratch_memory_space>::value>;
-
-template <class T>
-using print_configuration_t = std::enable_if_t<
- std::is_void<decltype(std::declval<T const&>().print_configuration(
- std::declval<std::ostream&>()))>::value &&
- std::is_void<decltype(std::declval<T const&>().print_configuration(
- std::declval<std::ostream&>(), false))>::value>;
-
-template <class T>
-using initialize_finalize_t = std::enable_if_t<
- std::is_void<decltype(T::impl_initialize(
- std::declval<Kokkos::InitializationSettings const&>()))>::value &&
- std::is_void<decltype(T::impl_finalize())>::value>;
-
-template <class T>
-using fence_t = std::enable_if_t<
- std::is_void<decltype(std::declval<T const&>().fence())>::value &&
- std::is_void<decltype(std::declval<T const&>().fence("name"))>::value &&
- std::is_void<decltype(T::impl_static_fence("name"))>::value>;
-
-#define STATIC_ASSERT(...) static_assert(__VA_ARGS__, "") // FIXME C++17
-
-template <class ExecutionSpace>
-constexpr bool check_valid_execution_space() {
- using Kokkos::is_detected;
- STATIC_ASSERT(std::is_default_constructible<ExecutionSpace>::value);
- STATIC_ASSERT(is_detected<public_member_types_t, ExecutionSpace>::value);
- STATIC_ASSERT(is_detected<print_configuration_t, ExecutionSpace>::value);
- STATIC_ASSERT(is_detected<initialize_finalize_t, ExecutionSpace>::value);
- STATIC_ASSERT(is_detected<fence_t, ExecutionSpace>::value);
-#ifndef KOKKOS_ENABLE_HPX // FIXME_HPX
- STATIC_ASSERT(sizeof(ExecutionSpace) <= 2 * sizeof(void*));
-#endif
- return true;
-}
-
-#undef STATIC_ASSERT
-
-} // namespace
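-
-// The detection idiom used above, in miniature: is_detected<Op, T> is
-// true iff Op<T> names a well-formed type, which turns "does T have a
-// const fence() member?" into a compile-time constant (has_fence_t is an
-// illustrative alias, not part of this header):
-//
-//   template <class T>
-//   using has_fence_t = decltype(std::declval<T const&>().fence());
-//   static_assert(Kokkos::is_detected<has_fence_t,
-//                     Kokkos::DefaultExecutionSpace>::value, "");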
-
-namespace Kokkos {
-namespace Impl {
-
-struct ExecSpaceBase {
- virtual void initialize(InitializationSettings const&) = 0;
- virtual void finalize() = 0;
- virtual void static_fence(std::string const&) = 0;
- virtual void print_configuration(std::ostream& os, bool verbose) = 0;
- virtual ~ExecSpaceBase() = default;
-};
-
-template <class ExecutionSpace>
-struct ExecSpaceDerived : ExecSpaceBase {
- static_assert(check_valid_execution_space<ExecutionSpace>(), "");
- void initialize(InitializationSettings const& settings) final {
- ExecutionSpace::impl_initialize(settings);
- }
- void finalize() final { ExecutionSpace::impl_finalize(); }
- void static_fence(std::string const& label) final {
- ExecutionSpace::impl_static_fence(label);
- }
- void print_configuration(std::ostream& os, bool verbose) final {
- ExecutionSpace().print_configuration(os, verbose);
- }
-};
-
-/* ExecSpaceManager - Responsible for initializing all registered
- * backends. Backends register themselves via register_space_factory(),
- * invoked from a global context (see initialize_space_factory() below),
- * so that registration happens before initialize_spaces() is called
- * from Kokkos::initialize().
- */
-class ExecSpaceManager {
- std::map<std::string, std::unique_ptr<ExecSpaceBase>> exec_space_factory_list;
- ExecSpaceManager() = default;
-
- public:
- void register_space_factory(std::string name,
- std::unique_ptr<ExecSpaceBase> ptr);
- void initialize_spaces(const Kokkos::InitializationSettings& settings);
- void finalize_spaces();
- void static_fence(const std::string&);
- void print_configuration(std::ostream& os, bool verbose);
- static ExecSpaceManager& get_instance();
-};
-
-template <class ExecutionSpace>
-int initialize_space_factory(std::string name) {
- auto space_ptr = std::make_unique<ExecSpaceDerived<ExecutionSpace>>();
- ExecSpaceManager::get_instance().register_space_factory(name,
- std::move(space_ptr));
- return 1;
-}
-
-} // namespace Impl
-} // namespace Kokkos
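-
-// A registration sketch: a backend translation unit registers itself at
-// static initialization time, so the factory is known before
-// Kokkos::initialize() runs (the "100_" prefix is assumed here to order
-// initialization alphabetically across backends):
-//
-//   namespace {
-//   int serial_space_registered =
-//       Kokkos::Impl::initialize_space_factory<Kokkos::Serial>("100_Serial");
-//   }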
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
-#define KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
-
-#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_Atomic.hpp>
-
-#include <Kokkos_PointerOwnership.hpp>
-#include <impl/Kokkos_SimpleTaskScheduler.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class DeviceType, size_t Size, size_t Align = 1,
- class SizeType = typename DeviceType::execution_space::size_type>
-class FixedBlockSizeMemoryPool
- : private MemorySpaceInstanceStorage<typename DeviceType::memory_space> {
- public:
- using memory_space = typename DeviceType::memory_space;
- using size_type = SizeType;
-
- private:
- using memory_space_storage_base =
- MemorySpaceInstanceStorage<typename DeviceType::memory_space>;
- using tracker_type = Kokkos::Impl::SharedAllocationTracker;
- using record_type = Kokkos::Impl::SharedAllocationRecord<memory_space>;
-
- struct alignas(Align) Block {
- union {
- char ignore;
- char data[Size];
- };
- };
-
- static constexpr auto actual_size = sizeof(Block);
-
- // TODO shared allocation tracker
- // TODO @optimization put the index values on different cache lines (CPU) or
- // pages (GPU)?
-
- tracker_type m_tracker = {};
- size_type m_num_blocks = 0;
- size_type m_first_free_idx = 0;
- size_type m_last_free_idx = 0;
- Kokkos::OwningRawPtr<Block> m_first_block = nullptr;
- Kokkos::OwningRawPtr<size_type> m_free_indices = nullptr;
-
- enum : size_type { IndexInUse = ~size_type(0) };
-
- public:
- FixedBlockSizeMemoryPool(memory_space const& mem_space, size_type num_blocks)
- : memory_space_storage_base(mem_space),
- m_tracker(),
- m_num_blocks(num_blocks),
- m_first_free_idx(0),
- m_last_free_idx(num_blocks) {
- // TODO alignment?
- auto block_record = record_type::allocate(
- mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(Block));
- KOKKOS_ASSERT(intptr_t(block_record->data()) % Align == 0);
- m_tracker.assign_allocated_record_to_uninitialized(block_record);
- m_first_block = (Block*)block_record->data();
-
- auto idx_record =
- record_type::allocate(mem_space, "Kokkos::FixedBlockSizeMemPool_blocks",
- num_blocks * sizeof(size_type));
- KOKKOS_ASSERT(intptr_t(idx_record->data()) % alignof(size_type) == 0);
- m_tracker.assign_allocated_record_to_uninitialized(idx_record);
- m_free_indices = (size_type*)idx_record->data();
-
- for (size_type i = 0; i < num_blocks; ++i) {
- m_free_indices[i] = i;
- }
-
- Kokkos::memory_fence();
- }
-
- // For compatibility with MemoryPool<>
- FixedBlockSizeMemoryPool(memory_space const& mem_space,
- size_t mempool_capacity, unsigned, unsigned,
- unsigned)
- : FixedBlockSizeMemoryPool(
- mem_space, mempool_capacity /
- actual_size) { /* forwarding ctor, must be empty */
- }
-
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool() = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool(
- FixedBlockSizeMemoryPool&&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool(
- FixedBlockSizeMemoryPool const&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool& operator=(
- FixedBlockSizeMemoryPool&&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool& operator=(
- FixedBlockSizeMemoryPool const&) = default;
-
- KOKKOS_INLINE_FUNCTION
- void* allocate(size_type alloc_size) const noexcept {
- (void)alloc_size;
- KOKKOS_EXPECTS(alloc_size <= Size);
- auto free_idx_counter = Kokkos::atomic_fetch_add(
- (volatile size_type*)&m_first_free_idx, size_type(1));
- auto free_idx_idx = free_idx_counter % m_num_blocks;
-
- // We don't have exclusive access to m_free_indices[free_idx_idx] because
- // the allocate counter might have lapped us since we incremented it
- auto current_free_idx = m_free_indices[free_idx_idx];
- size_type free_idx = IndexInUse;
- free_idx = Kokkos::atomic_compare_exchange(&m_free_indices[free_idx_idx],
- current_free_idx, free_idx);
- Kokkos::memory_fence();
-
- // TODO figure out how to decrement here?
-
- if (free_idx == IndexInUse) {
- return nullptr;
- } else {
- return (void*)&m_first_block[free_idx];
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- void deallocate(void* ptr, size_type /*alloc_size*/) const noexcept {
- // figure out which block we are
- auto offset = intptr_t(ptr) - intptr_t(m_first_block);
-
- KOKKOS_EXPECTS(offset % actual_size == 0 &&
- offset / actual_size < m_num_blocks);
-
- Kokkos::memory_fence();
- auto last_idx_idx = Kokkos::atomic_fetch_add(
- (volatile size_type*)&m_last_free_idx, size_type(1));
- last_idx_idx %= m_num_blocks;
- m_free_indices[last_idx_idx] = offset / actual_size;
- }
-};
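-
-// A usage sketch: the pool hands out fixed Size-byte blocks and
-// allocate() may return nullptr when no block is free, so callers must
-// check (Device and the sizes below are illustrative choices):
-//
-//   using Device = Kokkos::Device<Kokkos::Serial, Kokkos::HostSpace>;
-//   Kokkos::Impl::FixedBlockSizeMemoryPool<Device, 64, 8> pool(
-//       Kokkos::HostSpace(), /*num_blocks=*/1024);
-//   void* p = pool.allocate(48);     // requested size must be <= 64
-//   if (p) pool.deallocate(p, 48);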
-
-#if 0
-template <
- class DeviceType,
- size_t Size,
- size_t Align=1,
- class SizeType = typename DeviceType::execution_space::size_type
->
-class FixedBlockSizeChaseLevMemoryPool
- : private MemorySpaceInstanceStorage<typename DeviceType::memory_space>
-{
-public:
-
- using memory_space = typename DeviceType::memory_space;
- using size_type = SizeType;
-
-private:
-
- using memory_space_storage_base = MemorySpaceInstanceStorage<typename DeviceType::memory_space>;
- using tracker_type = Kokkos::Impl::SharedAllocationTracker;
- using record_type = Kokkos::Impl::SharedAllocationRecord<memory_space>;
-
- struct alignas(Align) Block { union { char ignore; char data[Size]; }; };
-
- static constexpr auto actual_size = sizeof(Block);
-
- tracker_type m_tracker = { };
- size_type m_num_blocks = 0;
- size_type m_first_free_idx = 0;
- size_type m_last_free_idx = 0;
- Kokkos::OwningRawPtr<Block> m_first_block = nullptr;
- Kokkos::OwningRawPtr<size_type> m_free_indices = nullptr;
-
-
- enum : size_type { IndexInUse = ~size_type(0) };
-
-public:
-
- FixedBlockSizeChaseLevMemoryPool(
- memory_space const& mem_space,
- size_type num_blocks
- ) : memory_space_storage_base(mem_space),
- m_tracker(),
- m_num_blocks(num_blocks),
- m_first_free_idx(0),
- m_last_free_idx(num_blocks)
- {
- // TODO alignment?
- auto block_record = record_type::allocate(
- mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(Block)
- );
- KOKKOS_ASSERT(intptr_t(block_record->data()) % Align == 0);
- m_tracker.assign_allocated_record_to_uninitialized(block_record);
- m_first_block = (Block*)block_record->data();
-
- auto idx_record = record_type::allocate(
- mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(size_type)
- );
- KOKKOS_ASSERT(intptr_t(idx_record->data()) % alignof(size_type) == 0);
- m_tracker.assign_allocated_record_to_uninitialized(idx_record);
- m_free_indices = (size_type*)idx_record->data();
-
- for(size_type i = 0; i < num_blocks; ++i) {
- m_free_indices[i] = i;
- }
-
- Kokkos::memory_fence();
- }
-
- // For compatibility with MemoryPool<>
- FixedBlockSizeChaseLevMemoryPool(
- memory_space const& mem_space,
- size_t mempool_capacity,
- unsigned, unsigned, unsigned
- ) : FixedBlockSizeChaseLevMemoryPool(mem_space, mempool_capacity / actual_size)
- { /* forwarding ctor, must be empty */ }
-
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool() = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool(FixedBlockSizeChaseLevMemoryPool&&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool(FixedBlockSizeChaseLevMemoryPool const&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool& operator=(FixedBlockSizeChaseLevMemoryPool&&) = default;
- KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool& operator=(FixedBlockSizeChaseLevMemoryPool const&) = default;
-
-
- KOKKOS_INLINE_FUNCTION
- void* allocate(size_type alloc_size) const noexcept
- {
- KOKKOS_EXPECTS(alloc_size <= Size);
- auto free_idx_counter = Kokkos::atomic_fetch_add((volatile size_type*)&m_first_free_idx, size_type(1));
- auto free_idx_idx = free_idx_counter % m_num_blocks;
-
- // We don't have exclusive access to m_free_indices[free_idx_idx] because
- // the allocate counter might have lapped us since we incremented it
- auto current_free_idx = m_free_indices[free_idx_idx];
- size_type free_idx = IndexInUse;
- free_idx =
- Kokkos::atomic_compare_exchange(&m_free_indices[free_idx_idx], current_free_idx, free_idx);
- Kokkos::memory_fence();
-
- // TODO figure out how to decrement here?
-
- if(free_idx == IndexInUse) {
- return nullptr;
- }
- else {
- return (void*)&m_first_block[free_idx];
- }
- }
-
- KOKKOS_INLINE_FUNCTION
- void deallocate(void* ptr, size_type alloc_size) const noexcept
- {
- // figure out which block we are
- auto offset = intptr_t(ptr) - intptr_t(m_first_block);
-
- KOKKOS_EXPECTS(offset % actual_size == 0 && offset/actual_size < m_num_blocks);
-
- Kokkos::memory_fence();
- auto last_idx_idx = Kokkos::atomic_fetch_add((volatile size_type*)&m_last_free_idx, size_type(1));
- last_idx_idx %= m_num_blocks;
- m_free_indices[last_idx_idx] = offset / actual_size;
- }
-
-};
-#endif
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
-#define KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
-
-#include <Kokkos_Macros.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class ExecutionSpace, class Kernel, class Predecessor>
-struct GraphNodeImpl;
-
-template <class ExecutionSpace>
-struct GraphImpl;
-
-template <class ExecutionSpace, class Policy, class Functor,
- class KernelTypeTag, class... Args>
-class GraphNodeKernelImpl;
-
-struct _graph_node_kernel_ctor_tag {};
-struct _graph_node_predecessor_ctor_tag {};
-struct _graph_node_is_root_ctor_tag {};
-
-struct GraphAccess;
-
-// Customizable for backends
-template <class ExecutionSpace>
-struct GraphNodeBackendSpecificDetails;
-
-// Customizable for backends
-template <class ExecutionSpace, class Kernel, class PredecessorRef>
-struct GraphNodeBackendDetailsBeforeTypeErasure;
-
-// TODO move this to a more appropriate place
-struct DoNotExplicitlySpecifyThisTemplateParameter;
-
-struct KernelInGraphProperty {};
-
-struct IsGraphKernelTag {};
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <cstddef>
-#include <cstdlib>
-#include <cstdint>
-#include <cstring>
-
-#include <iostream>
-#include <sstream>
-#include <cstring>
-#include <algorithm>
-
-#include <Kokkos_HBWSpace.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <Kokkos_Atomic.hpp>
-#ifdef KOKKOS_ENABLE_HBWSPACE
-#include <memkind.h>
-#endif
-
-#include <impl/Kokkos_Tools.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-#ifdef KOKKOS_ENABLE_HBWSPACE
-#define MEMKIND_TYPE MEMKIND_HBW // hbw_get_kind(HBW_PAGESIZE_4KB)
-
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-
-/* Default allocation mechanism */
-HBWSpace::HBWSpace() : m_alloc_mech(HBWSpace::STD_MALLOC) {
- printf("Init\n");
- setenv("MEMKIND_HBW_NODES", "1", 0);
-}
-
-/* Default allocation mechanism */
-HBWSpace::HBWSpace(const HBWSpace::AllocationMechanism &arg_alloc_mech)
- : m_alloc_mech(HBWSpace::STD_MALLOC) {
- printf("Init2\n");
- setenv("MEMKIND_HBW_NODES", "1", 0);
- if (arg_alloc_mech == STD_MALLOC) {
- m_alloc_mech = HBWSpace::STD_MALLOC;
- }
-}
-
-void *HBWSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void *HBWSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void *HBWSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- static_assert(sizeof(void *) == sizeof(uintptr_t),
- "Error sizeof(void*) != sizeof(uintptr_t)");
-
- static_assert(
- Kokkos::Impl::power_of_two<Kokkos::Impl::MEMORY_ALIGNMENT>::value,
- "Memory alignment must be power of two");
-
- constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT;
- constexpr uintptr_t alignment_mask = alignment - 1;
-
- void *ptr = nullptr;
-
- if (arg_alloc_size) {
- if (m_alloc_mech == STD_MALLOC) {
- // Over-allocate and round up to guarantee proper alignment.
- size_t size_padded = arg_alloc_size + sizeof(void *) + alignment;
-
- void *alloc_ptr = memkind_malloc(MEMKIND_TYPE, size_padded);
-
- if (alloc_ptr) {
- uintptr_t address = reinterpret_cast<uintptr_t>(alloc_ptr);
-
- // offset enough to record the alloc_ptr
- address += sizeof(void *);
- uintptr_t rem = address % alignment;
- uintptr_t offset = rem ? (alignment - rem) : 0u;
- address += offset;
- ptr = reinterpret_cast<void *>(address);
- // record the alloc'd pointer
- address -= sizeof(void *);
- *reinterpret_cast<void **>(address) = alloc_ptr;
- }
- }
- }
-
- if ((ptr == nullptr) || (reinterpret_cast<uintptr_t>(ptr) == ~uintptr_t(0)) ||
- (reinterpret_cast<uintptr_t>(ptr) & alignment_mask)) {
- std::ostringstream msg;
- msg << "Kokkos::Experimental::HBWSpace::allocate[ ";
- switch (m_alloc_mech) {
- case STD_MALLOC: msg << "STD_MALLOC"; break;
- case POSIX_MEMALIGN: msg << "POSIX_MEMALIGN"; break;
- case POSIX_MMAP: msg << "POSIX_MMAP"; break;
- case INTEL_MM_ALLOC: msg << "INTEL_MM_ALLOC"; break;
- }
- msg << " ]( " << arg_alloc_size << " ) FAILED";
- if (ptr == nullptr) {
- msg << " nullptr";
- } else {
- msg << " NOT ALIGNED " << ptr;
- }
-
- std::cerr << msg.str() << std::endl;
- std::cerr.flush();
-
- Kokkos::Impl::throw_runtime_exception(msg.str());
- }
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
-
- return ptr;
-}
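-// The pattern above is worth seeing in isolation. A minimal sketch of the
-// same "over-allocate, align, stash the raw pointer one slot below the
-// returned address" idiom, using plain malloc/free (illustration only, not
-// part of this file):
-//
-//   #include <cstdint>
-//   #include <cstdlib>
-//
-//   void *aligned_malloc(std::size_t size, std::uintptr_t alignment) {
-//     // Pad so an aligned address plus the stashed pointer always fits.
-//     void *raw = std::malloc(size + sizeof(void *) + alignment);
-//     if (!raw) return nullptr;
-//     std::uintptr_t address = reinterpret_cast<std::uintptr_t>(raw);
-//     address += sizeof(void *);               // room for the stashed pointer
-//     std::uintptr_t rem = address % alignment;
-//     address += rem ? (alignment - rem) : 0u; // round up to the alignment
-//     reinterpret_cast<void **>(address)[-1] = raw; // stash raw just below
-//     return reinterpret_cast<void *>(address);
-//   }
-//
-//   void aligned_free(void *ptr) {
-//     // Recover the stashed pointer, exactly as impl_deallocate does below.
-//     if (ptr) std::free(reinterpret_cast<void **>(ptr)[-1]);
-//   }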
-
-void HBWSpace::deallocate(void *const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-void HBWSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size,
- const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void HBWSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (arg_alloc_ptr) {
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
-
- if (m_alloc_mech == STD_MALLOC) {
- void *alloc_ptr = *(reinterpret_cast<void **>(arg_alloc_ptr) - 1);
- memkind_free(MEMKIND_TYPE, alloc_ptr);
- }
- }
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::s_root_record;
-#endif
-
-void SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::deallocate(
- SharedAllocationRecord<void, void> *arg_rec) {
- delete static_cast<SharedAllocationRecord *>(arg_rec);
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
- void>::~SharedAllocationRecord()
-#if defined( \
- KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
- noexcept
-#endif
-{
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
- SharedAllocationRecord(
- const Kokkos::Experimental::HBWSpace &arg_space,
- const std::string &arg_label, const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : SharedAllocationRecord<void, void>(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
- void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- // Fill in the Header information
- RecordBase::m_alloc_ptr->m_record =
- static_cast<SharedAllocationRecord<void, void> *>(this);
-
- strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
- SharedAllocationHeader::maximum_label_length - 1);
- // Set last element zero, in case c_str is too long
- RecordBase::m_alloc_ptr
- ->m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
-}
-
-//----------------------------------------------------------------------------
-
-void *
-SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::allocate_tracked(
- const Kokkos::Experimental::HBWSpace &arg_space,
- const std::string &arg_alloc_label, const size_t arg_alloc_size) {
- if (!arg_alloc_size) return nullptr;
-
- SharedAllocationRecord *const r =
- allocate(arg_space, arg_alloc_label, arg_alloc_size);
-
- RecordBase::increment(r);
-
- return r->data();
-}
-
-void SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
- void>::deallocate_tracked(void *const
- arg_alloc_ptr) {
- if (arg_alloc_ptr != nullptr) {
- SharedAllocationRecord *const r = get_record(arg_alloc_ptr);
-
- RecordBase::decrement(r);
- }
-}
-
-void *SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
- reallocate_tracked(void *const arg_alloc_ptr, const size_t arg_alloc_size) {
- SharedAllocationRecord *const r_old = get_record(arg_alloc_ptr);
- SharedAllocationRecord *const r_new =
- allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
-
- Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace,
- Kokkos::Experimental::HBWSpace>(
- r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
- Kokkos::fence(
- "SharedAllocationRecord<Kokkos::Experimental::HBWSpace, "
- "void>::reallocate_tracked(): fence after copying data");
-
- RecordBase::increment(r_new);
- RecordBase::decrement(r_old);
-
- return r_new->data();
-}
-
-SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>
- *SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::get_record(
- void *alloc_ptr) {
- using Header = SharedAllocationHeader;
- using RecordHost =
- SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>;
-
- SharedAllocationHeader const *const head =
- alloc_ptr ? Header::get_header(alloc_ptr) : nullptr;
- RecordHost *const record =
- head ? static_cast<RecordHost *>(head->m_record) : nullptr;
-
- if (!alloc_ptr || record->m_alloc_ptr != head) {
- Kokkos::Impl::throw_runtime_exception(std::string(
- "Kokkos::Impl::SharedAllocationRecord< Kokkos::Experimental::HBWSpace "
- ", void >::get_record ERROR"));
- }
-
- return record;
-}
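-// Note: get_record() relies on the invariant that user memory is always
-// preceded by its SharedAllocationHeader, so Header::get_header() simply
-// steps back from the user pointer; the record->m_alloc_ptr == head
-// cross-check guards against pointers that did not come from this
-// allocation machinery.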
-
-// Iterate records to print orphaned memory ...
-void SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
- print_records(std::ostream &s, const Kokkos::Experimental::HBWSpace &space,
- bool detail) {
-#ifdef KOKKOS_ENABLE_DEBUG
- SharedAllocationRecord<void, void>::print_host_accessible_records(
- s, "HBWSpace", &s_root_record, detail);
-#else
- throw_runtime_exception(
- "SharedAllocationRecord<HBWSpace>::print_records"
- " only works with KOKKOS_ENABLE_DEBUG enabled");
-#endif
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace Experimental {
-namespace {
-const unsigned HBW_SPACE_ATOMIC_MASK = 0xFFFF;
-const unsigned HBW_SPACE_ATOMIC_XOR_MASK = 0x5A39;
-static int HBW_SPACE_ATOMIC_LOCKS[HBW_SPACE_ATOMIC_MASK + 1];
-} // namespace
-
-namespace Impl {
-void init_lock_array_hbw_space() {
- static int is_initialized = 0;
- if (!is_initialized) {
-   for (int i = 0; i < static_cast<int>(HBW_SPACE_ATOMIC_MASK + 1); i++)
-     HBW_SPACE_ATOMIC_LOCKS[i] = 0;
-   is_initialized = 1;
- }
-}
-
-bool lock_address_hbw_space(void *ptr) {
- return 0 == atomic_compare_exchange(
- &HBW_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) &
- HBW_SPACE_ATOMIC_MASK) ^
- HBW_SPACE_ATOMIC_XOR_MASK],
- 0, 1);
-}
-
-void unlock_address_hbw_space(void *ptr) {
- atomic_exchange(
- &HBW_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) & HBW_SPACE_ATOMIC_MASK) ^
- HBW_SPACE_ATOMIC_XOR_MASK],
- 0);
-}
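-// How a pointer maps onto a lock slot, written out as a standalone helper
-// (a sketch that mirrors the expressions above; the name is hypothetical,
-// not an additional API). The address is shifted right by 2 to drop
-// alignment bits, masked into the 65536-entry table, and XOR-ed to
-// decorrelate nearby addresses:
-//
-//   inline unsigned hbw_lock_index(void *ptr) {
-//     return ((size_t(ptr) >> 2) & HBW_SPACE_ATOMIC_MASK) ^
-//            HBW_SPACE_ATOMIC_XOR_MASK;
-//   }
-//
-// Distinct pointers may collide on one slot; that is safe (merely extra
-// contention) because each slot is only ever used as a simple 0/1 mutex.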
-
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <impl/Kokkos_HostBarrier.hpp>
-#include <impl/Kokkos_BitOps.hpp>
-
-#include <thread>
-#if defined(_WIN32)
-#include <process.h>
-#include <winsock2.h>
-#include <windows.h>
-#endif
-
-namespace Kokkos {
-namespace Impl {
-
-void HostBarrier::impl_backoff_wait_until_equal(
- int* ptr, const int v, const bool active_wait) noexcept {
- unsigned count = 0u;
-
- while (!test_equal(ptr, v)) {
- const int c = int_log2(++count);
- if (!active_wait || c > log2_iterations_till_sleep) {
- std::this_thread::sleep_for(
- std::chrono::nanoseconds(c < 16 ? 256 * c : 4096));
- } else if (c > log2_iterations_till_yield) {
- std::this_thread::yield();
- }
-#if defined(KOKKOS_ENABLE_ASM)
-#if defined(__PPC64__)
- for (int j = 0; j < num_nops; ++j) {
- asm volatile("nop\n");
- }
- asm volatile("or 27, 27, 27" ::: "memory");
-#elif defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
- defined(__x86_64__)
- for (int j = 0; j < num_nops; ++j) {
- asm volatile("nop\n");
- }
- asm volatile("pause\n" ::: "memory");
-#endif
-#endif
- }
-}
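-// The loop above implements classic exponential backoff: cost grows with
-// log2 of the iteration count, moving from busy nops/pause to yield to
-// sleep. A minimal portable sketch of the same idea (illustration only;
-// names and thresholds are hypothetical, not Kokkos API):
-//
-//   #include <atomic>
-//   #include <chrono>
-//   #include <thread>
-//
-//   void backoff_wait(std::atomic<int> &flag, int expected) {
-//     unsigned count = 0;
-//     while (flag.load(std::memory_order_acquire) != expected) {
-//       ++count;
-//       if (count < 64) {
-//         // hot spin: cheapest reaction time
-//       } else if (count < 4096) {
-//         std::this_thread::yield();  // give sibling threads a chance
-//       } else {
-//         std::this_thread::sleep_for(std::chrono::microseconds(4));
-//       }
-//     }
-//   }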
-} // namespace Impl
-} // namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_Macros.hpp>
-
-#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_MemorySpace.hpp>
-#include <impl/Kokkos_Tools.hpp>
-
-/*--------------------------------------------------------------------------*/
-
-#if defined(KOKKOS_COMPILER_INTEL) && !defined(KOKKOS_ENABLE_CUDA)
-
-// Intel specialized allocator does not interoperate with CUDA memory allocation
-
-#define KOKKOS_ENABLE_INTEL_MM_ALLOC
-
-#endif
-
-/*--------------------------------------------------------------------------*/
-
-#include <cstddef>
-#include <cstdlib>
-#include <cstdint>
-#include <cstring>
-
-#include <iostream>
-#include <sstream>
-
-#include <Kokkos_HostSpace.hpp>
-#include <impl/Kokkos_Error.hpp>
-#include <Kokkos_Atomic.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-/* Default allocation mechanism */
-HostSpace::HostSpace()
- : m_alloc_mech(
-#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
- HostSpace::INTEL_MM_ALLOC
-#else
- HostSpace::STD_MALLOC
-#endif
- ) {
-}
-
-/* Default allocation mechanism */
-HostSpace::HostSpace(const HostSpace::AllocationMechanism &arg_alloc_mech)
- : m_alloc_mech(HostSpace::STD_MALLOC) {
- if (arg_alloc_mech == STD_MALLOC) {
- m_alloc_mech = HostSpace::STD_MALLOC;
- }
-#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
- else if (arg_alloc_mech == HostSpace::INTEL_MM_ALLOC) {
- m_alloc_mech = HostSpace::INTEL_MM_ALLOC;
- }
-#endif
- else {
- const char *const mech =
- (arg_alloc_mech == HostSpace::INTEL_MM_ALLOC)
- ? "INTEL_MM_ALLOC"
- : ((arg_alloc_mech == HostSpace::POSIX_MMAP) ? "POSIX_MMAP" : "");
-
- std::string msg;
- msg.append("Kokkos::HostSpace ");
- msg.append(mech);
- msg.append(" is not available");
- Kokkos::Impl::throw_runtime_exception(msg);
- }
-}
-
-void *HostSpace::allocate(const size_t arg_alloc_size) const {
- return allocate("[unlabeled]", arg_alloc_size);
-}
-void *HostSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
-                          const size_t arg_logical_size) const {
- return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
-}
-void *HostSpace::impl_allocate(
- const char *arg_label, const size_t arg_alloc_size,
- const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- const size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- static_assert(sizeof(void *) == sizeof(uintptr_t),
- "Error sizeof(void*) != sizeof(uintptr_t)");
-
- static_assert(
- Kokkos::Impl::is_integral_power_of_two(Kokkos::Impl::MEMORY_ALIGNMENT),
- "Memory alignment must be power of two");
-
- constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT;
- constexpr uintptr_t alignment_mask = alignment - 1;
-
- void *ptr = nullptr;
-
- if (arg_alloc_size) {
- if (m_alloc_mech == STD_MALLOC) {
- // Over-allocate and round up to guarantee proper alignment.
- size_t size_padded = arg_alloc_size + sizeof(void *) + alignment;
-
- void *alloc_ptr = malloc(size_padded);
-
- if (alloc_ptr) {
- auto address = reinterpret_cast<uintptr_t>(alloc_ptr);
-
- // offset enough to record the alloc_ptr
- address += sizeof(void *);
- uintptr_t rem = address % alignment;
- uintptr_t offset = rem ? (alignment - rem) : 0u;
- address += offset;
- ptr = reinterpret_cast<void *>(address);
- // record the alloc'd pointer
- address -= sizeof(void *);
- *reinterpret_cast<void **>(address) = alloc_ptr;
- }
- }
-#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
- else if (m_alloc_mech == INTEL_MM_ALLOC) {
- ptr = _mm_malloc(arg_alloc_size, alignment);
- }
-#endif
- }
-
- if ((ptr == nullptr) || (reinterpret_cast<uintptr_t>(ptr) == ~uintptr_t(0)) ||
- (reinterpret_cast<uintptr_t>(ptr) & alignment_mask)) {
- Experimental::RawMemoryAllocationFailure::FailureMode failure_mode =
- Experimental::RawMemoryAllocationFailure::FailureMode::
- AllocationNotAligned;
- if (ptr == nullptr) {
- failure_mode = Experimental::RawMemoryAllocationFailure::FailureMode::
- OutOfMemoryError;
- }
-
- Experimental::RawMemoryAllocationFailure::AllocationMechanism alloc_mec =
- Experimental::RawMemoryAllocationFailure::AllocationMechanism::
- StdMalloc;
- switch (m_alloc_mech) {
- case STD_MALLOC: break; // default
- case POSIX_MEMALIGN:
- alloc_mec = Experimental::RawMemoryAllocationFailure::
- AllocationMechanism::PosixMemAlign;
- break;
- case POSIX_MMAP:
- alloc_mec = Experimental::RawMemoryAllocationFailure::
- AllocationMechanism::PosixMMap;
- break;
- case INTEL_MM_ALLOC:
- alloc_mec = Experimental::RawMemoryAllocationFailure::
- AllocationMechanism::IntelMMAlloc;
- break;
- }
-
- throw Kokkos::Experimental::RawMemoryAllocationFailure(
- arg_alloc_size, alignment, failure_mode, alloc_mec);
- }
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
- }
- return ptr;
-}
-
-void HostSpace::deallocate(void *const arg_alloc_ptr,
- const size_t arg_alloc_size) const {
- deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
-}
-
-void HostSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
-                           const size_t arg_alloc_size,
-                           const size_t arg_logical_size) const {
- impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
-}
-void HostSpace::impl_deallocate(
- const char *arg_label, void *const arg_alloc_ptr,
- const size_t arg_alloc_size, const size_t arg_logical_size,
- const Kokkos::Tools::SpaceHandle arg_handle) const {
- if (arg_alloc_ptr) {
- Kokkos::fence("HostSpace::impl_deallocate before free");
- size_t reported_size =
- (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
- reported_size);
- }
- if (m_alloc_mech == STD_MALLOC) {
- void *alloc_ptr = *(reinterpret_cast<void **>(arg_alloc_ptr) - 1);
- free(alloc_ptr);
- }
-#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
- else if (m_alloc_mech == INTEL_MM_ALLOC) {
- _mm_free(arg_alloc_ptr);
- }
-#endif
- }
-}
-
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-#ifdef KOKKOS_ENABLE_DEBUG
-SharedAllocationRecord<void, void>
- SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record;
-#endif
-
-SharedAllocationRecord<Kokkos::HostSpace, void>::~SharedAllocationRecord()
-#if defined( \
- KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
- noexcept
-#endif
-{
- m_space.deallocate(m_label.c_str(),
- SharedAllocationRecord<void, void>::m_alloc_ptr,
- SharedAllocationRecord<void, void>::m_alloc_size,
- (SharedAllocationRecord<void, void>::m_alloc_size -
- sizeof(SharedAllocationHeader)));
-}
-
-SharedAllocationHeader *_do_allocation(Kokkos::HostSpace const &space,
- std::string const &label,
- size_t alloc_size) {
- try {
- return reinterpret_cast<SharedAllocationHeader *>(
- space.allocate(alloc_size));
- } catch (Experimental::RawMemoryAllocationFailure const &failure) {
- if (failure.failure_mode() == Experimental::RawMemoryAllocationFailure::
- FailureMode::AllocationNotAligned) {
- // TODO: delete the misaligned memory
- }
-
- std::cerr << "Kokkos failed to allocate memory for label \"" << label
- << "\". Allocation using MemorySpace named \"" << space.name()
- << " failed with the following error: ";
- failure.print_error_message(std::cerr);
- std::cerr.flush();
- Kokkos::Impl::throw_runtime_exception("Memory allocation failure");
- }
- return nullptr; // unreachable
-}
-
-SharedAllocationRecord<Kokkos::HostSpace, void>::SharedAllocationRecord(
- const Kokkos::HostSpace &arg_space, const std::string &arg_label,
- const size_t arg_alloc_size,
- const SharedAllocationRecord<void, void>::function_type arg_dealloc)
- // Pass through allocated [ SharedAllocationHeader , user_memory ]
- // Pass through deallocation function
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
- arg_label);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-/*--------------------------------------------------------------------------*/
-
-namespace Kokkos {
-namespace {
-const unsigned HOST_SPACE_ATOMIC_MASK = 0xFFFF;
-const unsigned HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
-static int HOST_SPACE_ATOMIC_LOCKS[HOST_SPACE_ATOMIC_MASK + 1];
-} // namespace
-
-namespace Impl {
-void init_lock_array_host_space() {
- static int is_initialized = 0;
- if (!is_initialized) {
-   for (int i = 0; i < static_cast<int>(HOST_SPACE_ATOMIC_MASK + 1); i++)
-     HOST_SPACE_ATOMIC_LOCKS[i] = 0;
-   is_initialized = 1;
- }
-}
-
-bool lock_address_host_space(void *ptr) {
- return 0 == atomic_compare_exchange(
- &HOST_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) &
- HOST_SPACE_ATOMIC_MASK) ^
- HOST_SPACE_ATOMIC_XOR_MASK],
- 0, 1);
-}
-
-void unlock_address_host_space(void *ptr) {
- atomic_exchange(
- &HOST_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) & HOST_SPACE_ATOMIC_MASK) ^
- HOST_SPACE_ATOMIC_XOR_MASK],
- 0);
-}
-
-} // namespace Impl
-} // namespace Kokkos
-
-//==============================================================================
-// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
-
-#include <impl/Kokkos_SharedAlloc_timpl.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-// To avoid additional compilation cost for something that's (mostly?) not
-// performance sensitive, we explicitly instantiate these CRTP base classes here,
-// where we have access to the associated *_timpl.hpp header files.
-template class SharedAllocationRecordCommon<Kokkos::HostSpace>;
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
-//==============================================================================
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
-#define KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
-
-#include <cstdint>
-
-namespace Kokkos {
-
-namespace Impl {
-
-void hostspace_fence(const DefaultHostExecutionSpace& exec);
-
-void hostspace_parallel_deepcopy(void* dst, const void* src, ptrdiff_t n);
-// DeepCopy called with an execution space that can't access HostSpace
-void hostspace_parallel_deepcopy_async(void* dst, const void* src, ptrdiff_t n);
-void hostspace_parallel_deepcopy_async(const DefaultHostExecutionSpace& exec,
- void* dst, const void* src, ptrdiff_t n);
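-
-// Usage sketch for the declarations above (an illustration under the
-// assumption that n counts bytes, as in a blocking memcpy-style copy):
-//
-//   double src[16] = {1.0}, dst[16] = {};
-//   Kokkos::Impl::hostspace_parallel_deepcopy(dst, src, sizeof src);
-//   // The _async overloads enqueue on a host execution space instance:
-//   Kokkos::Impl::hostspace_parallel_deepcopy_async(
-//       Kokkos::DefaultHostExecutionSpace{}, dst, src, sizeof src);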
-
-} // namespace Impl
-
-} // namespace Kokkos
-
-#endif // KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_INITIALIZATION_SETTINGS_HPP
-#define KOKKOS_INITIALIZATION_SETTINGS_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <climits>
-#include <string>
-
-namespace Kokkos {
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-struct InitArguments {
- int num_threads;
- int num_numa;
- int device_id;
- int ndevices;
- int skip_device;
- bool disable_warnings;
- bool tune_internals;
- bool tool_help = false;
- std::string tool_lib = {};
- std::string tool_args = {};
-
- KOKKOS_DEPRECATED_WITH_COMMENT("Use InitializationSettings instead!")
- InitArguments(int nt = -1, int nn = -1, int dv = -1, bool dw = false,
- bool ti = false)
- : num_threads{nt},
- num_numa{nn},
- device_id{dv},
- ndevices{-1},
- skip_device{9999},
- disable_warnings{dw},
- tune_internals{ti} {}
-};
-#endif
-
-namespace Impl {
-// FIXME_CXX17 replace with std::optional
-template <class>
-struct InitializationSettingsHelper;
-template <>
-struct InitializationSettingsHelper<int> {
- using value_type = int;
- using storage_type = int;
-
- static constexpr storage_type unspecified = INT_MIN;
-};
-template <>
-struct InitializationSettingsHelper<bool> {
- using value_type = bool;
- using storage_type = char;
-
- static constexpr storage_type unspecified = CHAR_MAX;
- static_assert(static_cast<storage_type>(true) != unspecified &&
- static_cast<storage_type>(false) != unspecified,
- "");
-};
-template <>
-struct InitializationSettingsHelper<std::string> {
- using value_type = std::string;
- using storage_type = std::string;
-
- static storage_type const unspecified;
-};
-} // namespace Impl
-
-class InitializationSettings {
-#define KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) \
- impl_do_not_use_i_really_mean_it_##NAME##_
-
-#define KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) impl_##NAME##_type
-
-#define KOKKOS_IMPL_DECLARE(TYPE, NAME) \
- private: \
- using KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) = TYPE; \
- Impl::InitializationSettingsHelper<TYPE>::storage_type \
- KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) = \
- Impl::InitializationSettingsHelper<TYPE>::unspecified; \
- \
- public: \
- InitializationSettings& set_##NAME( \
- Impl::InitializationSettingsHelper<TYPE>::value_type NAME) { \
- KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) = NAME; \
- return *this; \
- } \
- bool has_##NAME() const noexcept { \
- return KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) != \
- Impl::InitializationSettingsHelper< \
- KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME)>::unspecified; \
- } \
- KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) get_##NAME() const noexcept { \
- return KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME); \
- } \
- static_assert(true, "no-op to require trailing semicolon")
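-
-// For orientation, KOKKOS_IMPL_DECLARE(int, num_threads) below expands
-// (with the helper macros spelled out, and function bodies elided) to
-// roughly:
-//
-//   private:
-//    using impl_num_threads_type = int;
-//    int impl_do_not_use_i_really_mean_it_num_threads_ = INT_MIN;
-//   public:
-//    InitializationSettings &set_num_threads(int num_threads);
-//    bool has_num_threads() const noexcept;
-//    int get_num_threads() const noexcept;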
-
- public:
- KOKKOS_IMPL_DECLARE(int, num_threads);
- KOKKOS_IMPL_DECLARE(int, device_id);
- KOKKOS_IMPL_DECLARE(std::string, map_device_id_by);
- KOKKOS_IMPL_DECLARE(int, num_devices); // deprecated
- KOKKOS_IMPL_DECLARE(int, skip_device); // deprecated
- KOKKOS_IMPL_DECLARE(bool, disable_warnings);
- KOKKOS_IMPL_DECLARE(bool, print_configuration);
- KOKKOS_IMPL_DECLARE(bool, tune_internals);
- KOKKOS_IMPL_DECLARE(bool, tools_help);
- KOKKOS_IMPL_DECLARE(std::string, tools_libs);
- KOKKOS_IMPL_DECLARE(std::string, tools_args);
-
-#undef KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE
-#undef KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER
-#undef KOKKOS_IMPL_DECLARE
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- public:
- InitializationSettings() = default;
-
- InitializationSettings(InitArguments const& old) {
- if (old.num_threads != -1) {
- set_num_threads(old.num_threads);
- }
- if (old.device_id != -1) {
- set_device_id(old.device_id);
- }
- if (old.ndevices != -1) {
- set_num_devices(old.ndevices);
- }
- if (old.skip_device != 9999) {
- set_skip_device(old.skip_device);
- }
- if (old.disable_warnings) {
- set_disable_warnings(true);
- }
- if (old.tune_internals) {
- set_tune_internals(true);
- }
- if (old.tool_help) {
- set_tools_help(true);
- }
- if (!old.tool_lib.empty()) {
- set_tools_libs(old.tool_lib);
- }
- if (!old.tool_args.empty()) {
- set_tools_args(old.tool_args);
- }
- }
-#endif
-};
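-
-// A typical use of the fluent setters generated above, which is how an
-// InitializationSettings object is handed to Kokkos::initialize:
-//
-//   Kokkos::InitializationSettings settings;
-//   settings.set_num_threads(4).set_device_id(0).set_disable_warnings(true);
-//   if (settings.has_num_threads()) (void)settings.get_num_threads();
-//   // Kokkos::initialize(settings);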
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-// Experimental unified task-data parallel manycore LDRD
-
-#ifndef KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP
-#define KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP
-
-#include <Kokkos_Macros.hpp>
-
-#include <Kokkos_Core_fwd.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-namespace Kokkos {
-namespace Impl {
-
-template <class MemoryPool, class T>
-class MemoryPoolAllocator {
- public:
- using memory_pool = MemoryPool;
-
- private:
- memory_pool m_pool;
-
- public:
- KOKKOS_DEFAULTED_FUNCTION
- MemoryPoolAllocator() = default;
- KOKKOS_DEFAULTED_FUNCTION
- MemoryPoolAllocator(MemoryPoolAllocator const&) = default;
- KOKKOS_DEFAULTED_FUNCTION
- MemoryPoolAllocator(MemoryPoolAllocator&&) = default;
- KOKKOS_DEFAULTED_FUNCTION
- MemoryPoolAllocator& operator=(MemoryPoolAllocator const&) = default;
- KOKKOS_DEFAULTED_FUNCTION
- MemoryPoolAllocator& operator=(MemoryPoolAllocator&&) = default;
- KOKKOS_DEFAULTED_FUNCTION
- ~MemoryPoolAllocator() = default;
-
- KOKKOS_INLINE_FUNCTION
- explicit MemoryPoolAllocator(memory_pool const& arg_pool)
- : m_pool(arg_pool) {}
- KOKKOS_INLINE_FUNCTION
- explicit MemoryPoolAllocator(memory_pool&& arg_pool)
- : m_pool(std::move(arg_pool)) {}
-
- public:
- using value_type = T;
- using pointer = T*;
- using size_type = typename MemoryPool::memory_space::size_type;
- using difference_type = std::make_signed_t<size_type>;
-
- template <class U>
- struct rebind {
- using other = MemoryPoolAllocator<MemoryPool, U>;
- };
-
- KOKKOS_INLINE_FUNCTION
- pointer allocate(size_t n) {
- void* rv = m_pool.allocate(n * sizeof(T));
- if (rv == nullptr) {
- Kokkos::abort("Kokkos MemoryPool allocator failed to allocate memory");
- }
- return reinterpret_cast<T*>(rv);
- }
-
- KOKKOS_INLINE_FUNCTION
- void deallocate(T* ptr, size_t n) { m_pool.deallocate(ptr, n * sizeof(T)); }
-
- KOKKOS_INLINE_FUNCTION
- size_type max_size() const { return m_pool.max_block_size(); }
-
- KOKKOS_INLINE_FUNCTION
- bool operator==(MemoryPoolAllocator const& other) const {
- return m_pool == other.m_pool;
- }
-
- KOKKOS_INLINE_FUNCTION
- bool operator!=(MemoryPoolAllocator const& other) const {
- return !(*this == other);
- }
-};
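-
-// Sketch of intended use (assumes a pool type with the allocate/deallocate
-// interface required by the calls above, e.g. Kokkos::MemoryPool on a host
-// space; illustration only):
-//
-//   using Pool = Kokkos::MemoryPool<Kokkos::DefaultHostExecutionSpace>;
-//   Pool pool(typename Pool::memory_space{}, 1 << 20);  // ~1 MB pool
-//   Kokkos::Impl::MemoryPoolAllocator<Pool, int> alloc(pool);
-//   int *p = alloc.allocate(128);  // 128 ints; aborts on failure (see above)
-//   alloc.deallocate(p, 128);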
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #ifndef KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/** @file Kokkos_MemorySpace.cpp
- *
- * Operations common to memory space instances, or at least default
- * implementations thereof.
- */
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <impl/Kokkos_MemorySpace.hpp>
-
-#include <iostream>
-#include <string>
-#include <sstream>
-
-namespace Kokkos {
-namespace Impl {
-
-void safe_throw_allocation_with_header_failure(
- std::string const& space_name, std::string const& label,
- Kokkos::Experimental::RawMemoryAllocationFailure const& failure) {
- auto generate_failure_message = [&](std::ostream& o) {
- o << "Kokkos failed to allocate memory for label \"" << label
- << "\". Allocation using MemorySpace named \"" << space_name
- << "\" failed with the following error: ";
- failure.print_error_message(o);
- if (failure.failure_mode() ==
- Kokkos::Experimental::RawMemoryAllocationFailure::FailureMode::
- AllocationNotAligned) {
- // TODO: delete the misaligned memory?
- o << "Warning: Allocation failed due to misalignment; memory may "
- "be leaked.\n";
- }
- o.flush();
- };
- try {
- std::ostringstream sstr;
- generate_failure_message(sstr);
- Kokkos::Impl::throw_runtime_exception(sstr.str());
- } catch (std::bad_alloc const&) {
- // Probably failed to allocate the string because we're so close to out
- // of memory. Try printing to std::cerr instead
- try {
- generate_failure_message(std::cerr);
- } catch (std::bad_alloc const&) {
- // oh well, we tried...
- }
- Kokkos::Impl::throw_runtime_exception(
- "Kokkos encountered an allocation failure, then another allocation "
- "failure while trying to create the error message.");
- }
-}
-
-} // end namespace Impl
-} // end namespace Kokkos
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2019) Sandia Corporation
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/** @file Kokkos_MemorySpace.hpp
- *
- * Operations common to memory space instances, or at least default
- * implementations thereof.
- */
-
-#ifndef KOKKOS_IMPL_MEMORYSPACE_HPP
-#define KOKKOS_IMPL_MEMORYSPACE_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
-#include <impl/Kokkos_Error.hpp>
-
-#include <string>
-
-namespace Kokkos {
-namespace Impl {
-
-// Defined in implementation file to avoid having to include iostream
-void safe_throw_allocation_with_header_failure(
- std::string const &space_name, std::string const &label,
- Kokkos::Experimental::RawMemoryAllocationFailure const &failure);
-
-template <class MemorySpace>
-SharedAllocationHeader *checked_allocation_with_header(MemorySpace const &space,
- std::string const &label,
- size_t alloc_size) {
- try {
- return reinterpret_cast<SharedAllocationHeader *>(space.allocate(
- label.c_str(), alloc_size + sizeof(SharedAllocationHeader),
- alloc_size));
- } catch (Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
- safe_throw_allocation_with_header_failure(space.name(), label, failure);
- }
- return nullptr; // unreachable
-}
-
-template <class ExecutionSpace, class MemorySpace>
-SharedAllocationHeader *checked_allocation_with_header(
- ExecutionSpace const &exec_space, MemorySpace const &space,
- std::string const &label, size_t alloc_size) {
- try {
- return reinterpret_cast<SharedAllocationHeader *>(space.allocate(
- exec_space, label.c_str(), alloc_size + sizeof(SharedAllocationHeader),
- alloc_size));
- } catch (Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
- safe_throw_allocation_with_header_failure(space.name(), label, failure);
- }
- return nullptr; // unreachable
-}
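-
-// Both overloads allocate one contiguous block laid out as
-//
-//   [ SharedAllocationHeader | alloc_size bytes of user data ]
-//
-// which is why sizeof(SharedAllocationHeader) is added to the requested
-// size while alloc_size alone is passed as the logical size for profiling.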
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_IMPL_MEMORYSPACE_HPP
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_MEMORY_FENCE_HPP)
-#define KOKKOS_MEMORY_FENCE_HPP
-namespace Kokkos {
-
-//----------------------------------------------------------------------------
-#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
-KOKKOS_FORCEINLINE_FUNCTION
-void memory_fence() {
-#if defined(__CUDA_ARCH__)
- __threadfence();
-#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
-#pragma omp flush
-#elif defined(__HIP_DEVICE_COMPILE__)
- __threadfence();
-#elif defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
- sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
-#elif defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- asm volatile("mfence" ::: "memory");
-#elif defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
- (defined(KOKKOS_COMPILER_NVCC) && defined(KOKKOS_ENABLE_INTEL_ATOMICS))
- __sync_synchronize();
-#elif defined(KOKKOS_ENABLE_INTEL_ATOMICS)
- _mm_mfence();
-#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
-#pragma omp flush
-#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
- MemoryBarrier();
-#elif !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
-#error "Error: memory_fence() not defined"
-#endif
-}
-#endif
-
-//////////////////////////////////////////////////////
-// store_fence()
-//
-// If possible use a store fence on the architecture, if not run a full memory
-// fence
-
-KOKKOS_FORCEINLINE_FUNCTION
-void store_fence() {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- asm volatile("sfence" ::: "memory");
-#else
- memory_fence();
-#endif
-}
-
-//////////////////////////////////////////////////////
-// load_fence()
-//
-// If possible use a load fence on the architecture, if not run a full memory
-// fence
-
-KOKKOS_FORCEINLINE_FUNCTION
-void load_fence() {
-#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
- asm volatile("lfence" ::: "memory");
-#else
- memory_fence();
-#endif
-}
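-
-// A minimal sketch of the publish/consume pattern these fences support
-// (illustration only; a volatile flag stands in for real synchronization):
-//
-//   int payload = 0;
-//   volatile int ready = 0;
-//
-//   void producer() {
-//     payload = 42;
-//     Kokkos::store_fence();  // make payload visible before the flag
-//     ready = 1;
-//   }
-//
-//   void consumer() {
-//     while (!ready) { /* spin */ }
-//     Kokkos::load_fence();   // observe the flag before reading payload
-//     // payload == 42 here
-//   }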
-
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#endif
-
-#include <Kokkos_NumericTraits.hpp>
-
-// NOTE These out-of-class definitions are only required with C++14. Since
-// C++17, a static data member declared constexpr is implicitly inline.
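-//
-// Background sketch: before C++17 a constexpr static data member was not
-// implicitly inline, so ODR-using it (e.g. binding a reference to it)
-// required an out-of-class definition like the ones generated below:
-//
-//   struct helper { static constexpr float value = 1.0f; };
-//   // constexpr float helper::value;   // required in C++14
-//   const float &r = helper::value;     // ODR-use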
-
-#if !defined(KOKKOS_ENABLE_CXX17)
-namespace Kokkos {
-namespace Experimental {
-namespace Impl {
-#define OUT_OF_CLASS_DEFINITION_FLOATING_POINT(TRAIT) \
- constexpr float TRAIT##_helper<float>::value; \
- constexpr double TRAIT##_helper<double>::value; \
- constexpr long double TRAIT##_helper<long double>::value
-
-#define OUT_OF_CLASS_DEFINITION_INTEGRAL(TRAIT) \
- constexpr bool TRAIT##_helper<bool>::value; \
- constexpr char TRAIT##_helper<char>::value; \
- constexpr signed char TRAIT##_helper<signed char>::value; \
- constexpr unsigned char TRAIT##_helper<unsigned char>::value; \
- constexpr short TRAIT##_helper<short>::value; \
- constexpr unsigned short TRAIT##_helper<unsigned short>::value; \
- constexpr int TRAIT##_helper<int>::value; \
- constexpr unsigned int TRAIT##_helper<unsigned int>::value; \
- constexpr long int TRAIT##_helper<long int>::value; \
- constexpr unsigned long int TRAIT##_helper<unsigned long int>::value; \
- constexpr long long int TRAIT##_helper<long long int>::value; \
- constexpr unsigned long long int TRAIT##_helper<unsigned long long int>::value
-
-#define OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(TRAIT) \
- constexpr int TRAIT##_helper<float>::value; \
- constexpr int TRAIT##_helper<double>::value; \
- constexpr int TRAIT##_helper<long double>::value
-
-#define OUT_OF_CLASS_DEFINITION_INTEGRAL_2(TRAIT) \
- constexpr int TRAIT##_helper<bool>::value; \
- constexpr int TRAIT##_helper<char>::value; \
- constexpr int TRAIT##_helper<signed char>::value; \
- constexpr int TRAIT##_helper<unsigned char>::value; \
- constexpr int TRAIT##_helper<short>::value; \
- constexpr int TRAIT##_helper<unsigned short>::value; \
- constexpr int TRAIT##_helper<int>::value; \
- constexpr int TRAIT##_helper<unsigned int>::value; \
- constexpr int TRAIT##_helper<long int>::value; \
- constexpr int TRAIT##_helper<unsigned long int>::value; \
- constexpr int TRAIT##_helper<long long int>::value; \
- constexpr int TRAIT##_helper<unsigned long long int>::value
-
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(infinity);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(epsilon);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(round_error);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(norm_min);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(denorm_min);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(reciprocal_overflow_threshold);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(quiet_NaN);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(signaling_NaN);
-
-OUT_OF_CLASS_DEFINITION_INTEGRAL(finite_min);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(finite_min);
-OUT_OF_CLASS_DEFINITION_INTEGRAL(finite_max);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT(finite_max);
-
-OUT_OF_CLASS_DEFINITION_INTEGRAL_2(digits);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(digits);
-OUT_OF_CLASS_DEFINITION_INTEGRAL_2(digits10);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(digits10);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_digits10);
-OUT_OF_CLASS_DEFINITION_INTEGRAL_2(radix);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(radix);
-
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(min_exponent);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(min_exponent10);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_exponent);
-OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_exponent10);
-} // namespace Impl
-} // namespace Experimental
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
-#define KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
-
-// These declarations are only provided for testing purposes
-namespace Kokkos {
-class InitializationSettings;
-namespace Impl {
-void parse_command_line_arguments(int& argc, char* argv[],
- InitializationSettings& settings);
-void parse_environment_variables(InitializationSettings& settings);
-} // namespace Impl
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_PHYSICAL_LAYOUT_HPP
-#define KOKKOS_PHYSICAL_LAYOUT_HPP
-
-#include <Kokkos_View.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-struct PhysicalLayout {
- enum LayoutType { Left, Right, Scalar, Error };
- LayoutType layout_type;
- int rank;
- long long int stride[9]; // distance between two neighboring elements in a
- // given dimension
-
- template <class T, class L, class D, class M>
- PhysicalLayout(const View<T, L, D, M>& view)
- : layout_type(
- is_same<typename View<T, L, D, M>::array_layout, LayoutLeft>::value
- ? Left
- : (is_same<typename View<T, L, D, M>::array_layout,
- LayoutRight>::value
- ? Right
- : Error)),
- rank(view.Rank) {
- for (int i = 0; i < 9; i++) stride[i] = 0;
- view.stride(stride);
- }
-};
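-
-// Usage sketch (illustration; the reported layout depends on the View):
-//
-//   Kokkos::View<double **, Kokkos::LayoutRight, Kokkos::HostSpace> v("v", 4, 8);
-//   Kokkos::Impl::PhysicalLayout pl(v);
-//   // pl.layout_type == PhysicalLayout::Right, pl.rank == 2,
-//   // pl.stride[0] == 8 and pl.stride[1] == 1 for this contiguous view.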
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOSP_DEVICE_INFO_HPP
-#define KOKKOSP_DEVICE_INFO_HPP
-
-#include <cstdint>
-#include <impl/Kokkos_Profiling_C_Interface.h>
-namespace Kokkos {
-namespace Profiling {
-using KokkosPDeviceInfo = Kokkos_Profiling_KokkosPDeviceInfo;
-} // namespace Profiling
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SPINWAIT_HPP
-#define KOKKOS_SPINWAIT_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <Kokkos_Atomic.hpp>
-
-#include <cstdint>
-
-#include <type_traits>
-
-namespace Kokkos {
-namespace Impl {
-
-enum class WaitMode : int {
-  ACTIVE,   // Used for tight loops to keep threads active longest
-  PASSIVE,  // Used to quickly yield the thread to quiet down the system
-  ROOT      // Never sleep or yield the root thread
-};
-
-void host_thread_yield(const uint32_t i, const WaitMode mode);
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> root_spinwait_while_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value == flag) {
- host_thread_yield(++i, WaitMode::ROOT);
- }
- Kokkos::load_fence();
-}
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> root_spinwait_until_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value != flag) {
- host_thread_yield(++i, WaitMode::ROOT);
- }
- Kokkos::load_fence();
-}
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> spinwait_while_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value == flag) {
- host_thread_yield(++i, WaitMode::ACTIVE);
- }
- Kokkos::load_fence();
-}
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> yield_while_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value == flag) {
- host_thread_yield(++i, WaitMode::PASSIVE);
- }
- Kokkos::load_fence();
-}
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> spinwait_until_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value != flag) {
- host_thread_yield(++i, WaitMode::ACTIVE);
- }
- Kokkos::load_fence();
-}
-
-template <typename T>
-std::enable_if_t<std::is_integral<T>::value, void> yield_until_equal(
- T const volatile& flag, const T value) {
- Kokkos::store_fence();
- uint32_t i = 0;
- while (value != flag) {
- host_thread_yield(++i, WaitMode::PASSIVE);
- }
- Kokkos::load_fence();
-}
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-#endif /* #ifndef KOKKOS_SPINWAIT_HPP */
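-
-// ---- Editor's illustrative sketch (not part of the original header) ----
-// Typical use of the helpers above: one thread publishes a flag, another
-// waits for it with the back-off policy that fits its role:
-#if 0
-void wait_for_release(int const volatile& flag) {
-  // Stay hot: loops calling host_thread_yield() with WaitMode::ACTIVE.
-  Kokkos::Impl::spinwait_while_equal(flag, 0);
-}
-void wait_quietly(int const volatile& flag) {
-  // Yield the thread between polls (WaitMode::PASSIVE) to quiet the system.
-  Kokkos::Impl::yield_while_equal(flag, 0);
-}
-#endif
-// ---- end editor's sketch ----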
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
-#define KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
-
-#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_TASKDAG)
-
-#include <impl/Kokkos_TaskQueueMultiple.hpp>
-
-#define KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING_MULTIPLE 0
-
-namespace Kokkos {
-namespace Impl {
-
-template <class ExecSpace, class MemorySpace>
-void TaskQueueMultiple<ExecSpace,
- MemorySpace>::Destroy::destroy_shared_allocation() {
-// KOKKOS WORKAROUND for CUDA 10.1 with GCC 7.3.0
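-// (Editor's note: the explicit "(*m_queue)." spelling below presumably
-// side-steps a mis-parse of operator-> on this dependent type with that
-// compiler combination; both branches invoke the same destructor.)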
-#if (KOKKOS_COMPILER_CUDA_VERSION == 101) && defined(KOKKOS_COMPILER_NVCC) && \
- (KOKKOS_COMPILER_GNU >= 730)
- (*m_queue).get_team_queue(0).~TaskQueueMultiple();
-#else
- m_queue->get_team_queue(0).~TaskQueueMultiple();
-#endif
-}
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
-#endif /* #ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_IMPLWALLTIME_HPP
-#define KOKKOS_IMPLWALLTIME_HPP
-
-#include <Kokkos_Macros.hpp>
-
-KOKKOS_IMPL_WARNING("This file is deprecated. Use <Kokkos_Timer.hpp> instead.")
-
-#include <Kokkos_Timer.hpp>
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-namespace Kokkos {
-namespace Impl {
-
-/** \brief Time since construction.
- *  The Timer class was promoted from the Impl namespace into the Kokkos
- *  namespace; this header is retained only for backwards compatibility.
- */
-using Timer KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::Timer instead!") =
- Kokkos::Timer;
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
-
-#endif /* #ifndef KOKKOS_IMPLWALLTIME_HPP */
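-
-// ---- Editor's illustrative sketch (not part of the original header) ----
-// The replacement API suggested by the deprecation warning above:
-#if 0
-#include <Kokkos_Timer.hpp>
-void time_something() {
-  Kokkos::Timer timer;               // starts timing at construction
-  // ... work to be measured ...
-  double elapsed = timer.seconds();  // seconds since construction/reset
-  timer.reset();                     // restart the clock
-  (void)elapsed;
-}
-#endif
-// ---- end editor's sketch ----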
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-/**
- * Header file to include all of Kokkos Tooling support
- */
-
-#ifndef KOKKOS_IMPL_KOKKOS_TOOLS_HPP
-#define KOKKOS_IMPL_KOKKOS_TOOLS_HPP
-
-#include <impl/Kokkos_Profiling.hpp>
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP
-#define KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP
-
-#include <Kokkos_Array.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-template <class DataType, class ArrayLayout, class V, size_t N, class P>
-struct ViewDataAnalysis<DataType, ArrayLayout, Kokkos::Array<V, N, P>> {
- private:
- using array_analysis = ViewArrayAnalysis<DataType>;
-
- static_assert(std::is_void<P>::value, "");
- static_assert(std::is_same<typename array_analysis::non_const_value_type,
- Kokkos::Array<V, N, P>>::value,
- "");
- static_assert(std::is_scalar<V>::value,
- "View of Array type must be of a scalar type");
-
- public:
- using specialize = Kokkos::Array<>;
-
- using dimension = typename array_analysis::dimension;
-
- private:
- enum {
- is_const = std::is_same<typename array_analysis::value_type,
- typename array_analysis::const_value_type>::value
- };
-
- using array_scalar_dimension = typename dimension::template append<N>::type;
-
- using scalar_type = std::conditional_t<is_const, const V, V>;
- using non_const_scalar_type = V;
- using const_scalar_type = const V;
-
- public:
- using value_type = typename array_analysis::value_type;
- using const_value_type = typename array_analysis::const_value_type;
- using non_const_value_type = typename array_analysis::non_const_value_type;
-
- using type = typename ViewDataType<value_type, dimension>::type;
- using const_type = typename ViewDataType<const_value_type, dimension>::type;
- using non_const_type =
- typename ViewDataType<non_const_value_type, dimension>::type;
-
- using scalar_array_type =
- typename ViewDataType<scalar_type, array_scalar_dimension>::type;
- using const_scalar_array_type =
- typename ViewDataType<const_scalar_type, array_scalar_dimension>::type;
- using non_const_scalar_array_type =
- typename ViewDataType<non_const_scalar_type,
- array_scalar_dimension>::type;
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
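-// ---- Editor's illustrative sketch (not part of the original header) ----
-// Effect of the analysis above: a rank-1 view of Kokkos::Array gains the
-// array extent N as a trailing scalar dimension.
-#if 0
-Kokkos::View<Kokkos::Array<double, 3>*> a("a", 100);  // 100 arrays of 3
-// a's scalar_array_type is double*[3] (see array_scalar_dimension above),
-// so the underlying allocation is indexed like a 100 x 3 view of doubles.
-#endif
-// ---- end editor's sketch ----
-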
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-/** \brief View mapping for non-specialized data type and standard layout */
-template <class Traits>
-class ViewMapping<Traits, Kokkos::Array<>> {
- private:
- template <class, class...>
- friend class ViewMapping;
- template <class, class...>
- friend class Kokkos::View;
-
- using offset_type = ViewOffset<typename Traits::dimension,
- typename Traits::array_layout, void>;
-
- using handle_type = typename Traits::value_type::pointer;
-
- handle_type m_impl_handle;
- offset_type m_impl_offset;
- size_t m_stride = 0;
-
- using scalar_type = typename Traits::value_type::value_type;
-
- using contiguous_reference = Kokkos::Array<scalar_type, (~std::size_t(0)),
- Kokkos::Array<>::contiguous>;
- using strided_reference =
- Kokkos::Array<scalar_type, (~std::size_t(0)), Kokkos::Array<>::strided>;
-
- enum {
- is_contiguous_reference =
- (Traits::rank == 0) || (std::is_same<typename Traits::array_layout,
- Kokkos::LayoutRight>::value)
- };
-
- enum { Array_N = Traits::value_type::size() };
- enum { Array_S = is_contiguous_reference ? Array_N : 1 };
-
- KOKKOS_INLINE_FUNCTION
- ViewMapping(const handle_type &arg_handle, const offset_type &arg_offset)
- : m_impl_handle(arg_handle),
- m_impl_offset(arg_offset),
- m_stride(is_contiguous_reference ? 0 : arg_offset.span()) {}
-
- public:
- //----------------------------------------
- // Domain dimensions
-
- enum { Rank = Traits::dimension::rank };
-
- template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr size_t extent(const iType &r) const {
- return m_impl_offset.m_dim.extent(r);
- }
-
- KOKKOS_INLINE_FUNCTION constexpr typename Traits::array_layout layout()
- const {
- return m_impl_offset.layout();
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_0() const {
- return m_impl_offset.dimension_0();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_1() const {
- return m_impl_offset.dimension_1();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_2() const {
- return m_impl_offset.dimension_2();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_3() const {
- return m_impl_offset.dimension_3();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_4() const {
- return m_impl_offset.dimension_4();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_5() const {
- return m_impl_offset.dimension_5();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_6() const {
- return m_impl_offset.dimension_6();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t dimension_7() const {
- return m_impl_offset.dimension_7();
- }
-
- // Is a regular layout with uniform striding for each index.
- using is_regular = typename offset_type::is_regular;
-
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
- return m_impl_offset.stride_0();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
- return m_impl_offset.stride_1();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
- return m_impl_offset.stride_2();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
- return m_impl_offset.stride_3();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
- return m_impl_offset.stride_4();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
- return m_impl_offset.stride_5();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
- return m_impl_offset.stride_6();
- }
- KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
- return m_impl_offset.stride_7();
- }
-
- //----------------------------------------
- // Range span
-
- /** \brief Span of the mapped range */
- KOKKOS_INLINE_FUNCTION constexpr size_t span() const {
- return m_impl_offset.span() * Array_N;
- }
-
- /** \brief Is the mapped range span contiguous */
- KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
- return m_impl_offset.span_is_contiguous();
- }
-
- using reference_type =
- std::conditional_t<is_contiguous_reference, contiguous_reference,
- strided_reference>;
-
- using pointer_type = handle_type;
-
-  /** \brief If data references are lvalue references, then one can query a
-   *  pointer to the underlying memory */
- KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
- return m_impl_handle;
- }
-
- //----------------------------------------
- // The View class performs all rank and bounds checking before
- // calling these element reference methods.
-
- KOKKOS_FORCEINLINE_FUNCTION
- reference_type reference() const {
- return reference_type(m_impl_handle + 0, Array_N, 0);
- }
-
- template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0) const {
- return reference_type(m_impl_handle + m_impl_offset(i0) * Array_S, Array_N,
- m_stride);
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
- const I1 &i1) const {
- return reference_type(m_impl_handle + m_impl_offset(i0, i1) * Array_S,
- Array_N, m_stride);
- }
-
- template <typename I0, typename I1, typename I2>
- KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
- const I1 &i1,
- const I2 &i2) const {
- return reference_type(m_impl_handle + m_impl_offset(i0, i1, i2) * Array_S,
- Array_N, m_stride);
- }
-
- template <typename I0, typename I1, typename I2, typename I3>
- KOKKOS_FORCEINLINE_FUNCTION reference_type
- reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3) const {
- return reference_type(
- m_impl_handle + m_impl_offset(i0, i1, i2, i3) * Array_S, Array_N,
- m_stride);
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4>
- KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
- const I1 &i1,
- const I2 &i2,
- const I3 &i3,
- const I4 &i4) const {
- return reference_type(
- m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4) * Array_S, Array_N,
- m_stride);
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5>
- KOKKOS_FORCEINLINE_FUNCTION reference_type
- reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
- const I4 &i4, const I5 &i5) const {
- return reference_type(
- m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5) * Array_S,
- Array_N, m_stride);
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6>
- KOKKOS_FORCEINLINE_FUNCTION reference_type
- reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
- const I4 &i4, const I5 &i5, const I6 &i6) const {
- return reference_type(
- m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5, i6) * Array_S,
- Array_N, m_stride);
- }
-
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6, typename I7>
- KOKKOS_FORCEINLINE_FUNCTION reference_type
- reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
- const I4 &i4, const I5 &i5, const I6 &i6, const I7 &i7) const {
- return reference_type(
- m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5, i6, i7) * Array_S,
- Array_N, m_stride);
- }
-
- //----------------------------------------
-
- private:
- enum { MemorySpanMask = 8 - 1 /* Force alignment on 8 byte boundary */ };
- enum { MemorySpanSize = sizeof(scalar_type) };
-
- public:
- /** \brief Span, in bytes, of the referenced memory */
- KOKKOS_INLINE_FUNCTION constexpr size_t memory_span() const {
- return (m_impl_offset.span() * Array_N * MemorySpanSize + MemorySpanMask) &
- ~size_t(MemorySpanMask);
- }
-
- //----------------------------------------
-
- KOKKOS_DEFAULTED_FUNCTION ViewMapping() = default;
-
- //----------------------------------------
-
- template <class... Args>
- KOKKOS_INLINE_FUNCTION ViewMapping(pointer_type ptr, Args... args)
- : m_impl_handle(ptr),
- m_impl_offset(std::integral_constant<unsigned, 0>(), args...),
- m_stride(m_impl_offset.span()) {}
-
- //----------------------------------------
-
- template <class... P>
- Kokkos::Impl::SharedAllocationRecord<> *allocate_shared(
- Kokkos::Impl::ViewCtorProp<P...> const &arg_prop,
- typename Traits::array_layout const &arg_layout,
- bool execution_space_specified) {
- using alloc_prop = Kokkos::Impl::ViewCtorProp<P...>;
-
- using execution_space = typename alloc_prop::execution_space;
- using memory_space = typename Traits::memory_space;
- using functor_type =
- ViewValueFunctor<typename Traits::device_type, scalar_type>;
- using record_type =
- Kokkos::Impl::SharedAllocationRecord<memory_space, functor_type>;
-
- // Query the mapping for byte-size of allocation.
- using padding = std::integral_constant<
- unsigned int, alloc_prop::allow_padding ? sizeof(scalar_type) : 0>;
-
- m_impl_offset = offset_type(padding(), arg_layout);
-
- const size_t alloc_size =
- (m_impl_offset.span() * Array_N * MemorySpanSize + MemorySpanMask) &
- ~size_t(MemorySpanMask);
- const auto &alloc_name =
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const &>(
- arg_prop)
- .value;
- const execution_space &exec_space =
- static_cast<Kokkos::Impl::ViewCtorProp<void, execution_space> const &>(
- arg_prop)
- .value;
- const memory_space &mem_space =
- static_cast<Kokkos::Impl::ViewCtorProp<void, memory_space> const &>(
- arg_prop)
- .value;
-
- // Allocate memory from the memory space and create tracking record.
- record_type *const record =
- execution_space_specified
- ? record_type::allocate(exec_space, mem_space, alloc_name,
- alloc_size)
- : record_type::allocate(mem_space, alloc_name, alloc_size);
-
- if (alloc_size) {
- m_impl_handle =
- handle_type(reinterpret_cast<pointer_type>(record->data()));
-
- if (alloc_prop::initialize) {
- // The functor constructs and destroys
- record->m_destroy =
- execution_space_specified
- ? functor_type(exec_space, (pointer_type)m_impl_handle,
- m_impl_offset.span() * Array_N, alloc_name)
- : functor_type((pointer_type)m_impl_handle,
- m_impl_offset.span() * Array_N, alloc_name);
-
- record->m_destroy.construct_shared_allocation();
- }
- }
-
- return record;
- }
-};
-
-/** \brief Assign Array to non-Array */
-
-template <class DstTraits, class SrcTraits>
-class ViewMapping<
- DstTraits, SrcTraits,
- std::enable_if_t<(
- std::is_same<typename DstTraits::memory_space,
- typename SrcTraits::memory_space>::value &&
- std::is_void<typename DstTraits::specialize>::value &&
- (std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value) &&
- std::is_same<typename SrcTraits::specialize, Kokkos::Array<>>::value &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value))>> {
- public:
- // Can only convert to View::array_type
-
- enum {
- is_assignable_data_type =
- std::is_same<typename DstTraits::data_type,
- typename SrcTraits::scalar_array_type>::value &&
- (DstTraits::rank == SrcTraits::rank + 1)
- };
- enum {
- is_assignable =
- std::is_same<typename DstTraits::data_type,
- typename SrcTraits::scalar_array_type>::value &&
- std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value
- };
-
- using TrackType = Kokkos::Impl::SharedAllocationTracker;
- using DstType = ViewMapping<DstTraits, void>;
- using SrcType = ViewMapping<SrcTraits, Kokkos::Array<>>;
-
- KOKKOS_INLINE_FUNCTION
- static void assign(DstType &dst, const SrcType &src,
- const TrackType & /*src_track*/) {
- static_assert(is_assignable, "Can only convert to array_type");
-
- using dst_offset_type = typename DstType::offset_type;
-
- // Array dimension becomes the last dimension.
- // Arguments beyond the destination rank are ignored.
- if (src.span_is_contiguous()) { // not padded
- dst.m_impl_offset = dst_offset_type(
- std::integral_constant<unsigned, 0>(),
- typename DstTraits::array_layout(
- (0 < SrcType::Rank ? src.dimension_0()
- : SrcTraits::value_type::size()),
- (1 < SrcType::Rank ? src.dimension_1()
- : SrcTraits::value_type::size()),
- (2 < SrcType::Rank ? src.dimension_2()
- : SrcTraits::value_type::size()),
- (3 < SrcType::Rank ? src.dimension_3()
- : SrcTraits::value_type::size()),
- (4 < SrcType::Rank ? src.dimension_4()
- : SrcTraits::value_type::size()),
- (5 < SrcType::Rank ? src.dimension_5()
- : SrcTraits::value_type::size()),
- (6 < SrcType::Rank ? src.dimension_6()
- : SrcTraits::value_type::size()),
- (7 < SrcType::Rank ? src.dimension_7()
- : SrcTraits::value_type::size())));
- } else { // is padded
- using padded = std::integral_constant<
- unsigned int, sizeof(typename SrcTraits::value_type::value_type)>;
-
- dst.m_impl_offset = dst_offset_type(
- padded(), typename DstTraits::array_layout(
- (0 < SrcType::Rank ? src.dimension_0()
- : SrcTraits::value_type::size()),
- (1 < SrcType::Rank ? src.dimension_1()
- : SrcTraits::value_type::size()),
- (2 < SrcType::Rank ? src.dimension_2()
- : SrcTraits::value_type::size()),
- (3 < SrcType::Rank ? src.dimension_3()
- : SrcTraits::value_type::size()),
- (4 < SrcType::Rank ? src.dimension_4()
- : SrcTraits::value_type::size()),
- (5 < SrcType::Rank ? src.dimension_5()
- : SrcTraits::value_type::size()),
- (6 < SrcType::Rank ? src.dimension_6()
- : SrcTraits::value_type::size()),
- (7 < SrcType::Rank ? src.dimension_7()
- : SrcTraits::value_type::size())));
- }
-
- dst.m_impl_handle = src.m_impl_handle;
- }
-};
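-
-// ---- Editor's illustrative sketch (not part of the original header) ----
-// The assignment above backs conversions to the matching scalar view, in
-// which the Array extent becomes the trailing dimension (the familiar
-// View::array_type spelling is assumed here):
-#if 0
-using array_view_t = Kokkos::View<Kokkos::Array<double, 4>*>;
-array_view_t a("a", 10);
-array_view_t::array_type s = a;  // views the same data as double[10][4]
-#endif
-// ---- end editor's sketch ----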
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class SrcTraits, class... Args>
-class ViewMapping<
- std::enable_if_t<(
- std::is_same<typename SrcTraits::specialize, Kokkos::Array<>>::value &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value))>,
- SrcTraits, Args...> {
- private:
- static_assert(SrcTraits::rank == sizeof...(Args), "");
-
- enum : bool {
- R0 = is_integral_extent<0, Args...>::value,
- R1 = is_integral_extent<1, Args...>::value,
- R2 = is_integral_extent<2, Args...>::value,
- R3 = is_integral_extent<3, Args...>::value,
- R4 = is_integral_extent<4, Args...>::value,
- R5 = is_integral_extent<5, Args...>::value,
- R6 = is_integral_extent<6, Args...>::value,
- R7 = is_integral_extent<7, Args...>::value
- };
-
- enum {
- rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) +
- unsigned(R4) + unsigned(R5) + unsigned(R6) + unsigned(R7)
- };
-
- // Whether right-most rank is a range.
- enum {
- R0_rev =
- 0 == SrcTraits::rank
- ? false
- : (1 == SrcTraits::rank
- ? R0
- : (2 == SrcTraits::rank
- ? R1
- : (3 == SrcTraits::rank
- ? R2
- : (4 == SrcTraits::rank
- ? R3
- : (5 == SrcTraits::rank
- ? R4
- : (6 == SrcTraits::rank
- ? R5
- : (7 == SrcTraits::rank
- ? R6
- : R7)))))))
- };
-
- // Subview's layout
- using array_layout =
- std::conditional_t<((rank == 0) ||
- (rank <= 2 && R0 &&
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value) ||
- (rank <= 2 && R0_rev &&
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value)),
- typename SrcTraits::array_layout,
- Kokkos::LayoutStride>;
-
- using value_type = typename SrcTraits::value_type;
-
- using data_type = std::conditional_t<
- rank == 0, value_type,
- std::conditional_t<
- rank == 1, value_type *,
- std::conditional_t<
- rank == 2, value_type **,
- std::conditional_t<
- rank == 3, value_type ***,
- std::conditional_t<
- rank == 4, value_type ****,
- std::conditional_t<
- rank == 5, value_type *****,
- std::conditional_t<
- rank == 6, value_type ******,
- std::conditional_t<rank == 7, value_type *******,
- value_type ********>>>>>>>>;
-
- public:
- using traits_type = Kokkos::ViewTraits<data_type, array_layout,
- typename SrcTraits::device_type,
- typename SrcTraits::memory_traits>;
-
- using type =
- Kokkos::View<data_type, array_layout, typename SrcTraits::device_type,
- typename SrcTraits::memory_traits>;
-
- KOKKOS_INLINE_FUNCTION
- static void assign(ViewMapping<traits_type, void> &dst,
- ViewMapping<SrcTraits, void> const &src, Args... args) {
- using DstType = ViewMapping<traits_type, void>;
-
- using dst_offset_type = typename DstType::offset_type;
- using dst_handle_type = typename DstType::handle_type;
-
- const SubviewExtents<SrcTraits::rank, rank> extents(src.m_impl_offset.m_dim,
- args...);
-
- dst.m_impl_offset = dst_offset_type(src.m_impl_offset, extents);
- dst.m_impl_handle = dst_handle_type(
- src.m_impl_handle +
- src.m_impl_offset(extents.domain_offset(0), extents.domain_offset(1),
- extents.domain_offset(2), extents.domain_offset(3),
- extents.domain_offset(4), extents.domain_offset(5),
- extents.domain_offset(6), extents.domain_offset(7)));
- }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #ifndef KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
-#define KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-struct WithoutInitializing_t {};
-struct AllowPadding_t {};
-
-template <typename>
-struct is_view_ctor_property : public std::false_type {};
-
-template <>
-struct is_view_ctor_property<WithoutInitializing_t> : public std::true_type {};
-
-template <>
-struct is_view_ctor_property<AllowPadding_t> : public std::true_type {};
-
-//----------------------------------------------------------------------------
-/**\brief Whether a type can be used for a view label */
-
-template <typename>
-struct is_view_label : public std::false_type {};
-
-template <>
-struct is_view_label<std::string> : public std::true_type {};
-
-template <unsigned N>
-struct is_view_label<char[N]> : public std::true_type {};
-
-template <unsigned N>
-struct is_view_label<const char[N]> : public std::true_type {};
-
-//----------------------------------------------------------------------------
-
-template <typename... P>
-struct ViewCtorProp;
-
-// Forward declare
-template <typename Specialize, typename T>
-struct CommonViewAllocProp;
-
-/* Dummy to allow for empty ViewCtorProp object
- */
-template <>
-struct ViewCtorProp<void> {};
-
-/* Common value_type stored as ViewCtorProp
- */
-template <typename Specialize, typename T>
-struct ViewCtorProp<void, CommonViewAllocProp<Specialize, T>> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = CommonViewAllocProp<Specialize, T>;
-
- KOKKOS_INLINE_FUNCTION
- ViewCtorProp(const type &arg) : value(arg) {}
- KOKKOS_INLINE_FUNCTION
- ViewCtorProp(type &&arg) : value(arg) {}
-
- type value;
-};
-
-/* std::integral_constant<unsigned,I> are dummy arguments
- * that avoid duplicate base class errors
- */
-template <unsigned I>
-struct ViewCtorProp<void, std::integral_constant<unsigned, I>> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- template <typename P>
- KOKKOS_INLINE_FUNCTION ViewCtorProp(const P &) {}
-};
-
-/* Property flags have constexpr value */
-template <typename P>
-struct ViewCtorProp<
- std::enable_if_t<std::is_same<P, AllowPadding_t>::value ||
- std::is_same<P, WithoutInitializing_t>::value>,
- P> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = P;
-
- ViewCtorProp(const type &) {}
-
- type value = type();
-};
-
-/* Map input label type to std::string */
-template <typename Label>
-struct ViewCtorProp<std::enable_if_t<is_view_label<Label>::value>, Label> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = std::string;
-
- ViewCtorProp(const type &arg) : value(arg) {}
- ViewCtorProp(type &&arg) : value(arg) {}
-
- type value;
-};
-
-template <typename Space>
-struct ViewCtorProp<std::enable_if_t<Kokkos::is_memory_space<Space>::value ||
- Kokkos::is_execution_space<Space>::value>,
- Space> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = Space;
-
- ViewCtorProp(const type &arg) : value(arg) {}
-
- type value;
-};
-
-template <typename T>
-struct ViewCtorProp<void, T *> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = T *;
-
- KOKKOS_INLINE_FUNCTION
- ViewCtorProp(const type arg) : value(arg) {}
-
- type value;
-};
-
-// For reasons not fully understood, this specialization must be spelled out
-// explicitly for NVCC/MSVC.
-template <typename T>
-struct ViewCtorProp<T *> {
- ViewCtorProp() = default;
- ViewCtorProp(const ViewCtorProp &) = default;
- ViewCtorProp &operator=(const ViewCtorProp &) = default;
-
- using type = T *;
-
- KOKKOS_INLINE_FUNCTION
- ViewCtorProp(const type arg) : value(arg) {}
-
- enum : bool { has_pointer = true };
- using pointer_type = type;
- type value;
-};
-
-// If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` directly
-// in the parameter lists and base class initializers, respectively, as far as
-// we can tell MSVC 16.5.5+CUDA 10.2 thinks that `ViewCtorProp` refers to the
-// current instantiation, not the template itself, and gets all kinds of
-// confused. To work around this, we just use a couple of alias templates that
-// amount to the same thing.
-template <typename... Args>
-using view_ctor_prop_args = ViewCtorProp<Args...>;
-
-template <typename Arg>
-using view_ctor_prop_base = ViewCtorProp<void, Arg>;
-
-template <typename... P>
-struct ViewCtorProp : public ViewCtorProp<void, P>... {
- private:
- using var_memory_space =
- Kokkos::Impl::has_condition<void, Kokkos::is_memory_space, P...>;
-
- using var_execution_space =
- Kokkos::Impl::has_condition<void, Kokkos::is_execution_space, P...>;
-
- struct VOIDDUMMY {};
-
- using var_pointer =
- Kokkos::Impl::has_condition<VOIDDUMMY, std::is_pointer, P...>;
-
- public:
- /* Flags for the common properties */
- enum { has_memory_space = var_memory_space::value };
- enum { has_execution_space = var_execution_space::value };
- enum { has_pointer = var_pointer::value };
- enum { has_label = Kokkos::Impl::has_type<std::string, P...>::value };
- enum { allow_padding = Kokkos::Impl::has_type<AllowPadding_t, P...>::value };
- enum {
- initialize = !Kokkos::Impl::has_type<WithoutInitializing_t, P...>::value
- };
-
- using memory_space = typename var_memory_space::type;
- using execution_space = typename var_execution_space::type;
- using pointer_type = typename var_pointer::type;
-
- /* Copy from a matching argument list.
- * Requires std::is_same< P , ViewCtorProp< void , Args >::value ...
- */
- template <typename... Args>
- inline ViewCtorProp(Args const &... args) : ViewCtorProp<void, P>(args)... {}
-
- template <typename... Args>
- KOKKOS_INLINE_FUNCTION ViewCtorProp(pointer_type arg0, Args const &... args)
- : ViewCtorProp<void, pointer_type>(arg0),
- ViewCtorProp<void, typename ViewCtorProp<void, Args>::type>(args)... {}
-
- /* Copy from a matching property subset */
- KOKKOS_INLINE_FUNCTION ViewCtorProp(pointer_type arg0)
- : ViewCtorProp<void, pointer_type>(arg0) {}
-
- // If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` here
- // directly, MSVC 16.5.5+CUDA 10.2 appears to think that `ViewCtorProp` refers
- // to the current instantiation, not the template itself, and gets all kinds
- // of confused. To work around this, we just use a couple of alias templates
- // that amount to the same thing.
- template <typename... Args>
- ViewCtorProp(view_ctor_prop_args<Args...> const &arg)
- : view_ctor_prop_base<Args>(
- static_cast<view_ctor_prop_base<Args> const &>(arg))... {
- // Suppress an unused argument warning that (at least at one point) would
- // show up if sizeof...(Args) == 0
- (void)arg;
- }
-};
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-namespace Impl {
-struct ViewAllocateWithoutInitializingBackwardCompat {};
-
-template <>
-struct ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {};
-
-// NOTE This specialization is meant to be used as the
-// ViewAllocateWithoutInitializing alias below. All it does is add a
-// constructor that takes the label as its single argument.
-template <>
-struct ViewCtorProp<WithoutInitializing_t, std::string,
- ViewAllocateWithoutInitializingBackwardCompat>
- : ViewCtorProp<WithoutInitializing_t, std::string>,
- ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {
- ViewCtorProp(std::string label)
- : ViewCtorProp<WithoutInitializing_t, std::string>(
- WithoutInitializing_t(), std::move(label)) {}
-};
-} /* namespace Impl */
-
-using ViewAllocateWithoutInitializing =
- Impl::ViewCtorProp<Impl::WithoutInitializing_t, std::string,
- Impl::ViewAllocateWithoutInitializingBackwardCompat>;
-
-} /* namespace Kokkos */
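-
-// ---- Editor's illustrative sketch (not part of the original header) ----
-// The backward-compatibility alias above enables the classic spelling:
-#if 0
-Kokkos::View<double*> v(Kokkos::ViewAllocateWithoutInitializing("v"), 1000);
-// The allocation is tracked and labeled "v", but the 1000 doubles stay
-// uninitialized: WithoutInitializing_t clears the `initialize` flag that
-// the allocation path consults before running the fill functor.
-#endif
-// ---- end editor's sketch ----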
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP
-#define KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP
-
-#include <Kokkos_Layout.hpp>
-#include <Kokkos_View.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-// View offset and mapping for tiled views
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, 0, 0, 0, 0, 0, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, 0, 0, 0, 0, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, 0, 0, 0, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, 0, 0, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
- unsigned ArgN5>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, 0, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
- unsigned ArgN5, unsigned ArgN6>
-struct is_array_layout<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, ArgN6, 0, true>>
- : public std::true_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
- unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
-struct is_array_layout<
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, ArgN0, ArgN1, ArgN2,
- ArgN3, ArgN4, ArgN5, ArgN6, ArgN7, true>>
- : public std::true_type {};
-
-template <class L>
-struct is_array_layout_tiled : public std::false_type {};
-
-template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
- unsigned ArgN5, unsigned ArgN6, unsigned ArgN7, bool IsPowerTwo>
-struct is_array_layout_tiled<Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, ArgN6, ArgN7,
- IsPowerTwo>> : public std::true_type {
-};  // The trailing "true" template parameter means that only power-of-two
-    // tile extents are currently supported.
-
-namespace Impl {
-
-template <class Dimension, class Layout>
-struct ViewOffset<
- Dimension, Layout,
- std::enable_if_t<((Dimension::rank <= 8) && (Dimension::rank >= 2) &&
- is_array_layout<Layout>::value &&
- is_array_layout_tiled<Layout>::value)>> {
- public:
- static constexpr Kokkos::Iterate outer_pattern = Layout::outer_pattern;
- static constexpr Kokkos::Iterate inner_pattern = Layout::inner_pattern;
-
- static constexpr int VORank = Dimension::rank;
-
- static constexpr unsigned SHIFT_0 =
- Kokkos::Impl::integral_power_of_two(Layout::N0);
- static constexpr unsigned SHIFT_1 =
- Kokkos::Impl::integral_power_of_two(Layout::N1);
- static constexpr unsigned SHIFT_2 =
- Kokkos::Impl::integral_power_of_two(Layout::N2);
- static constexpr unsigned SHIFT_3 =
- Kokkos::Impl::integral_power_of_two(Layout::N3);
- static constexpr unsigned SHIFT_4 =
- Kokkos::Impl::integral_power_of_two(Layout::N4);
- static constexpr unsigned SHIFT_5 =
- Kokkos::Impl::integral_power_of_two(Layout::N5);
- static constexpr unsigned SHIFT_6 =
- Kokkos::Impl::integral_power_of_two(Layout::N6);
- static constexpr unsigned SHIFT_7 =
- Kokkos::Impl::integral_power_of_two(Layout::N7);
- static constexpr int MASK_0 = Layout::N0 - 1;
- static constexpr int MASK_1 = Layout::N1 - 1;
- static constexpr int MASK_2 = Layout::N2 - 1;
- static constexpr int MASK_3 = Layout::N3 - 1;
- static constexpr int MASK_4 = Layout::N4 - 1;
- static constexpr int MASK_5 = Layout::N5 - 1;
- static constexpr int MASK_6 = Layout::N6 - 1;
- static constexpr int MASK_7 = Layout::N7 - 1;
-
- static constexpr unsigned SHIFT_2T = SHIFT_0 + SHIFT_1;
- static constexpr unsigned SHIFT_3T = SHIFT_0 + SHIFT_1 + SHIFT_2;
- static constexpr unsigned SHIFT_4T = SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3;
- static constexpr unsigned SHIFT_5T =
- SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4;
- static constexpr unsigned SHIFT_6T =
- SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5;
- static constexpr unsigned SHIFT_7T =
- SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5 + SHIFT_6;
- static constexpr unsigned SHIFT_8T = SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
- SHIFT_4 + SHIFT_5 + SHIFT_6 + SHIFT_7;
-
- // Is an irregular layout that does not have uniform striding for each index.
- using is_mapping_plugin = std::true_type;
- using is_regular = std::false_type;
-
- using size_type = size_t;
- using dimension_type = Dimension;
- using array_layout = Layout;
-
- dimension_type m_dim;
- size_type m_tile_N0; // Num tiles dim 0
- size_type m_tile_N1;
- size_type m_tile_N2;
- size_type m_tile_N3;
- size_type m_tile_N4;
- size_type m_tile_N5;
- size_type m_tile_N6;
- size_type m_tile_N7;
-
- //----------------------------------------
-
-#define DEBUG_OUTPUT_CHECK 0
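-// (Editor's note: the DEBUG_OUTPUT_CHECK blocks below stream to std::cout,
-// so enabling them additionally requires including <iostream>.)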
-
- // Rank 2
- template <typename I0, typename I1>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0,
- I1 const& i1) const {
- auto tile_offset =
- (outer_pattern == (Kokkos::Iterate::Left))
- ? (((i0 >> SHIFT_0) + m_tile_N0 * ((i1 >> SHIFT_1))) << SHIFT_2T)
- : (((m_tile_N1 * (i0 >> SHIFT_0) + (i1 >> SHIFT_1))) << SHIFT_2T);
- // ( num_tiles[1] * ti0 + ti1 ) * FTD
-
- auto local_offset = (inner_pattern == (Kokkos::Iterate::Left))
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0))
- : (((i0 & MASK_0) << SHIFT_1) + (i1 & MASK_1));
- // ( tile_dim[1] * li0 + li1 )
-
-#if DEBUG_OUTPUT_CHECK
- std::cout << "Am I Outer Left? "
- << (outer_pattern == (Kokkos::Iterate::Left)) << std::endl;
- std::cout << "Am I Inner Left? "
- << (inner_pattern == (Kokkos::Iterate::Left)) << std::endl;
- std::cout << "i0 = " << i0 << " i1 = " << i1
- << "\ntilei0 = " << (i0 >> SHIFT_0)
- << " tilei1 = " << (i1 >> SHIFT_1)
- << "locali0 = " << (i0 & MASK_0)
- << "\nlocali1 = " << (i1 & MASK_1) << std::endl;
-#endif
-
- return tile_offset + local_offset;
- }
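-
-  // [Editor's worked example, not in the original source] With 4x4 tiles
-  // (SHIFT_0 = SHIFT_1 = 2, MASK_0 = MASK_1 = 3, SHIFT_2T = 4) and both
-  // patterns Iterate::Left, the index (i0, i1) = (5, 6) falls in tile
-  // (1, 1) at local entry (1, 2), so
-  //   tile_offset  = (1 + m_tile_N0 * 1) << 4
-  //   local_offset = 1 + (2 << 2) = 9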
-
- // Rank 3
- template <typename I0, typename I1, typename I2>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 * ((i1 >> SHIFT_1) + m_tile_N1 * (i2 >> SHIFT_2)))
- << SHIFT_3T)
- : ((m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) + (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2))
- << SHIFT_3T);
-
- auto local_offset = (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)))
- : (((i0 & MASK_0) << (SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1) << (SHIFT_2)) + (i2 & MASK_2));
-
-#if DEBUG_OUTPUT_CHECK
- std::cout << "Am I Outer Left? "
- << (outer_pattern == (Kokkos::Iterate::Left)) << std::endl;
- std::cout << "Am I Inner Left? "
- << (inner_pattern == (Kokkos::Iterate::Left)) << std::endl;
- std::cout << "i0 = " << i0 << " i1 = " << i1 << " i2 = " << i2
- << "\ntilei0 = " << (i0 >> SHIFT_0)
- << " tilei1 = " << (i1 >> SHIFT_1)
- << " tilei2 = " << (i2 >> SHIFT_2)
- << "\nlocali0 = " << (i0 & MASK_0)
- << "locali1 = " << (i1 & MASK_1) << "locali2 = " << (i2 & MASK_2)
- << std::endl;
-#endif
-
- return tile_offset + local_offset;
- }
-
- // Rank 4
- template <typename I0, typename I1, typename I2, typename I3>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2,
- I3 const& i3) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 * ((i1 >> SHIFT_1) +
- m_tile_N1 * ((i2 >> SHIFT_2) +
- m_tile_N2 * (i3 >> SHIFT_3))))
- << SHIFT_4T)
- : ((m_tile_N3 * (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
- (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2)) +
- (i3 >> SHIFT_3))
- << SHIFT_4T);
-
- auto local_offset =
- (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
- ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)))
- : (((i0 & MASK_0) << (SHIFT_3 + SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1) << (SHIFT_3 + SHIFT_2)) +
- ((i2 & MASK_2) << (SHIFT_3)) + (i3 & MASK_3));
-
- return tile_offset + local_offset;
- }
-
- // Rank 5
- template <typename I0, typename I1, typename I2, typename I3, typename I4>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2, I3 const& i3,
- I4 const& i4) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 *
- ((i1 >> SHIFT_1) +
- m_tile_N1 * ((i2 >> SHIFT_2) +
- m_tile_N2 * ((i3 >> SHIFT_3) +
- m_tile_N3 * (i4 >> SHIFT_4)))))
- << SHIFT_5T)
- : ((m_tile_N4 *
- (m_tile_N3 * (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
- (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2)) +
- (i3 >> SHIFT_3)) +
- (i4 >> SHIFT_4))
- << SHIFT_5T);
-
- auto local_offset =
- (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
- ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
- ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)))
- : (((i0 & MASK_0) << (SHIFT_4 + SHIFT_3 + SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1) << (SHIFT_4 + SHIFT_3 + SHIFT_2)) +
- ((i2 & MASK_2) << (SHIFT_4 + SHIFT_3)) +
- ((i3 & MASK_3) << (SHIFT_4)) + (i4 & MASK_4));
-
- return tile_offset + local_offset;
- }
-
- // Rank 6
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2, I3 const& i3,
- I4 const& i4,
- I5 const& i5) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 *
- ((i1 >> SHIFT_1) +
- m_tile_N1 *
- ((i2 >> SHIFT_2) +
- m_tile_N2 *
- ((i3 >> SHIFT_3) +
- m_tile_N3 * ((i4 >> SHIFT_4) +
- m_tile_N4 * (i5 >> SHIFT_5))))))
- << SHIFT_6T)
- : ((m_tile_N5 *
- (m_tile_N4 *
- (m_tile_N3 *
- (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
- (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2)) +
- (i3 >> SHIFT_3)) +
- (i4 >> SHIFT_4)) +
- (i5 >> SHIFT_5))
- << SHIFT_6T);
-
- auto local_offset =
- (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
- ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
- ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
- ((i5 & MASK_5)
- << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)))
- : (((i0 & MASK_0)
- << (SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1) << (SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2)) +
- ((i2 & MASK_2) << (SHIFT_5 + SHIFT_4 + SHIFT_3)) +
- ((i3 & MASK_3) << (SHIFT_5 + SHIFT_4)) +
- ((i4 & MASK_4) << (SHIFT_5)) + (i5 & MASK_5));
-
- return tile_offset + local_offset;
- }
-
- // Rank 7
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2, I3 const& i3,
- I4 const& i4, I5 const& i5,
- I6 const& i6) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 *
- ((i1 >> SHIFT_1) +
- m_tile_N1 *
- ((i2 >> SHIFT_2) +
- m_tile_N2 *
- ((i3 >> SHIFT_3) +
- m_tile_N3 *
- ((i4 >> SHIFT_4) +
- m_tile_N4 *
- ((i5 >> SHIFT_5) +
- m_tile_N5 * (i6 >> SHIFT_6)))))))
- << SHIFT_7T)
- : ((m_tile_N6 *
- (m_tile_N5 *
- (m_tile_N4 *
- (m_tile_N3 *
- (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
- (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2)) +
- (i3 >> SHIFT_3)) +
- (i4 >> SHIFT_4)) +
- (i5 >> SHIFT_5)) +
- (i6 >> SHIFT_6))
- << SHIFT_7T);
-
- auto local_offset =
- (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
- ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
- ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
- ((i5 & MASK_5)
- << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)) +
- ((i6 & MASK_6)
- << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5)))
- : (((i0 & MASK_0) << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3 +
- SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1)
- << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2)) +
- ((i2 & MASK_2) << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3)) +
- ((i3 & MASK_3) << (SHIFT_6 + SHIFT_5 + SHIFT_4)) +
- ((i4 & MASK_4) << (SHIFT_6 + SHIFT_5)) +
- ((i5 & MASK_5) << (SHIFT_6)) + (i6 & MASK_6));
-
- return tile_offset + local_offset;
- }
-
- // Rank 8
- template <typename I0, typename I1, typename I2, typename I3, typename I4,
- typename I5, typename I6, typename I7>
- KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
- I2 const& i2, I3 const& i3,
- I4 const& i4, I5 const& i5,
- I6 const& i6,
- I7 const& i7) const {
- auto tile_offset =
- (outer_pattern == Kokkos::Iterate::Left)
- ? (((i0 >> SHIFT_0) +
- m_tile_N0 *
- ((i1 >> SHIFT_1) +
- m_tile_N1 *
- ((i2 >> SHIFT_2) +
- m_tile_N2 *
- ((i3 >> SHIFT_3) +
- m_tile_N3 *
- ((i4 >> SHIFT_4) +
- m_tile_N4 *
- ((i5 >> SHIFT_5) +
- m_tile_N5 *
- ((i6 >> SHIFT_6) +
- m_tile_N6 * (i7 >> SHIFT_7))))))))
- << SHIFT_8T)
- : ((m_tile_N7 *
- (m_tile_N6 *
- (m_tile_N5 *
- (m_tile_N4 *
- (m_tile_N3 *
- (m_tile_N2 *
- (m_tile_N1 * (i0 >> SHIFT_0) +
- (i1 >> SHIFT_1)) +
- (i2 >> SHIFT_2)) +
- (i3 >> SHIFT_3)) +
- (i4 >> SHIFT_4)) +
- (i5 >> SHIFT_5)) +
- (i6 >> SHIFT_6)) +
- (i7 >> SHIFT_7))
- << SHIFT_8T);
-
- auto local_offset =
- (inner_pattern == Kokkos::Iterate::Left)
- ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
- ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
- ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
- ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
- ((i5 & MASK_5)
- << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)) +
- ((i6 & MASK_6) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
- SHIFT_4 + SHIFT_5)) +
- ((i7 & MASK_7) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
- SHIFT_4 + SHIFT_5 + SHIFT_6)))
- : (((i0 & MASK_0) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 +
- SHIFT_3 + SHIFT_2 + SHIFT_1)) +
- ((i1 & MASK_1) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 +
- SHIFT_3 + SHIFT_2)) +
- ((i2 & MASK_2)
- << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3)) +
- ((i3 & MASK_3) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4)) +
- ((i4 & MASK_4) << (SHIFT_7 + SHIFT_6 + SHIFT_5)) +
- ((i5 & MASK_5) << (SHIFT_7 + SHIFT_6)) +
- ((i6 & MASK_6) << (SHIFT_7)) + (i7 & MASK_7));
-
- return tile_offset + local_offset;
- }
-
- //----------------------------------------
-
- KOKKOS_INLINE_FUNCTION constexpr array_layout layout() const {
- return array_layout((VORank > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
- (VORank > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
- (VORank > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
- (VORank > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
- (VORank > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
- (VORank > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
- (VORank > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
- (VORank > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
- return m_dim.N0;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
- return m_dim.N1;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
- return m_dim.N2;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
- return m_dim.N3;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
- return m_dim.N4;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
- return m_dim.N5;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
- return m_dim.N6;
- }
- KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
- return m_dim.N7;
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_type size() const {
- return m_dim.N0 * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 * m_dim.N5 *
- m_dim.N6 * m_dim.N7;
- }
-
- // Strides are meaningless due to irregularity
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const { return 0; }
- KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const { return 0; }
-
-  // Every entry, including the customary total-length slot at s[rank], is
-  // reported as zero here, since strides are meaningless for this layout.
- template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- s[0] = 0;
- if (0 < dimension_type::rank) {
- s[1] = 0;
- }
- if (1 < dimension_type::rank) {
- s[2] = 0;
- }
- if (2 < dimension_type::rank) {
- s[3] = 0;
- }
- if (3 < dimension_type::rank) {
- s[4] = 0;
- }
- if (4 < dimension_type::rank) {
- s[5] = 0;
- }
- if (5 < dimension_type::rank) {
- s[6] = 0;
- }
- if (6 < dimension_type::rank) {
- s[7] = 0;
- }
- if (7 < dimension_type::rank) {
- s[8] = 0;
- }
- }
-
- KOKKOS_INLINE_FUNCTION constexpr size_type span() const {
-    // Rank 2: (NumTile0 * NumTile1) * TileSize, etc.
- return (VORank == 2)
- ? (m_tile_N0 * m_tile_N1) << SHIFT_2T
- : (VORank == 3)
- ? (m_tile_N0 * m_tile_N1 * m_tile_N2) << SHIFT_3T
- : (VORank == 4)
- ? (m_tile_N0 * m_tile_N1 * m_tile_N2 * m_tile_N3)
- << SHIFT_4T
- : (VORank == 5)
- ? (m_tile_N0 * m_tile_N1 * m_tile_N2 *
- m_tile_N3 * m_tile_N4)
- << SHIFT_5T
- : (VORank == 6)
- ? (m_tile_N0 * m_tile_N1 * m_tile_N2 *
- m_tile_N3 * m_tile_N4 * m_tile_N5)
- << SHIFT_6T
- : (VORank == 7)
- ? (m_tile_N0 * m_tile_N1 *
- m_tile_N2 * m_tile_N3 *
- m_tile_N4 * m_tile_N5 *
- m_tile_N6)
- << SHIFT_7T
- : (m_tile_N0 * m_tile_N1 *
- m_tile_N2 * m_tile_N3 *
- m_tile_N4 * m_tile_N5 *
- m_tile_N6 * m_tile_N7)
- << SHIFT_8T;
- }
-
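-  // Worked example (illustrative): a 10 x 10 rank-2 view with 4x4 tiles has
-  // m_tile_N0 == m_tile_N1 == (10 + 3) >> 2 == 3, so span() == (3 * 3) << 4
-  // == 144 even though size() == 100; the excess is padding in the partially
-  // filled boundary tiles.
-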
- KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
- return true;
- }
-
- //----------------------------------------
-#ifdef KOKKOS_IMPL_WINDOWS_CUDA
- KOKKOS_FUNCTION ViewOffset() {}
- KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) {
- m_dim = src.m_dim;
- m_tile_N0 = src.m_tile_N0;
- m_tile_N1 = src.m_tile_N1;
- m_tile_N2 = src.m_tile_N2;
- m_tile_N3 = src.m_tile_N3;
- m_tile_N4 = src.m_tile_N4;
- m_tile_N5 = src.m_tile_N5;
- m_tile_N6 = src.m_tile_N6;
- m_tile_N7 = src.m_tile_N7;
- }
- KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
- m_dim = src.m_dim;
- m_tile_N0 = src.m_tile_N0;
- m_tile_N1 = src.m_tile_N1;
- m_tile_N2 = src.m_tile_N2;
- m_tile_N3 = src.m_tile_N3;
- m_tile_N4 = src.m_tile_N4;
- m_tile_N5 = src.m_tile_N5;
- m_tile_N6 = src.m_tile_N6;
- m_tile_N7 = src.m_tile_N7;
- return *this;
- }
-#else
- KOKKOS_DEFAULTED_FUNCTION ~ViewOffset() = default;
- KOKKOS_DEFAULTED_FUNCTION ViewOffset() = default;
- KOKKOS_DEFAULTED_FUNCTION ViewOffset(const ViewOffset&) = default;
- KOKKOS_DEFAULTED_FUNCTION ViewOffset& operator=(const ViewOffset&) = default;
-#endif
-
- template <unsigned TrivialScalarSize>
- KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
- std::integral_constant<unsigned, TrivialScalarSize> const&,
- array_layout const arg_layout)
- : m_dim(arg_layout.dimension[0], arg_layout.dimension[1],
- arg_layout.dimension[2], arg_layout.dimension[3],
- arg_layout.dimension[4], arg_layout.dimension[5],
- arg_layout.dimension[6], arg_layout.dimension[7]),
- m_tile_N0((arg_layout.dimension[0] + MASK_0) >>
- SHIFT_0 /* number of tiles in first dimension */),
- m_tile_N1((arg_layout.dimension[1] + MASK_1) >> SHIFT_1),
- m_tile_N2((VORank > 2) ? (arg_layout.dimension[2] + MASK_2) >> SHIFT_2
- : 0),
- m_tile_N3((VORank > 3) ? (arg_layout.dimension[3] + MASK_3) >> SHIFT_3
- : 0),
- m_tile_N4((VORank > 4) ? (arg_layout.dimension[4] + MASK_4) >> SHIFT_4
- : 0),
- m_tile_N5((VORank > 5) ? (arg_layout.dimension[5] + MASK_5) >> SHIFT_5
- : 0),
- m_tile_N6((VORank > 6) ? (arg_layout.dimension[6] + MASK_6) >> SHIFT_6
- : 0),
- m_tile_N7((VORank > 7) ? (arg_layout.dimension[7] + MASK_7) >> SHIFT_7
- : 0) {}
-};
-
-// FIXME Remove the out-of-class definitions when we require C++17
-#define KOKKOS_ITERATE_VIEW_OFFSET_ENABLE \
- std::enable_if_t<((Dimension::rank <= 8) && (Dimension::rank >= 2) && \
- is_array_layout<Layout>::value && \
- is_array_layout_tiled<Layout>::value)>
-template <class Dimension, class Layout>
-constexpr Kokkos::Iterate ViewOffset<
- Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::outer_pattern;
-template <class Dimension, class Layout>
-constexpr Kokkos::Iterate ViewOffset<
- Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::inner_pattern;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::VORank;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_0;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_1;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_2;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_3;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_4;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_5;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_6;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_7;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_0;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_1;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_2;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_3;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_4;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_5;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_6;
-template <class Dimension, class Layout>
-constexpr int
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_7;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_2T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_3T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_4T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_5T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_6T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_7T;
-template <class Dimension, class Layout>
-constexpr unsigned
- ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_8T;
-#undef KOKKOS_ITERATE_VIEW_OFFSET_ENABLE
-
-//----------------------------------------
-
-// ViewMapping assign method needed in order to return a 'subview' tile as a
-// proper View. The outer iteration pattern determines the mapping of the
-// pointer offset to the beginning of the requested tile; the inner iteration
-// pattern determines the layout of the tile's View that is returned.
-
-// Rank 2
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1>
-class ViewMapping<std::enable_if_t<(N2 == 0 && N3 == 0 && N4 == 0 && N5 == 0 &&
- N6 == 0 && N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T**,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
- N3, N4, N5, N6, N7, true>,
- iType0, iType1> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T**, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits = Kokkos::ViewTraits<T[N0][N1], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left ? ((i_tile0 + src.m_impl_offset.m_tile_N0 * i_tile1)
- << src_offset_type::SHIFT_2T)
- : ((src.m_impl_offset.m_tile_N1 * i_tile0 + i_tile1)
- << src_offset_type::SHIFT_2T)) // offset to start
- // of the tile
- ),
- dst_offset_type());
- }
-};
-
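-// Worked example (illustrative): extracting tile (i_tile0, i_tile1) == (1, 2)
-// from a source with 4x4 tiles (SHIFT_2T == 4) and m_tile_N0 == 3, with the
-// outer pattern Iterate::Left, advances the handle by
-//   (1 + 3 * 2) << 4 == 112
-// elements, i.e. to the first element of the requested tile.
-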
-// Rank 3
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2>
-class ViewMapping<std::enable_if_t<(N3 == 0 && N4 == 0 && N5 == 0 && N6 == 0 &&
- N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T***,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
- N3, N4, N5, N6, N7, true>,
- iType0, iType1, iType2> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T***, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits = Kokkos::ViewTraits<T[N0][N1][N2], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1][N2], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 + src.m_impl_offset.m_tile_N1 * i_tile2))
- << src_offset_type::SHIFT_3T)
- : ((src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 * i_tile0 + i_tile1) +
- i_tile2)
- << src_offset_type::SHIFT_3T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-// Rank 4
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2, typename iType3>
-class ViewMapping<
- std::enable_if_t<(N4 == 0 && N5 == 0 && N6 == 0 && N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T****,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4,
- N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>,
- iType0, iType1, iType2, iType3> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T****, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits = Kokkos::ViewTraits<T[N0][N1][N2][N3], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1][N2][N3], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2, const iType3 i_tile3) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 + src.m_impl_offset.m_tile_N1 *
- (i_tile2 + src.m_impl_offset.m_tile_N2 *
- i_tile3)))
- << src_offset_type::SHIFT_4T)
- : ((src.m_impl_offset.m_tile_N3 *
- (src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 * i_tile0 +
- i_tile1) +
- i_tile2) +
- i_tile3)
- << src_offset_type::SHIFT_4T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-// Rank 5
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2, typename iType3, typename iType4>
-class ViewMapping<std::enable_if_t<(N5 == 0 && N6 == 0 && N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T*****,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
- N3, N4, N5, N6, N7, true>,
- iType0, iType1, iType2, iType3, iType4> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T*****, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits = Kokkos::ViewTraits<T[N0][N1][N2][N3][N4], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1][N2][N3][N4], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 +
- src.m_impl_offset.m_tile_N1 *
- (i_tile2 +
- src.m_impl_offset.m_tile_N2 *
- (i_tile3 +
- src.m_impl_offset.m_tile_N3 * i_tile4))))
- << src_offset_type::SHIFT_5T)
- : ((src.m_impl_offset.m_tile_N4 *
- (src.m_impl_offset.m_tile_N3 *
- (src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 * i_tile0 +
- i_tile1) +
- i_tile2) +
- i_tile3) +
- i_tile4)
- << src_offset_type::SHIFT_5T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-// Rank 6
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2, typename iType3, typename iType4,
- typename iType5>
-class ViewMapping<std::enable_if_t<(N6 == 0 && N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T******,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
- N3, N4, N5, N6, N7, true>,
- iType0, iType1, iType2, iType3, iType4, iType5> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T******, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits =
- Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1][N2][N3][N4][N5], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
- const iType5 i_tile5) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 +
- src.m_impl_offset.m_tile_N1 *
- (i_tile2 +
- src.m_impl_offset.m_tile_N2 *
- (i_tile3 +
- src.m_impl_offset.m_tile_N3 *
- (i_tile4 + src.m_impl_offset.m_tile_N4 *
- i_tile5)))))
- << src_offset_type::SHIFT_6T)
- : ((src.m_impl_offset.m_tile_N5 *
- (src.m_impl_offset.m_tile_N4 *
- (src.m_impl_offset.m_tile_N3 *
- (src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 * i_tile0 +
- i_tile1) +
- i_tile2) +
- i_tile3) +
- i_tile4) +
- i_tile5)
- << src_offset_type::SHIFT_6T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-// Rank 7
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2, typename iType3, typename iType4,
- typename iType5, typename iType6>
-class ViewMapping<std::enable_if_t<(N7 == 0)> // void
- ,
- Kokkos::ViewTraits<
- T*******,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
- N3, N4, N5, N6, N7, true>,
- iType0, iType1, iType2, iType3, iType4, iType5, iType6> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T*******, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits =
- Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>;
- using type = Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
- const iType5 i_tile5, const iType6 i_tile6) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 +
- src.m_impl_offset.m_tile_N1 *
- (i_tile2 +
- src.m_impl_offset.m_tile_N2 *
- (i_tile3 +
- src.m_impl_offset.m_tile_N3 *
- (i_tile4 +
- src.m_impl_offset.m_tile_N4 *
- (i_tile5 +
- src.m_impl_offset.m_tile_N5 *
- i_tile6))))))
- << src_offset_type::SHIFT_7T)
- : ((src.m_impl_offset.m_tile_N6 *
- (src.m_impl_offset.m_tile_N5 *
- (src.m_impl_offset.m_tile_N4 *
- (src.m_impl_offset.m_tile_N3 *
- (src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 *
- i_tile0 +
- i_tile1) +
- i_tile2) +
- i_tile3) +
- i_tile4) +
- i_tile5) +
- i_tile6)
- << src_offset_type::SHIFT_7T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-// Rank 8
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
- typename iType1, typename iType2, typename iType3, typename iType4,
- typename iType5, typename iType6, typename iType7>
-class ViewMapping<
- std::enable_if_t<(N0 != 0 && N1 != 0 && N2 != 0 && N3 != 0 && N4 != 0 &&
- N5 != 0 && N6 != 0 && N7 != 0)> // void
- ,
- Kokkos::ViewTraits<
- T********,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4,
- N5, N6, N7, true>,
- P...>,
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>,
- iType0, iType1, iType2, iType3, iType4, iType5, iType6, iType7> {
- public:
- using src_layout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
- using src_traits = Kokkos::ViewTraits<T********, src_layout, P...>;
-
- static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
- static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
- using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using traits =
- Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>;
- using type =
- Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>;
-
- KOKKOS_INLINE_FUNCTION static void assign(
- ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
- const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
- const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
- const iType5 i_tile5, const iType6 i_tile6, const iType7 i_tile7) {
- using dst_map_type = ViewMapping<traits, void>;
- using src_map_type = ViewMapping<src_traits, void>;
- using dst_handle_type = typename dst_map_type::handle_type;
- using dst_offset_type = typename dst_map_type::offset_type;
- using src_offset_type = typename src_map_type::offset_type;
-
- dst = dst_map_type(
- dst_handle_type(
- src.m_impl_handle +
- (is_outer_left
- ? ((i_tile0 +
- src.m_impl_offset.m_tile_N0 *
- (i_tile1 +
- src.m_impl_offset.m_tile_N1 *
- (i_tile2 +
- src.m_impl_offset.m_tile_N2 *
- (i_tile3 +
- src.m_impl_offset.m_tile_N3 *
- (i_tile4 +
- src.m_impl_offset.m_tile_N4 *
- (i_tile5 +
- src.m_impl_offset.m_tile_N5 *
- (i_tile6 +
- src.m_impl_offset.m_tile_N6 *
- i_tile7)))))))
- << src_offset_type::SHIFT_8T)
- : ((src.m_impl_offset.m_tile_N7 *
- (src.m_impl_offset.m_tile_N6 *
- (src.m_impl_offset.m_tile_N5 *
- (src.m_impl_offset.m_tile_N4 *
- (src.m_impl_offset.m_tile_N3 *
- (src.m_impl_offset.m_tile_N2 *
- (src.m_impl_offset.m_tile_N1 *
- i_tile0 +
- i_tile1) +
- i_tile2) +
- i_tile3) +
- i_tile4) +
- i_tile5) +
- i_tile6) +
- i_tile7)
- << src_offset_type::SHIFT_8T))) // offset to start of the
- // tile
- ,
- dst_offset_type());
- }
-};
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-//----------------------------------------
-
-namespace Kokkos {
-
-// Rank 2
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T**,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1], array_layout, P...>(src, SrcLayout(), i_tile0,
- i_tile1);
-}
-
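-// Usage sketch (illustrative; the view "v" and its extents are hypothetical):
-//   using TiledLayout = Kokkos::Experimental::LayoutTiled<
-//       Kokkos::Iterate::Left, Kokkos::Iterate::Left, 4, 4>;
-//   Kokkos::View<double**, TiledLayout> v("v", 16, 16);
-//   auto t = Kokkos::tile_subview(v, 2, 3);  // View<double[4][4], LayoutLeft>
-//   t(1, 1) = 42.0;  // aliases v(2 * 4 + 1, 3 * 4 + 1)
-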
-// Rank 3
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T***,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2);
-}
-
-// Rank 4
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2][N3],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T****,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2, const size_t i_tile3) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2][N3], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3);
-}
-
-// Rank 5
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2][N3][N4],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T*****,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2, const size_t i_tile3,
- const size_t i_tile4) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2][N3][N4], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4);
-}
-
-// Rank 6
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2][N3][N4][N5],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T******,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2, const size_t i_tile3,
- const size_t i_tile4, const size_t i_tile5) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2][N3][N4][N5], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5);
-}
-
-// Rank 7
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T*******,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2, const size_t i_tile3,
- const size_t i_tile4, const size_t i_tile5,
- const size_t i_tile6) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5,
- i_tile6);
-}
-
-// Rank 8
-template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
- unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
- unsigned N5, unsigned N6, unsigned N7, class... P>
-KOKKOS_INLINE_FUNCTION
- Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7],
- std::conditional_t<(InnerP == Kokkos::Iterate::Left),
- Kokkos::LayoutLeft, Kokkos::LayoutRight>,
- P...>
- tile_subview(const Kokkos::View<
- T********,
- Kokkos::Experimental::LayoutTiled<
- OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
- P...>& src,
- const size_t i_tile0, const size_t i_tile1,
- const size_t i_tile2, const size_t i_tile3,
- const size_t i_tile4, const size_t i_tile5,
- const size_t i_tile6, const size_t i_tile7) {
- // Force the specialized ViewMapping for extracting a tile
- // by using the first subview argument as the layout.
- using array_layout =
- std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
- Kokkos::LayoutRight>;
- using SrcLayout =
- Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
- N6, N7, true>;
-
- return Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>(
- src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5,
- i_tile6, i_tile7);
-}
-
-} /* namespace Kokkos */
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-#endif /* #ifndef KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_CUDA_SETUP_HPP_
-#define KOKKOS_CUDA_SETUP_HPP_
-
-#if !defined(KOKKOS_ENABLE_CUDA)
-#error \
- "KOKKOS_ENABLE_CUDA was not defined, but Kokkos_Setup_Cuda.hpp was included anyway."
-#endif
-
-#if defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__)
-#error \
- "KOKKOS_ENABLE_CUDA defined but the compiler is not defining the __CUDACC__ macro as expected"
-// Some tooling environments will still function better if we do this here.
-#define __CUDACC__
-#endif /* defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__) */
-
-// Compiling with a CUDA compiler.
-//
-// Include <cuda.h> to pick up the CUDA_VERSION macro defined as:
-// CUDA_VERSION = ( MAJOR_VERSION * 1000 ) + ( MINOR_VERSION * 10 )
-//
-// When generating device code the __CUDA_ARCH__ macro is defined as:
-// __CUDA_ARCH__ = ( MAJOR_CAPABILITY * 100 ) + ( MINOR_CAPABILITY * 10 )
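-//
-// For example, CUDA 11.2 yields CUDA_VERSION == 11020, and device code
-// compiled for compute capability 7.0 sees __CUDA_ARCH__ == 700.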
-
-#include <cuda_runtime.h>
-#include <cuda.h>
-
-#if defined(_WIN32)
-#define KOKKOS_IMPL_WINDOWS_CUDA
-#endif
-
-#if !defined(CUDA_VERSION)
-#error "#include <cuda.h> did not define CUDA_VERSION."
-#endif
-
-#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 300)
-// Compiling with CUDA compiler for device code.
-#error "Cuda device capability >= 3.0 is required."
-#endif
-
-#ifdef KOKKOS_ENABLE_CUDA_LAMBDA
-#define KOKKOS_LAMBDA [=] __host__ __device__
-
-#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
-#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
-#endif
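-
-// Usage sketch (illustrative; n, a, x, and y are hypothetical):
-//   Kokkos::parallel_for(
-//       "axpy", n, KOKKOS_LAMBDA(const int i) { y(i) = a * x(i) + y(i); });
-// The macro expands the lambda introducer to [=] __host__ __device__.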
-
-#else // !defined(KOKKOS_ENABLE_CUDA_LAMBDA)
-#undef KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
-#endif // !defined(KOKKOS_ENABLE_CUDA_LAMBDA)
-
-#if (CUDA_VERSION < 10000)
-#define KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API
-#endif
-
-#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
-// PTX atomics with memory order semantics are only available on volta and later
-#if !defined(KOKKOS_DISABLE_CUDA_ASM)
-#if !defined(KOKKOS_ENABLE_CUDA_ASM)
-#define KOKKOS_ENABLE_CUDA_ASM
-#if !defined(KOKKOS_DISABLE_CUDA_ASM_ATOMICS) && \
- defined(KOKKOS_ENABLE_GNU_ATOMICS)
-#define KOKKOS_ENABLE_CUDA_ASM_ATOMICS
-#endif
-#endif
-#endif
-#endif
-
-#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
-#define KOKKOS_IMPL_FORCEINLINE __forceinline__
-#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
-#define KOKKOS_IMPL_FUNCTION __device__ __host__
-#define KOKKOS_IMPL_HOST_FUNCTION __host__
-#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
-#if defined(KOKKOS_COMPILER_NVCC)
-#define KOKKOS_INLINE_FUNCTION_DELETED inline
-#else
-#define KOKKOS_INLINE_FUNCTION_DELETED __device__ __host__ inline
-#endif
-#if (CUDA_VERSION < 10000)
-#define KOKKOS_DEFAULTED_FUNCTION __host__ __device__ inline
-#else
-#define KOKKOS_DEFAULTED_FUNCTION inline
-#endif
-
-#if (CUDA_VERSION >= 10000)
-#define KOKKOS_CUDA_ENABLE_GRAPHS
-#endif
-
-#endif /* KOKKOS_CUDA_SETUP_HPP_ */
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SETUP_HIP_HPP_
-#define KOKKOS_SETUP_HIP_HPP_
-
-#if defined(KOKKOS_ENABLE_HIP)
-
-#define KOKKOS_IMPL_HIP_CLANG_WORKAROUND
-
-#define HIP_ENABLE_PRINTF
-#include <hip/hip_runtime.h>
-#include <hip/hip_runtime_api.h>
-
-#define KOKKOS_LAMBDA [=] __host__ __device__
-#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
-#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
-#endif
-
-#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
-#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
-#define KOKKOS_DEFAULTED_FUNCTION __device__ __host__ inline
-#define KOKKOS_INLINE_FUNCTION_DELETED __device__ __host__ inline
-#define KOKKOS_IMPL_FUNCTION __device__ __host__
-#define KOKKOS_IMPL_HOST_FUNCTION __host__
-#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
-
-#endif // #if defined( KOKKOS_ENABLE_HIP )
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_SETUP_SYCL_HPP_
-#define KOKKOS_SETUP_SYCL_HPP_
-
-// FIXME_SYCL: the fallback assert is temporarily disabled by default in the
-// compiler, so we need to force it on here.
-#ifndef SYCL_ENABLE_FALLBACK_ASSERT
-#define SYCL_ENABLE_FALLBACK_ASSERT
-#endif
-#ifndef SYCL_FALLBACK_ASSERT
-#define SYCL_FALLBACK_ASSERT 1
-#endif
-
-#include <CL/sycl.hpp>
-
-#ifdef __SYCL_DEVICE_ONLY__
-#define KOKKOS_IMPL_DO_NOT_USE_PRINTF(format, ...) \
- do { \
- const __attribute__((opencl_constant)) char fmt[] = (format); \
- sycl::ext::oneapi::experimental::printf(fmt, ##__VA_ARGS__); \
- } while (0)
-#endif
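-
-// Usage sketch (device code only; the format string must be a string literal
-// so it can be placed in the opencl_constant address space):
-//   KOKKOS_IMPL_DO_NOT_USE_PRINTF("i = %d\n", i);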
-
-#endif
+++ /dev/null
-/*
-//@HEADER
-// ************************************************************************
-//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
-// Solutions of Sandia, LLC (NTESS).
-//
-// Under the terms of Contract DE-NA0003525 with NTESS,
-// the U.S. Government retains certain rights in this software.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
-//@HEADER
-*/
-
-#ifndef KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
-#define KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
-
-#include <Kokkos_Macros.hpp>
-#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
-#include <impl/Kokkos_GraphImpl_fwd.hpp> // IsGraphKernelTag
-#include <traits/Kokkos_Traits_fwd.hpp>
-#include <impl/Kokkos_Utilities.hpp>
-
-namespace Kokkos {
-namespace Impl {
-
-//==============================================================================
-// <editor-fold desc="trait specification"> {{{1
-
-struct GraphKernelTrait : TraitSpecificationBase<GraphKernelTrait> {
- struct base_traits {
- using is_graph_kernel = std::false_type;
- KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
- };
- template <class, class AnalyzeNextTrait>
- struct mixin_matching_trait : AnalyzeNextTrait {
- using base_t = AnalyzeNextTrait;
- using base_t::base_t;
- using is_graph_kernel = std::true_type;
- };
- template <class T>
- using trait_matches_specification = std::is_same<T, IsGraphKernelTag>;
-};
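-
-// For example (illustrative): a policy built with the IsGraphKernelTag
-// property matches trait_matches_specification above and picks up the mixin,
-// so its analyzed traits expose is_graph_kernel = std::true_type; every other
-// policy keeps the std::false_type default from base_traits.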
-
-// </editor-fold> end trait specification }}}1
-//==============================================================================
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
+++ /dev/null
-
-#include <impl/Kokkos_Utilities.hpp> // type_list
-
-#include <traits/Kokkos_Traits_fwd.hpp>
-
-#ifndef KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
-#define KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
-
-namespace Kokkos {
-namespace Impl {
-
-//==============================================================================
-// <editor-fold desc="PolicyTraitMatcher"> {{{1
-
-// To handle the WorkTag case, we need more than just a predicate; we need
-// something that we can default to in the unspecialized case, just like we
-// do for AnalyzeExecPolicy
-template <class TraitSpec, class Trait, class Enable = void>
-struct PolicyTraitMatcher : std::false_type {};
-
-template <class TraitSpec, class Trait>
-struct PolicyTraitMatcher<
- TraitSpec, Trait,
- std::enable_if_t<
- TraitSpec::template trait_matches_specification<Trait>::value>>
- : std::true_type {};
-
-// </editor-fold> end PolicyTraitMatcher }}}1
-//==============================================================================
-
-} // end namespace Impl
-} // end namespace Kokkos
-
-#endif // KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
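
PolicyTraitMatcher reduces to a partial specialization guarded by `std::enable_if_t`: the primary template answers `false`, and the specialization kicks in only when the spec's nested predicate is well-formed and true. A standalone sketch of the same shape, with a hypothetical trait specification (C++17):

    #include <type_traits>

    struct my_tag {};
    struct my_trait_spec {
      template <class T>
      using trait_matches_specification = std::is_same<T, my_tag>;
    };

    // false_type unless the spec's nested predicate evaluates to true
    template <class TraitSpec, class Trait, class Enable = void>
    struct matcher : std::false_type {};

    template <class TraitSpec, class Trait>
    struct matcher<
        TraitSpec, Trait,
        std::enable_if_t<
            TraitSpec::template trait_matches_specification<Trait>::value>>
        : std::true_type {};

    static_assert(matcher<my_trait_spec, my_tag>::value);
    static_assert(!matcher<my_trait_spec, int>::value);
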
+++ /dev/null
-TRIBITS_PACKAGE_DEFINE_DEPENDENCIES(
- LIB_REQUIRED_PACKAGES KokkosCore
- LIB_OPTIONAL_TPLS Pthread CUDA HWLOC HPX
- TEST_OPTIONAL_TPLS CUSPARSE
- )
+++ /dev/null
-
-#ifndef KOKKOS_SIMD_HPP
-#define KOKKOS_SIMD_HPP
-
-#include <Kokkos_SIMD_Common.hpp>
-
-#include <Kokkos_SIMD_Scalar.hpp>
-
-#ifdef KOKKOS_ARCH_AVX512XEON
-#include <Kokkos_SIMD_AVX512.hpp>
-#endif
-
-namespace Kokkos {
-namespace Experimental {
-
-namespace simd_abi {
-
-namespace Impl {
-
-#if defined(KOKKOS_ARCH_AVX512XEON)
-using host_native = avx512_fixed_size<8>;
-#else
-using host_native = scalar;
-#endif
-
-template <class T>
-struct ForSpace;
-
-#ifdef KOKKOS_ENABLE_SERIAL
-template <>
-struct ForSpace<Kokkos::Serial> {
- using type = host_native;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_CUDA
-template <>
-struct ForSpace<Kokkos::Cuda> {
- using type = scalar;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_THREADS
-template <>
-struct ForSpace<Kokkos::Threads> {
- using type = host_native;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_HPX
-template <>
-struct ForSpace<Kokkos::Experimental::HPX> {
- using type = scalar;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_OPENMP
-template <>
-struct ForSpace<Kokkos::OpenMP> {
- using type = host_native;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_OPENMPTARGET
-template <>
-struct ForSpace<Kokkos::Experimental::OpenMPTarget> {
- using type = scalar;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_HIP
-template <>
-struct ForSpace<Kokkos::Experimental::HIP> {
- using type = scalar;
-};
-#endif
-
-#ifdef KOKKOS_ENABLE_SYCL
-template <>
-struct ForSpace<Kokkos::Experimental::SYCL> {
- using type = scalar;
-};
-#endif
-
-} // namespace Impl
-
-template <class Space>
-using ForSpace = typename Impl::ForSpace<typename Space::execution_space>::type;
-
-template <class T>
-using native = ForSpace<Kokkos::DefaultExecutionSpace>;
-
-} // namespace simd_abi
-
-template <class T>
-using native_simd = simd<T, simd_abi::native<T>>;
-template <class T>
-using native_simd_mask = simd_mask<T, simd_abi::native<T>>;
-
-namespace Impl {
-
-template <class... Abis>
-class abi_set {};
-
-#ifdef KOKKOS_ARCH_AVX512XEON
-using host_abi_set = abi_set<simd_abi::scalar, simd_abi::avx512_fixed_size<8>>;
-#else
-using host_abi_set = abi_set<simd_abi::scalar>;
-#endif
-
-using device_abi_set = abi_set<simd_abi::scalar>;
-
-} // namespace Impl
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
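
`native_simd<T>` therefore resolves to the 8-wide AVX-512 ABI when `KOKKOS_ARCH_AVX512XEON` is defined and to the 1-wide scalar ABI otherwise, so width-agnostic code compiles either way. A sketch of such use, assuming these bundled headers are on the include path; the `axpy` helper is illustrative, not part of Kokkos:

    #include <Kokkos_SIMD.hpp>
    #include <cstddef>

    // y = a*x + y, processed simd_t::size() lanes at a time.
    void axpy(double a, double const* x, double* y, std::size_t n) {
      using simd_t = Kokkos::Experimental::native_simd<double>;
      using tag    = Kokkos::Experimental::element_aligned_tag;
      constexpr std::size_t w = simd_t::size();  // 8 with AVX-512, else 1
      std::size_t i = 0;
      for (; i + w <= n; i += w) {
        simd_t xv, yv;
        xv.copy_from(x + i, tag());
        yv.copy_from(y + i, tag());
        yv = simd_t(a) * xv + yv;
        yv.copy_to(y + i, tag());
      }
      for (; i < n; ++i) y[i] = a * x[i] + y[i];  // scalar remainder
    }
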
+++ /dev/null
-
-#ifndef KOKKOS_SIMD_AVX512_HPP
-#define KOKKOS_SIMD_AVX512_HPP
-
-#include <functional>
-#include <type_traits>
-
-#include <Kokkos_SIMD_Common.hpp>
-
-#include <immintrin.h>
-
-namespace Kokkos {
-namespace Experimental {
-
-namespace simd_abi {
-
-template <int N>
-class avx512_fixed_size {};
-
-} // namespace simd_abi
-
-template <class T>
-class simd_mask<T, simd_abi::avx512_fixed_size<8>> {
- __mmask8 m_value;
-
- public:
- class reference {
- __mmask8& m_mask;
- int m_lane;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __mmask8 bit_mask() const {
- return __mmask8(std::int16_t(1 << m_lane));
- }
-
- public:
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__mmask8& mask_arg,
- int lane_arg)
- : m_mask(mask_arg), m_lane(lane_arg) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
- operator=(bool value) const {
- if (value) {
- m_mask |= bit_mask();
- } else {
- m_mask &= ~bit_mask();
- }
- return *this;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
- return (m_mask & bit_mask()) != 0;
- }
- };
- using value_type = bool;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
- : m_value(-std::int16_t(value)) {}
- template <class U>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
- simd_mask<U, simd_abi::avx512_fixed_size<8>> const& other)
- : m_value(static_cast<__mmask8>(other)) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
- __mmask8 const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __mmask8()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reference(m_value, int(i));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return static_cast<value_type>(reference(m_value, int(i)));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
- operator||(simd_mask const& other) const {
- return simd_mask(_kor_mask8(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
- operator&&(simd_mask const& other) const {
- return simd_mask(_kand_mask8(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
- static const __mmask8 true_value(static_cast<__mmask8>(simd_mask(true)));
- return simd_mask(_kxor_mask8(true_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
- simd_mask const& other) const {
- return m_value == other.m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
- simd_mask const& other) const {
- return m_value != other.m_value;
- }
-};
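
Because `__mmask8` packs all eight lanes into one byte, `operator[]` cannot hand out a plain `bool&`; the nested `reference` proxy above stands in for it. A short illustration of per-lane writes through the proxy (editorial example, assuming an AVX-512 host build):

    #include <cstddef>

    using avx512_mask_t = Kokkos::Experimental::simd_mask<
        double, Kokkos::Experimental::simd_abi::avx512_fixed_size<8>>;

    avx512_mask_t even_lanes() {
      avx512_mask_t m(false);
      for (std::size_t i = 0; i < avx512_mask_t::size(); i += 2)
        m[i] = true;  // assignment through the proxy sets one bit of the mask
      return m;
    }
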
-
-template <>
-class simd<std::int32_t, simd_abi::avx512_fixed_size<8>> {
- __m256i m_value;
-
- public:
- using value_type = std::int32_t;
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using mask_type = simd_mask<value_type, abi_type>;
- using reference = value_type&;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
- : m_value(_mm256_set1_epi32(value_type(value))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
- __m256i const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::uint64_t, abi_type> const& other);
- template <class G,
- std::enable_if_t<
-              // i.e., the generator must be invocable as
-              // value_type r = gen(std::integral_constant<std::size_t, i>());
- std::is_invocable_r_v<value_type, G,
- std::integral_constant<std::size_t, 0>>,
- bool> = false>
- KOKKOS_FORCEINLINE_FUNCTION simd(G&& gen)
- : m_value(
- _mm256_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
- gen(std::integral_constant<std::size_t, 1>()),
- gen(std::integral_constant<std::size_t, 2>()),
- gen(std::integral_constant<std::size_t, 3>()),
- gen(std::integral_constant<std::size_t, 4>()),
- gen(std::integral_constant<std::size_t, 5>()),
- gen(std::integral_constant<std::size_t, 6>()),
- gen(std::integral_constant<std::size_t, 7>()))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reinterpret_cast<value_type*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return reinterpret_cast<value_type const*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
- value_type* ptr, element_aligned_tag) const {
- _mm256_mask_storeu_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
- m_value);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
- element_aligned_tag) {
- m_value = _mm256_mask_loadu_epi32(
- _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<(simd const& other) const {
- return mask_type(_mm256_cmplt_epi32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>(simd const& other) const {
- return mask_type(_mm256_cmplt_epi32_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<=(simd const& other) const {
- return mask_type(_mm256_cmple_epi32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>=(simd const& other) const {
- return mask_type(_mm256_cmple_epi32_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator==(simd const& other) const {
- return mask_type(_mm256_cmpeq_epi32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator!=(simd const& other) const {
- return mask_type(_mm256_cmpneq_epi32_mask(m_value, other.m_value));
- }
-};
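
The generator constructor above invokes the callable once per lane with `std::integral_constant<std::size_t, i>`, so the lane index is a compile-time constant inside the callable. A small illustrative use (hypothetical helper, AVX-512 host build assumed):

    #include <cstdint>

    using simd_i32 = Kokkos::Experimental::simd<
        std::int32_t, Kokkos::Experimental::simd_abi::avx512_fixed_size<8>>;

    // Fills lanes 0..7 with the values 0..7; lane() yields the constant index.
    simd_i32 make_iota() {
      return simd_i32([](auto lane) { return std::int32_t(lane()); });
    }
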
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
- operator*(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_mullo_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
- operator+(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(0) - a;
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::int32_t, simd_abi::avx512_fixed_size<8>> condition(
- simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& a,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& b,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
- static_cast<__m256i>(b)));
-}
-
-template <>
-class simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> {
- __m256i m_value;
-
- public:
- using value_type = std::uint32_t;
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using mask_type = simd_mask<value_type, abi_type>;
- using reference = value_type&;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
- : m_value(_mm256_set1_epi32(bit_cast<std::int32_t>(value_type(value)))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
- __m256i const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
- : m_value(static_cast<__m256i>(other)) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reinterpret_cast<value_type*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return reinterpret_cast<value_type const*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<(simd const& other) const {
- return mask_type(_mm256_cmplt_epu32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>(simd const& other) const {
- return mask_type(_mm256_cmplt_epu32_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<=(simd const& other) const {
- return mask_type(_mm256_cmple_epu32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>=(simd const& other) const {
- return mask_type(_mm256_cmple_epu32_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator==(simd const& other) const {
- return mask_type(_mm256_cmpeq_epu32_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator!=(simd const& other) const {
- return mask_type(_mm256_cmpneq_epu32_mask(m_value, other.m_value));
- }
-};
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
- operator*(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_mullo_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
- operator+(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> condition(
- simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& a,
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& b,
- simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
- _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
- static_cast<__m256i>(b)));
-}
-
-template <>
-class simd<std::int64_t, simd_abi::avx512_fixed_size<8>> {
- __m512i m_value;
-
- public:
- using value_type = std::int64_t;
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using mask_type = simd_mask<value_type, abi_type>;
- using reference = value_type&;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
- : m_value(_mm512_set1_epi64(value_type(value))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
- : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other);
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reinterpret_cast<value_type*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return reinterpret_cast<value_type const*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
- value_type* ptr, element_aligned_tag) const {
- _mm512_mask_storeu_epi64(ptr, static_cast<__mmask8>(mask_type(true)),
- m_value);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator>>(int rhs) const {
- return _mm512_srai_epi64(m_value, rhs);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator>>(simd<int, simd_abi::avx512_fixed_size<8>> const& rhs) const {
- return _mm512_srav_epi64(m_value,
- _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator<<(int rhs) const {
- return _mm512_slli_epi64(m_value, rhs);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator<<(simd<int, simd_abi::avx512_fixed_size<8>> const& rhs) const {
- return _mm512_sllv_epi64(m_value,
- _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<(simd const& other) const {
- return mask_type(_mm512_cmplt_epi64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>(simd const& other) const {
- return mask_type(_mm512_cmplt_epi64_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<=(simd const& other) const {
- return mask_type(_mm512_cmple_epi64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>=(simd const& other) const {
- return mask_type(_mm512_cmple_epi64_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator==(simd const& other) const {
- return mask_type(_mm512_cmpeq_epi64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator!=(simd const& other) const {
- return mask_type(_mm512_cmpneq_epi64_mask(m_value, other.m_value));
- }
-};
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
- operator*(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_mullo_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
- operator+(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(0) - a;
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::int64_t, simd_abi::avx512_fixed_size<8>> condition(
- simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>> const& a,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& b,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
- static_cast<__m512i>(b)));
-}
-
-template <>
-class simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> {
- __m512i m_value;
-
- public:
- using value_type = std::uint64_t;
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using mask_type = simd_mask<value_type, abi_type>;
- using reference = value_type&;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
- : m_value(_mm512_set1_epi64(bit_cast<std::int64_t>(value_type(value)))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::int32_t, abi_type> const& other)
- : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
- simd<std::int64_t, abi_type> const& other)
- : m_value(static_cast<__m512i>(other)) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reinterpret_cast<value_type*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return reinterpret_cast<value_type const*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator>>(unsigned int rhs) const {
- return _mm512_srli_epi64(m_value, rhs);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator>>(
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) const {
- return _mm512_srlv_epi64(m_value,
- _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator<<(unsigned int rhs) const {
- return _mm512_slli_epi64(m_value, rhs);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator<<(
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) const {
- return _mm512_sllv_epi64(m_value,
- _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator&(simd const& other) const {
- return _mm512_and_epi64(m_value, other.m_value);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
- operator|(simd const& other) const {
- return _mm512_or_epi64(m_value, other.m_value);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<(simd const& other) const {
- return mask_type(_mm512_cmplt_epu64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>(simd const& other) const {
- return mask_type(_mm512_cmplt_epu64_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<=(simd const& other) const {
- return mask_type(_mm512_cmple_epu64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>=(simd const& other) const {
- return mask_type(_mm512_cmple_epu64_mask(other.m_value, m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator==(simd const& other) const {
- return mask_type(_mm512_cmpeq_epu64_mask(m_value, other.m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator!=(simd const& other) const {
- return mask_type(_mm512_cmpneq_epu64_mask(m_value, other.m_value));
- }
-};
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
- operator*(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_mullo_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
- operator+(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
- operator-(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> condition(
- simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& a,
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& b,
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
- _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
- static_cast<__m512i>(b)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::int32_t, simd_abi::avx512_fixed_size<8>>::simd(
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
- : m_value(_mm512_cvtepi64_epi32(static_cast<__m512i>(other))) {}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<std::int64_t, simd_abi::avx512_fixed_size<8>>::simd(
- simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
- : m_value(static_cast<__m512i>(other)) {}
-
-template <>
-class simd<double, simd_abi::avx512_fixed_size<8>> {
- __m512d m_value;
-
- public:
- using value_type = double;
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using mask_type = simd_mask<value_type, abi_type>;
- using reference = value_type&;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
- return 8;
- }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
- : m_value(_mm512_set1_pd(value_type(value))) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(double a, double b, double c,
- double d, double e, double f,
- double g, double h)
- : m_value(_mm512_setr_pd(a, b, c, d, e, f, g, h)) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
- __m512d const& value_in)
- : m_value(value_in) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
- return reinterpret_cast<value_type*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
- operator[](std::size_t i) const {
- return reinterpret_cast<value_type const*>(&m_value)[i];
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
- element_aligned_tag) {
- m_value = _mm512_loadu_pd(ptr);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
- value_type* ptr, element_aligned_tag) const {
- _mm512_storeu_pd(ptr, m_value);
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512d()
- const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_LT_OS));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_GT_OS));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator<=(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_LE_OS));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator>=(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_GE_OS));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator==(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_EQ_OS));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
- operator!=(simd const& other) const {
- return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_NEQ_OS));
- }
-};
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<double, simd_abi::avx512_fixed_size<8>>
- operator*(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_mul_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<double, simd_abi::avx512_fixed_size<8>>
- operator/(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_div_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<double, simd_abi::avx512_fixed_size<8>>
- operator+(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_add_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<double, simd_abi::avx512_fixed_size<8>>
- operator-(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
- simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_sub_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- simd<double, simd_abi::avx512_fixed_size<8>>
- operator-(simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_sub_pd(_mm512_set1_pd(0.0), static_cast<__m512d>(a)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> copysign(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a,
- simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
- static const __m512i sign_mask = reinterpret_cast<__m512i>(
- static_cast<__m512d>(simd<double, simd_abi::avx512_fixed_size<8>>(-0.0)));
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- reinterpret_cast<__m512d>(_mm512_xor_epi64(
- _mm512_andnot_epi64(
- sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(a))),
- _mm512_and_epi64(
- sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(b))))));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> abs(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- __m512d const rhs = static_cast<__m512d>(a);
- return simd<double, simd_abi::avx512_fixed_size<8>>(reinterpret_cast<__m512d>(
- _mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),
- reinterpret_cast<__m512i>(rhs))));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> sqrt(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_sqrt_pd(static_cast<__m512d>(a)));
-}
-
-#ifdef __INTEL_COMPILER
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> cbrt(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_cbrt_pd(static_cast<__m512d>(a)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> exp(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_exp_pd(static_cast<__m512d>(a)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> log(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_log_pd(static_cast<__m512d>(a)));
-}
-
-#endif
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> fma(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a,
- simd<double, simd_abi::avx512_fixed_size<8>> const& b,
- simd<double, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_fmadd_pd(static_cast<__m512d>(a), static_cast<__m512d>(b),
- static_cast<__m512d>(c)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> max(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a,
- simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_max_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> min(
- simd<double, simd_abi::avx512_fixed_size<8>> const& a,
- simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_min_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
-}
-
-KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
-simd<double, simd_abi::avx512_fixed_size<8>> condition(
- simd_mask<double, simd_abi::avx512_fixed_size<8>> const& a,
- simd<double, simd_abi::avx512_fixed_size<8>> const& b,
- simd<double, simd_abi::avx512_fixed_size<8>> const& c) {
- return simd<double, simd_abi::avx512_fixed_size<8>>(
- _mm512_mask_blend_pd(static_cast<__mmask8>(a), static_cast<__m512d>(c),
- static_cast<__m512d>(b)));
-}
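
As with the integer overloads, `condition(m, b, c)` is a lane-wise select: lanes where the mask is set take their value from `b`, the rest from `c`, implemented with the masked-blend intrinsic above rather than a branch. For instance, a branch-free clamp (illustrative helper, AVX-512 host build assumed):

    using simd_d = Kokkos::Experimental::simd<
        double, Kokkos::Experimental::simd_abi::avx512_fixed_size<8>>;

    // Negative lanes become 0.0; non-negative lanes pass through.
    simd_d clamp_nonnegative(simd_d const& v) {
      return condition(v < simd_d(0.0), simd_d(0.0), v);
    }
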
-
-template <>
-class const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
- simd<double, simd_abi::avx512_fixed_size<8>>> {
- public:
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using value_type = simd<double, abi_type>;
- using mask_type = simd_mask<double, abi_type>;
-
- protected:
- value_type& m_value;
- mask_type const& m_mask;
-
- public:
- const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
- : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
- mask() const {
- return m_mask;
- }
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
- value() const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void copy_to(double* mem, element_aligned_tag) const {
- _mm512_mask_storeu_pd(mem, static_cast<__mmask8>(m_mask),
- static_cast<__m512d>(m_value));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void scatter_to(
- double* mem,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
- _mm512_mask_i32scatter_pd(mem, static_cast<__mmask8>(m_mask),
- static_cast<__m256i>(index),
- static_cast<__m512d>(m_value), 8);
- }
-};
-
-template <>
-class where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
- simd<double, simd_abi::avx512_fixed_size<8>>>
- : public const_where_expression<
- simd_mask<double, simd_abi::avx512_fixed_size<8>>,
- simd<double, simd_abi::avx512_fixed_size<8>>> {
- public:
- where_expression(
- simd_mask<double, simd_abi::avx512_fixed_size<8>> const& mask_arg,
- simd<double, simd_abi::avx512_fixed_size<8>>& value_arg)
- : const_where_expression(mask_arg, value_arg) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void copy_from(double const* mem, element_aligned_tag) {
- m_value = value_type(_mm512_mask_loadu_pd(
- _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask), mem));
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void gather_from(
- double const* mem,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
- m_value = value_type(_mm512_mask_i32gather_pd(
- _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask),
- static_cast<__m256i>(index), mem, 8));
- }
- template <class U, std::enable_if_t<
- std::is_convertible_v<
- U, simd<double, simd_abi::avx512_fixed_size<8>>>,
- bool> = false>
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
- auto const x_as_value_type =
- static_cast<simd<double, simd_abi::avx512_fixed_size<8>>>(
- std::forward<U>(x));
- m_value = simd<double, simd_abi::avx512_fixed_size<8>>(_mm512_mask_blend_pd(
- static_cast<__mmask8>(m_mask), static_cast<__m512d>(m_value),
- static_cast<__m512d>(x_as_value_type)));
- }
-};
-
-template <>
-class const_where_expression<
- simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
- public:
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using value_type = simd<std::int32_t, abi_type>;
- using mask_type = simd_mask<std::int32_t, abi_type>;
-
- protected:
- value_type& m_value;
- mask_type const& m_mask;
-
- public:
- const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
- : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
- mask() const {
- return m_mask;
- }
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
- value() const {
- return m_value;
- }
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void copy_to(std::int32_t* mem, element_aligned_tag) const {
- _mm256_mask_storeu_epi32(mem, static_cast<__mmask8>(m_mask),
- static_cast<__m256i>(m_value));
- }
-};
-
-template <>
-class where_expression<simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>>
- : public const_where_expression<
- simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
- public:
- where_expression(
- simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>& value_arg)
- : const_where_expression(mask_arg, value_arg) {}
- KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
- void copy_from(std::int32_t const* mem, element_aligned_tag) {
- m_value = value_type(_mm256_mask_loadu_epi32(
- _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
- }
-};
-
-template <>
-class const_where_expression<
- simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> {
- public:
- using abi_type = simd_abi::avx512_fixed_size<8>;
- using value_type = simd<std::int64_t, abi_type>;
- using mask_type = simd_mask<std::int64_t, abi_type>;
-
- protected:
- value_type& m_value;
- mask_type const& m_mask;
-
- public:
- const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
- : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
- mask() const {
- return m_mask;
- }
- [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
- value() const {
- return m_value;
- }
-};
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int32_t hmax(
- const_where_expression<
- simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> const& x) {
- return _mm512_mask_reduce_max_epi32(
- static_cast<__mmask8>(x.mask()),
- _mm512_castsi256_si512(static_cast<__m256i>(x.value())));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double hmin(
- const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
- simd<double, simd_abi::avx512_fixed_size<8>>> const&
- x) {
- return _mm512_mask_reduce_min_pd(static_cast<__mmask8>(x.mask()),
- static_cast<__m512d>(x.value()));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int64_t reduce(
- const_where_expression<
- simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
- simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> const& x,
- std::int64_t, std::plus<>) {
- return _mm512_mask_reduce_add_epi64(static_cast<__mmask8>(x.mask()),
- static_cast<__m512i>(x.value()));
-}
-
-[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double reduce(
- const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
- simd<double, simd_abi::avx512_fixed_size<8>>> const&
- x,
- double, std::plus<>) {
- return _mm512_mask_reduce_add_pd(static_cast<__mmask8>(x.mask()),
- static_cast<__m512d>(x.value()));
-}
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
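
The where-expression types defined in this header drive masked memory traffic. Below is a sketch of a masked gather built directly from those types as they appear above (illustrative helper; inactive lanes come back as 0.0 because the gather's pass-through source is a zero vector):

    using gather_abi  = Kokkos::Experimental::simd_abi::avx512_fixed_size<8>;
    using gather_d    = Kokkos::Experimental::simd<double, gather_abi>;
    using gather_mask = Kokkos::Experimental::simd_mask<double, gather_abi>;
    using gather_idx  = Kokkos::Experimental::simd<std::int32_t, gather_abi>;

    // Loads mem[index[i]] into each active lane of the result.
    gather_d masked_gather(double const* mem, gather_idx const& index,
                           gather_mask const& active) {
      gather_d v(0.0);
      Kokkos::Experimental::where_expression<gather_mask, gather_d> w(active, v);
      w.gather_from(mem, index);
      return v;
    }
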
+++ /dev/null
-
-#ifndef KOKKOS_SIMD_SCALAR_HPP
-#define KOKKOS_SIMD_SCALAR_HPP
-
-#include <type_traits>
-#include <climits>
-#include <cfloat>
-
-#include <Kokkos_SIMD_Common.hpp>
-
-namespace Kokkos {
-namespace Experimental {
-
-namespace simd_abi {
-
-class scalar {};
-
-} // namespace simd_abi
-
-template <class T>
-class simd_mask<T, simd_abi::scalar> {
- bool m_value;
-
- public:
- using value_type = bool;
- using simd_type = simd<T, simd_abi::scalar>;
- using abi_type = simd_abi::scalar;
- using reference = value_type&;
- KOKKOS_DEFAULTED_FUNCTION simd_mask() = default;
- KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
- KOKKOS_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
- : m_value(value) {}
- template <class U>
- KOKKOS_FORCEINLINE_FUNCTION simd_mask(
- simd_mask<U, simd_abi::scalar> const& other)
- : m_value(static_cast<bool>(other)) {}
- KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator bool() const {
- return m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
- return m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
- return m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION simd_mask
- operator||(simd_mask const& other) const {
- return simd_mask(m_value || other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION simd_mask
- operator&&(simd_mask const& other) const {
- return simd_mask(m_value && other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION simd_mask operator!() const {
- return simd_mask(!m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION bool operator==(simd_mask const& other) const {
- return m_value == other.m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION bool operator!=(simd_mask const& other) const {
- return m_value != other.m_value;
- }
-};
-
-template <class T>
-class simd<T, simd_abi::scalar> {
- T m_value;
-
- public:
- using value_type = T;
- using abi_type = simd_abi::scalar;
- using mask_type = simd_mask<T, abi_type>;
- using reference = value_type&;
- KOKKOS_DEFAULTED_FUNCTION simd() = default;
- KOKKOS_DEFAULTED_FUNCTION simd(simd const&) = default;
- KOKKOS_DEFAULTED_FUNCTION simd(simd&&) = default;
- KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd const&) = default;
- KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd&&) = default;
- KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_FORCEINLINE_FUNCTION simd(U&& value) : m_value(value) {}
- template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
- bool> = false>
- KOKKOS_FORCEINLINE_FUNCTION explicit simd(simd<U, abi_type> const& other)
- : m_value(static_cast<U>(other)) {}
- template <class G,
- std::enable_if_t<
-              // i.e., the generator must be invocable as
-              // value_type r = gen(std::integral_constant<std::size_t, i>());
- std::is_invocable_r_v<value_type, G,
- std::integral_constant<std::size_t, 0>>,
- bool> = false>
- KOKKOS_FORCEINLINE_FUNCTION simd(G&& gen)
- : m_value(gen(std::integral_constant<std::size_t, 0>())) {}
- KOKKOS_FORCEINLINE_FUNCTION simd operator-() const { return simd(-m_value); }
- KOKKOS_FORCEINLINE_FUNCTION simd operator>>(int rhs) const {
- return simd(m_value >> rhs);
- }
- KOKKOS_FORCEINLINE_FUNCTION simd
- operator>>(simd<int, abi_type> const& rhs) const {
- return simd(m_value >> static_cast<int>(rhs));
- }
- KOKKOS_FORCEINLINE_FUNCTION simd operator<<(int rhs) const {
- return simd(m_value << rhs);
- }
- KOKKOS_FORCEINLINE_FUNCTION simd
- operator<<(simd<int, abi_type> const& rhs) const {
- return simd(m_value << static_cast<int>(rhs));
- }
- KOKKOS_FORCEINLINE_FUNCTION simd operator&(simd const& other) const {
- return m_value & other.m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION simd operator|(simd const& other) const {
- return m_value | other.m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator T() const {
- return m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator<(simd const& other) const {
- return mask_type(m_value < other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator>(simd const& other) const {
- return mask_type(m_value > other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator<=(simd const& other) const {
- return mask_type(m_value <= other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator>=(simd const& other) const {
- return mask_type(m_value >= other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator==(simd const& other) const {
- return mask_type(m_value == other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION mask_type operator!=(simd const& other) const {
- return mask_type(m_value != other.m_value);
- }
- KOKKOS_FORCEINLINE_FUNCTION void copy_from(T const* ptr,
- element_aligned_tag) {
- m_value = *ptr;
- }
- KOKKOS_FORCEINLINE_FUNCTION void copy_to(T* ptr, element_aligned_tag) const {
- *ptr = m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
- return m_value;
- }
- KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
- return m_value;
- }
-};
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator*(
- simd<T, simd_abi::scalar> const& lhs,
- simd<T, simd_abi::scalar> const& rhs) {
- return simd<T, simd_abi::scalar>(static_cast<T>(lhs) * static_cast<T>(rhs));
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator/(
- simd<T, simd_abi::scalar> const& lhs,
- simd<T, simd_abi::scalar> const& rhs) {
- return simd<T, simd_abi::scalar>(static_cast<T>(lhs) / static_cast<T>(rhs));
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator+(
- simd<T, simd_abi::scalar> const& lhs,
- simd<T, simd_abi::scalar> const& rhs) {
- return simd<T, simd_abi::scalar>(static_cast<T>(lhs) + static_cast<T>(rhs));
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator-(
- simd<T, simd_abi::scalar> const& lhs,
- simd<T, simd_abi::scalar> const& rhs) {
- return simd<T, simd_abi::scalar>(static_cast<T>(lhs) - static_cast<T>(rhs));
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> abs(
- simd<T, simd_abi::scalar> const& a) {
- return simd<T, simd_abi::scalar>(std::abs(static_cast<T>(a)));
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> sqrt(
- simd<T, simd_abi::scalar> const& a) {
- return simd<T, simd_abi::scalar>(std::sqrt(static_cast<T>(a)));
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> fma(
- simd<T, simd_abi::scalar> const& x, simd<T, simd_abi::scalar> const& y,
- simd<T, simd_abi::scalar> const& z) {
- return simd<T, simd_abi::scalar>((static_cast<T>(x) * static_cast<T>(y)) +
- static_cast<T>(z));
-}
-
-template <class T>
-KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> condition(
- desul::Impl::dont_deduce_this_parameter_t<
- simd_mask<T, simd_abi::scalar>> const& a,
- simd<T, simd_abi::scalar> const& b, simd<T, simd_abi::scalar> const& c) {
- return simd<T, simd_abi::scalar>(static_cast<bool>(a) ? static_cast<T>(b)
- : static_cast<T>(c));
-}
-
-template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi> copysign(
- simd<T, Abi> const& a, simd<T, Abi> const& b) {
- return std::copysign(static_cast<T>(a), static_cast<T>(b));
-}
-
-template <class T>
-class const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>> {
- public:
- using abi_type = simd_abi::scalar;
- using value_type = simd<T, abi_type>;
- using mask_type = simd_mask<T, abi_type>;
-
- protected:
- value_type& m_value;
- mask_type const& m_mask;
-
- public:
- KOKKOS_FORCEINLINE_FUNCTION
- const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
- : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
- KOKKOS_FORCEINLINE_FUNCTION
- mask_type const& mask() const { return m_mask; }
- KOKKOS_FORCEINLINE_FUNCTION
- value_type const& value() const { return m_value; }
- KOKKOS_FORCEINLINE_FUNCTION
- void copy_to(T* mem, element_aligned_tag) const {
- if (static_cast<bool>(m_mask)) *mem = static_cast<T>(m_value);
- }
- template <class Integral>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
- scatter_to(T* mem, simd<Integral, simd_abi::scalar> const& index) const {
- if (static_cast<bool>(m_mask))
- mem[static_cast<Integral>(index)] = static_cast<T>(m_value);
- }
-};
-
-template <class T>
-class where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>>
- : public const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>> {
- using base_type = const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>>;
-
- public:
- using typename base_type::value_type;
- KOKKOS_FORCEINLINE_FUNCTION
- where_expression(simd_mask<T, simd_abi::scalar> const& mask_arg,
- simd<T, simd_abi::scalar>& value_arg)
- : base_type(mask_arg, value_arg) {}
- KOKKOS_FORCEINLINE_FUNCTION
- void copy_from(T const* mem, element_aligned_tag) {
- if (static_cast<bool>(this->m_mask)) this->m_value = *mem;
- }
- template <class Integral>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
- gather_from(T const* mem, simd<Integral, simd_abi::scalar> const& index) {
- if (static_cast<bool>(this->m_mask))
- this->m_value = mem[static_cast<Integral>(index)];
- }
- template <class U, std::enable_if_t<
- std::is_convertible_v<U, simd<T, simd_abi::scalar>>,
- bool> = false>
- KOKKOS_FORCEINLINE_FUNCTION void operator=(U&& x) {
- if (static_cast<bool>(this->m_mask))
- this->m_value =
- static_cast<simd<T, simd_abi::scalar>>(std::forward<U>(x));
- }
-};
-
-template <class T, class BinaryOp>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
-reduce(const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>> const& x,
- T identity_element, BinaryOp) {
- return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
- : identity_element;
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
-hmax(const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>> const& x) {
- return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
- : Kokkos::reduction_identity<T>::max();
-}
-
-template <class T>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
-hmin(const_where_expression<simd_mask<T, simd_abi::scalar>,
- simd<T, simd_abi::scalar>> const& x) {
- return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
- : Kokkos::reduction_identity<T>::min();
-}
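-
-// A minimal usage sketch (illustration only, not part of the upstream header):
-// it exercises the scalar-ABI operators and reductions defined above. The
-// function name is hypothetical, and simd_mask's broadcast-from-bool
-// constructor is assumed from the primary simd_mask definition.
-KOKKOS_FORCEINLINE_FUNCTION double scalar_simd_usage_sketch(double x, double y) {
- using d_simd = simd<double, simd_abi::scalar>;
- using d_mask = simd_mask<double, simd_abi::scalar>;
- d_simd const a(x);
- d_simd const b(y);
- // For the scalar ABI every operator falls through to the single wrapped value.
- d_simd const c = fma(a, b, sqrt(abs(a * b)));
- // With a true mask the masked reduction returns the single lane; a false mask
- // would yield the reduction identity instead (for max: the lowest double).
- return hmax(const_where_expression<d_mask, d_simd>(d_mask(true), c));
-}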
-
-} // namespace Experimental
-} // namespace Kokkos
-
-#endif
+++ /dev/null
-// This file is needed in order to get the linker language
-// for the header-only submodule.
-// While we set the language properties in our normal CMake
-// path, they do not get set in the Trilinos environment.
-// Furthermore, setting LINKER_LANGUAGE is only supported
-// in CMake 3.19 and up.
-void KOKKOS_SIMD_SRC_DUMMY_PREVENT_LINK_ERROR() {}
+++ /dev/null
-DisableFormat: true
-SortIncludes: false
-
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMIC_REF_IMPL_HPP_
-#define DESUL_ATOMIC_REF_IMPL_HPP_
-
-#include <cstddef>
-#include <memory>
-#include <type_traits>
-
-#include "desul/atomics/Common.hpp"
-#include "desul/atomics/Generic.hpp"
-#include "desul/atomics/Macros.hpp"
-
-namespace desul {
-namespace Impl {
-
-// TODO current implementation is missing the following:
-// * member functions
-// * wait
-// * notify_one
-// * notify_all
-
-template <typename T,
- typename MemoryOrder,
- typename MemoryScope,
- bool = std::is_integral<T>{},
- bool = std::is_floating_point<T>{}>
-struct basic_atomic_ref;
-
-// base class for non-integral, non-floating-point, non-pointer types
-template <typename T, typename MemoryOrder, typename MemoryScope>
-struct basic_atomic_ref<T, MemoryOrder, MemoryScope, false, false> {
- static_assert(std::is_trivially_copyable<T>{}, "");
-
- private:
- T* _ptr;
-
- // 1/2/4/8/16-byte types must be aligned to at least their size
- static constexpr int _min_alignment = (sizeof(T) & (sizeof(T) - 1)) || sizeof(T) > 16
- ? 0
- : sizeof(T);
-
- public:
- using value_type = T;
-
- static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
-
- static constexpr std::size_t required_alignment = _min_alignment > alignof(T)
- ? _min_alignment
- : alignof(T);
-
- basic_atomic_ref() = delete;
- basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
-
- basic_atomic_ref(basic_atomic_ref const&) = default;
-
- explicit basic_atomic_ref(T& obj) : _ptr(std::addressof(obj)) {}
-
- T operator=(T desired) const noexcept {
- this->store(desired);
- return desired;
- }
-
- operator T() const noexcept { return this->load(); }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION void store(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- atomic_store(_ptr, desired, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_load(_ptr, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T exchange(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_exchange(_ptr, desired, order, MemoryScope());
- }
-
- DESUL_FUNCTION bool is_lock_free() const noexcept {
- return atomic_is_lock_free<sizeof(T), required_alignment>();
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_weak(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_weak(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_strong(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_strong(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-};
-
-// base class for atomic_ref<integral-type>
-template <typename T, typename MemoryOrder, typename MemoryScope>
-struct basic_atomic_ref<T, MemoryOrder, MemoryScope, true, false> {
- static_assert(std::is_integral<T>{}, "");
-
- private:
- T* _ptr;
-
- public:
- using value_type = T;
- using difference_type = value_type;
-
- static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
-
- static constexpr std::size_t required_alignment = sizeof(T) > alignof(T) ? sizeof(T)
- : alignof(T);
-
- basic_atomic_ref() = delete;
- basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
-
- explicit basic_atomic_ref(T& obj) : _ptr(&obj) {}
-
- basic_atomic_ref(basic_atomic_ref const&) = default;
-
- T operator=(T desired) const noexcept {
- this->store(desired);
- return desired;
- }
-
- operator T() const noexcept { return this->load(); }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION void store(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- atomic_store(_ptr, desired, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_load(_ptr, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T exchange(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_exchange(_ptr, desired, order, MemoryScope());
- }
-
- DESUL_FUNCTION bool is_lock_free() const noexcept {
- return atomic_is_lock_free<sizeof(T), required_alignment>();
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_weak(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_weak(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_strong(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_strong(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_add(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_add(_ptr, arg, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_sub(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_sub(_ptr, arg, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_and(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_and(_ptr, arg, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_or(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_or(_ptr, arg, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_xor(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_xor(_ptr, arg, order, MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator++() const noexcept {
- return atomic_add_fetch(_ptr, value_type(1), MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator++(int) const noexcept { return fetch_add(1); }
-
- DESUL_FUNCTION value_type operator--() const noexcept {
- return atomic_sub_fetch(_ptr, value_type(1), MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator--(int) const noexcept { return fetch_sub(1); }
-
- DESUL_FUNCTION value_type operator+=(value_type arg) const noexcept {
- return atomic_add_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator-=(value_type arg) const noexcept {
- return atomic_sub_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator&=(value_type arg) const noexcept {
- return atomic_and_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator|=(value_type arg) const noexcept {
- return atomic_or_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator^=(value_type arg) const noexcept {
- return atomic_xor_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-};
-
-// base class for atomic_ref<floating-point-type>
-template <typename T, typename MemoryOrder, typename MemoryScope>
-struct basic_atomic_ref<T, MemoryOrder, MemoryScope, false, true> {
- static_assert(std::is_floating_point<T>{}, "");
-
- private:
- T* _ptr;
-
- public:
- using value_type = T;
- using difference_type = value_type;
-
- static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
-
- static constexpr std::size_t required_alignment = alignof(T);
-
- basic_atomic_ref() = delete;
- basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
-
- explicit basic_atomic_ref(T& obj) : _ptr(&obj) {}
-
- basic_atomic_ref(basic_atomic_ref const&) = default;
-
- T operator=(T desired) const noexcept {
- this->store(desired);
- return desired;
- }
-
- operator T() const noexcept { return this->load(); }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION void store(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- atomic_store(_ptr, desired, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_load(_ptr, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T exchange(T desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_exchange(_ptr, desired, order, MemoryScope());
- }
-
- DESUL_FUNCTION bool is_lock_free() const noexcept {
- return atomic_is_lock_free<sizeof(T), required_alignment>();
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_weak(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_weak(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_strong(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_strong(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_add(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_add(_ptr, arg, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_sub(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_sub(_ptr, arg, order, MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator+=(value_type arg) const noexcept {
- return atomic_add_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator-=(value_type arg) const noexcept {
- return atomic_sub_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
- }
-};
-
-// base class for atomic_ref<pointer-type>
-template <typename T, typename MemoryOrder, typename MemoryScope>
-struct basic_atomic_ref<T*, MemoryOrder, MemoryScope, false, false> {
- private:
- T** _ptr;
-
- public:
- using value_type = T*;
- using difference_type = std::ptrdiff_t;
-
- static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T*));
-
- static constexpr std::size_t required_alignment = alignof(T*);
-
- basic_atomic_ref() = delete;
- basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
-
- explicit basic_atomic_ref(T*& arg) : _ptr(std::addressof(arg)) {}
-
- basic_atomic_ref(basic_atomic_ref const&) = default;
-
- T* operator=(T* desired) const noexcept {
- this->store(desired);
- return desired;
- }
-
- operator T*() const noexcept { return this->load(); }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION void store(T* desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- atomic_store(_ptr, desired, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T* load(_MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_load(_ptr, order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION T* exchange(T* desired,
- _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_exchange(_ptr, desired, order, MemoryScope());
- }
-
- DESUL_FUNCTION bool is_lock_free() const noexcept {
- return atomic_is_lock_free<sizeof(T*), required_alignment>();
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(T*& expected,
- T* desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_weak(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_weak(
- T*& expected, T* desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_weak(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T*& expected,
- T* desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure) const noexcept {
- return atomic_compare_exchange_strong(
- _ptr, expected, desired, success, failure, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION bool compare_exchange_strong(
- T*& expected, T* desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return compare_exchange_strong(expected,
- desired,
- order,
- cmpexch_failure_memory_order<_MemoryOrder>());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_add(difference_type d, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_add(_ptr, _type_size(d), order, MemoryScope());
- }
-
- template <typename _MemoryOrder = MemoryOrder>
- DESUL_FUNCTION value_type
- fetch_sub(difference_type d, _MemoryOrder order = _MemoryOrder()) const noexcept {
- return atomic_fetch_sub(_ptr, _type_size(d), order, MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator++() const noexcept {
- return atomic_add_fetch(_ptr, _type_size(1), MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator++(int) const noexcept { return fetch_add(1); }
-
- DESUL_FUNCTION value_type operator--() const noexcept {
- return atomic_sub_fetch(_ptr, _type_size(1), MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator--(int) const noexcept { return fetch_sub(1); }
-
- DESUL_FUNCTION value_type operator+=(difference_type d) const noexcept {
- return atomic_add_fetch(_ptr, _type_size(d), MemoryOrder(), MemoryScope());
- }
-
- DESUL_FUNCTION value_type operator-=(difference_type d) const noexcept {
- return atomic_sub_fetch(_ptr, _type_size(d), MemoryOrder(), MemoryScope());
- }
-
- private:
- static constexpr std::ptrdiff_t _type_size(std::ptrdiff_t d) noexcept {
- static_assert(std::is_object<T>{}, "");
- return d * sizeof(T);
- }
-};
-
-} // namespace Impl
-
-template <typename T, typename MemoryOrder, typename MemoryScope>
-struct scoped_atomic_ref : Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope> {
- explicit scoped_atomic_ref(T& obj) noexcept
- : Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope>(obj) {}
-
- scoped_atomic_ref& operator=(scoped_atomic_ref const&) = delete;
-
- scoped_atomic_ref(scoped_atomic_ref const&) = default;
-
- using Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope>::operator=;
-};
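-
-// A minimal usage sketch (illustration only, not part of the upstream header),
-// assuming the relaxed order and device scope tags from Common.hpp:
-inline int scoped_atomic_ref_usage_sketch(int& counter) {
- scoped_atomic_ref<int, MemoryOrderRelaxed, MemoryScopeDevice> ref(counter);
- ref += 5; // atomic read-modify-write via the integral base class
- ref.store(ref.load() + 1); // NOT atomic as a whole: two independent atomics
- return ref.fetch_sub(1); // returns the value held before the subtraction
-}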
-
-} // namespace desul
-
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_CUDA_HPP_
-#define DESUL_ATOMICS_CUDA_HPP_
-
-#ifdef DESUL_HAVE_CUDA_ATOMICS
-// When building with Clang we need to always include the device functions, since
-// Clang must see a consistent overload set in both device and host compilation.
-// That means we need to know on the host what to make visible, i.e. we need
-// host-side compile-time knowledge of the architecture.
-#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) || \
- (!defined(__NVCC__) && !defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA))
-#define DESUL_HAVE_CUDA_ATOMICS_ASM
-#include <desul/atomics/cuda/CUDA_asm.hpp>
-#endif
-
-#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)) || \
- (!defined(__NVCC__) && !defined(DESUL_HAVE_CUDA_ATOMICS_ASM))
-namespace desul {
-namespace Impl {
-template <class T>
-struct is_cuda_atomic_integer_type {
- static constexpr bool value = std::is_same<T, int>::value ||
- std::is_same<T, unsigned int>::value ||
- std::is_same<T, unsigned long long int>::value;
-};
-
-template <class T>
-struct is_cuda_atomic_add_type {
- static constexpr bool value = is_cuda_atomic_integer_type<T>::value ||
-#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
- std::is_same<T, double>::value ||
-#endif
- std::is_same<T, float>::value;
-};
-
-template <class T>
-struct is_cuda_atomic_sub_type {
- static constexpr bool value =
- std::is_same<T, int>::value || std::is_same<T, unsigned int>::value;
-};
-} // namespace Impl
-
-// Atomic Add
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_add(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicAdd(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_add(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicAdd(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_add(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_add(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic Sub
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_sub(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicSub(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_sub(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicSub(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_sub(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_sub(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Wrap around atomic add
-__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrderRelaxed,
- MemoryScopeDevice) {
- return atomicInc(dest, val);
-}
-
-template <typename MemoryOrder>
-__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrder,
- MemoryScopeDevice) {
- __threadfence();
- unsigned int return_val = atomicInc(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <typename MemoryOrder>
-__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrder,
- MemoryScopeCore) {
- return atomic_fetch_inc_mod(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Wrap around atomic sub
-__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrderRelaxed,
- MemoryScopeDevice) {
- return atomicDec(dest, val);
-}
-
-template <typename MemoryOrder>
-__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrder,
- MemoryScopeDevice) {
- __threadfence();
- unsigned int return_val = atomicDec(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <typename MemoryOrder>
-__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
- unsigned int val,
- MemoryOrder,
- MemoryScopeCore) {
- return atomic_fetch_dec_mod(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
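-
-// Semantics note (illustration only): CUDA's atomicInc/atomicDec implement
-// wrap-around counters. atomic_fetch_inc_mod returns the old value and stores
-// (old >= val) ? 0 : old + 1, while atomic_fetch_dec_mod stores
-// (old == 0 || old > val) ? val : old - 1. A hypothetical ring-buffer index
-// sketch using the overloads above:
-__device__ inline unsigned int ring_buffer_next_slot_sketch(unsigned int* head,
- unsigned int capacity) {
- // Cycles *head through 0 .. capacity-1 and returns the slot to use.
- return atomic_fetch_inc_mod(
- head, capacity - 1u, MemoryOrderRelaxed(), MemoryScopeDevice());
-}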
-
-// Atomic Inc
-template <typename T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_inc(T* dest, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicAdd(dest, T(1));
-}
-
-template <typename T, typename MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_inc(T* dest, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicAdd(dest, T(1));
- __threadfence();
-
- return return_val;
-}
-
-template <typename T, typename MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
- atomic_fetch_inc(T* dest, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_add(dest, T(1), MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic Dec
-template <typename T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_dec(T* dest, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicSub(dest, T(1));
-}
-
-template <typename T, typename MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_dec(T* dest, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicSub(dest, T(1));
- __threadfence();
- return return_val;
-}
-
-template <typename T, typename MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
- atomic_fetch_dec(T* dest, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_sub(dest, T(1), MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic Max
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_max(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicMax(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_max(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicMax(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_max(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_max(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic Min
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_min(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicMin(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_min(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicMin(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_min(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_min(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic And
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_and(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicAnd(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_and(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicAnd(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_and(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_and(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic XOR
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_xor(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicXor(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_xor(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicXor(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_xor(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_xor(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-
-// Atomic OR
-template <class T>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_or(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
- return atomicOr(dest, val);
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_or(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
- __threadfence();
- T return_val = atomicOr(dest, val);
- __threadfence();
- return return_val;
-}
-
-template <class T, class MemoryOrder>
-__device__ inline
- std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
- atomic_fetch_or(T* dest, T val, MemoryOrder, MemoryScopeCore) {
- return atomic_fetch_or(dest, val, MemoryOrder(), MemoryScopeDevice());
-}
-} // namespace desul
-#endif
-
-#if !defined(__NVCC__)
-// Functions defined as device functions in CUDA which don't exist in the GCC overload
-// set
-namespace desul {
-
-#if defined(DESUL_HAVE_CUDA_ATOMICS_ASM)
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(TYPE, ORDER, SCOPE) \
- inline void atomic_add(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- (void)atomic_fetch_add(dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(int32_t, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(long,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(unsigned int, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(unsigned long long,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(float, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(double, MemoryOrderRelaxed, MemoryScopeDevice);
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(TYPE, ORDER, SCOPE) \
- inline void atomic_sub(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- (void)atomic_fetch_sub(dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(int32_t, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(long,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(unsigned int, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(float, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(double, MemoryOrderRelaxed, MemoryScopeDevice);
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_INC(TYPE, ORDER, SCOPE) \
- inline void atomic_inc(TYPE* const dest, ORDER order, SCOPE scope) { \
- (void)atomic_fetch_inc(dest, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_INC(unsigned int,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_DEC(TYPE, ORDER, SCOPE) \
- inline void atomic_dec(TYPE* const dest, ORDER order, SCOPE scope) { \
- (void)atomic_fetch_dec(dest, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_DEC(unsigned,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-
-#endif // DESUL_HAVE_CUDA_ATOMICS_ASM
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_INC_MOD(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_inc_mod(TYPE* dest, TYPE val, ORDER order, SCOPE scope) { \
- using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(TYPE)>::type; \
- cas_t oldval = reinterpret_cast<cas_t&>(*dest); \
- cas_t assume = oldval; \
- do { \
- assume = oldval; \
- TYPE newval = (reinterpret_cast<TYPE&>(assume) >= val) \
- ? static_cast<TYPE>(0) \
- : reinterpret_cast<TYPE&>(assume) + static_cast<TYPE>(1); \
- oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest), \
- assume, \
- reinterpret_cast<cas_t&>(newval), \
- order, \
- scope); \
- } while (assume != oldval); \
- return reinterpret_cast<TYPE&>(oldval); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_INC_MOD(unsigned int,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_DEC_MOD(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_dec_mod(TYPE* dest, TYPE val, ORDER order, SCOPE scope) { \
- using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(TYPE)>::type; \
- cas_t oldval = reinterpret_cast<cas_t&>(*dest); \
- cas_t assume = oldval; \
- do { \
- assume = oldval; \
- TYPE newval = ((reinterpret_cast<TYPE&>(assume) == static_cast<TYPE>(0)) | \
- (reinterpret_cast<TYPE&>(assume) > val)) \
- ? val \
- : reinterpret_cast<TYPE&>(assume) - static_cast<TYPE>(1); \
- oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest), \
- assume, \
- reinterpret_cast<cas_t&>(newval), \
- order, \
- scope); \
- } while (assume != oldval); \
- return reinterpret_cast<TYPE&>(oldval); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_DEC_MOD(unsigned int,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
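-
-// The INC_MOD/DEC_MOD macros above emulate the device-side wrap-around atomics
-// with a standard compare-and-swap loop. A sketch of the expansion for
-// unsigned int (inc_mod case):
-//   oldval = *dest;
-//   do {
-//     assume = oldval;
-//     newval = (assume >= val) ? 0 : assume + 1;  // inc_mod update rule
-//     oldval = atomic_compare_exchange(dest, assume, newval, order, scope);
-//   } while (assume != oldval);  // retry if another thread intervened
-//   return oldval;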
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_add(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::AddOper<TYPE, const TYPE>(), dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(float, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(double, MemoryOrderRelaxed, MemoryScopeDevice);
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_sub(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::SubOper<TYPE, const TYPE>(), dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(float, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(double, MemoryOrderRelaxed, MemoryScopeDevice);
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_max(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::MaxOper<TYPE, const TYPE>(), dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(int, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(long,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned int,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned long,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-// DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned long long, MemoryOrderRelaxed, MemoryScopeDevice);
-
-#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(TYPE, ORDER, SCOPE) \
- inline TYPE atomic_fetch_min(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::MinOper<TYPE, const TYPE>(), dest, val, order, scope); \
- }
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(int, MemoryOrderRelaxed, MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(long,
- MemoryOrderRelaxed,
- MemoryScopeDevice); // only for ASM?
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned int,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned long,
- MemoryOrderRelaxed,
- MemoryScopeDevice);
-// DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned long long, MemoryOrderRelaxed, MemoryScopeDevice);
-
-} // namespace desul
-
-// Functions defined in the GCC overload set but not in the device overload set
-namespace desul {
-__device__ inline unsigned long long atomic_fetch_add(unsigned long long* const dest,
- unsigned long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::AddOper<unsigned long long, const unsigned long long>(),
- dest,
- val,
- order,
- scope);
-}
-__device__ inline long long atomic_fetch_add(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::AddOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_add(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::AddOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_fetch_sub(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::SubOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_sub(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::SubOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_max(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::MaxOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_min(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::MinOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_or(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::OrOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_fetch_or(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::OrOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_xor(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::XorOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_fetch_xor(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::XorOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_fetch_and(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::AndOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_fetch_and(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_fetch_oper(
- Impl::AndOper<long long, const long long>(), dest, val, order, scope);
-}
-
-__device__ inline unsigned long long atomic_add_fetch(unsigned long long* const dest,
- unsigned long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::AddOper<unsigned long long, const unsigned long long>(),
- dest,
- val,
- order,
- scope);
-}
-__device__ inline long long atomic_add_fetch(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::AddOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_add_fetch(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::AddOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_sub_fetch(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::SubOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_sub_fetch(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::SubOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_or_fetch(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::OrOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_or_fetch(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::OrOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_xor_fetch(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::XorOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_xor_fetch(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::XorOper<long, const long>(), dest, val, order, scope);
-}
-__device__ inline long long atomic_and_fetch(long long* const dest,
- long long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::AndOper<long long, const long long>(), dest, val, order, scope);
-}
-__device__ inline long atomic_and_fetch(long* const dest,
- long val,
- MemoryOrderRelaxed order,
- MemoryScopeDevice scope) {
- return Impl::atomic_oper_fetch(
- Impl::AndOper<long, const long>(), dest, val, order, scope);
-}
-} // namespace desul
-#endif
-
-#endif // DESUL_HAVE_CUDA_ATOMICS
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
-#include "desul/atomics/Common.hpp"
-#include "desul/atomics/Lock_Array_Cuda.hpp"
-
-#ifdef DESUL_HAVE_CUDA_ATOMICS
-namespace desul {
-// Only include if compiling device code, or the CUDA compiler is not NVCC (i.e. Clang)
-// atomic_thread_fence implementation
-#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
-__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
- __threadfence();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
- __threadfence();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
- __threadfence();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
- __threadfence();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
- __threadfence_block();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
- __threadfence_block();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
- __threadfence_block();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
- __threadfence_block();
-}
-#if (__CUDA_ARCH__ >= 600) || !defined(__NVCC__)
-__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode) {
- __threadfence_system();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode) {
- __threadfence_system();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeNode) {
- __threadfence_system();
-}
-__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeNode) {
- __threadfence_system();
-}
-#endif
-#endif
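-
-// Summary of the overloads above: MemoryScopeCore maps to __threadfence_block(),
-// MemoryScopeDevice to __threadfence(), and MemoryScopeNode to
-// __threadfence_system(). The memory-order tag only selects which overloads
-// exist; each CUDA fence intrinsic acts as a full fence.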
-} // namespace desul
-
-// Compare exchange for pre-Volta devices. This is not supported with Clang as the
-// CUDA compiler, since we do NOT have a way of including the code for Clang only
-// when the compute capability is smaller than 700; on Clang the device-side
-// symbol list must be independent of __CUDA_ARCH__.
-// FIXME temporary fix for https://github.com/kokkos/kokkos/issues/4390
-#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)) || \
-    (!defined(__NVCC__) && defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA) && 0)
-namespace desul {
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- unsigned int return_val = atomicCAS(reinterpret_cast<unsigned int*>(dest),
- reinterpret_cast<unsigned int&>(compare),
- reinterpret_cast<unsigned int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- unsigned long long int return_val =
- atomicCAS(reinterpret_cast<unsigned long long int*>(dest),
- reinterpret_cast<unsigned long long int&>(compare),
- reinterpret_cast<unsigned long long int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderAcquire, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- unsigned int return_val = atomicExch(reinterpret_cast<unsigned int*>(dest),
- reinterpret_cast<unsigned int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- unsigned long long int return_val =
- atomicExch(reinterpret_cast<unsigned long long int*>(dest),
- reinterpret_cast<unsigned long long int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderRelease, MemoryScope) {
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderAcquire, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderAcqRel, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-} // namespace desul
-#endif
-
-// Include the CUDA PTX based exchange atomics.
-// When building with Clang we need to always include the device functions,
-// since Clang must see a consistent overload set in both device and host
-// compilation. That means we need to know on the host what to make visible,
-// i.e. we need host-side compile-time knowledge of the architecture.
-// DESUL proper simply does not support Clang as a CUDA compiler pre-Volta;
-// Kokkos has that knowledge and uses it here, which allows Kokkos to use
-// Clang with pre-Volta architectures as a CUDA compiler.
-#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) || \
- (!defined(__NVCC__) && !defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA))
-#include <desul/atomics/cuda/CUDA_asm_exchange.hpp>
-#endif
-
-// SeqCst is not directly supported by PTX, need the additional fences:
-
-#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
-namespace desul {
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-} // namespace desul
-#endif
-
-#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
-namespace desul {
-template <typename T, class MemoryOrder, class MemoryScope>
-__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
- // This is a way to avoid deadlock within a warp or wavefront: only the lanes
- // that acquire the per-address lock perform their update, and the ballot loop
- // keeps the active lanes converged until every one of them is done.
- T return_val;
- int done = 0;
- unsigned int mask = DESUL_IMPL_ACTIVEMASK;
- unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda((void*)dest, scope)) {
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- if (return_val == compare) {
- *dest = value;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- }
- Impl::unlock_address_cuda((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
- }
- return return_val;
-}
-template <typename T, class MemoryOrder, class MemoryScope>
-__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
-atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
- // This is a way to avoid deadlock within a warp or wavefront (see the
- // compare-exchange variant above for details on the ballot loop).
- T return_val;
- int done = 0;
- unsigned int mask = DESUL_IMPL_ACTIVEMASK;
- unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda((void*)dest, scope)) {
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- *dest = value;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_cuda((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
- }
- return return_val;
-}
-} // namespace desul
-#endif
-
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
-#include "desul/atomics/Common.hpp"
-
-#ifdef DESUL_HAVE_GCC_ATOMICS
-#if !defined(DESUL_HAVE_16BYTE_COMPARE_AND_SWAP) && !defined(__CUDACC__)
-// This doesn't work in WSL??
-//#define DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
-#endif
-namespace desul {
-
-namespace Impl {
-template <class T>
-struct atomic_exchange_available_gcc {
- constexpr static bool value =
-#ifndef DESUL_HAVE_LIBATOMIC
- ((sizeof(T) == 4 && alignof(T) == 4) ||
-#ifdef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
- (sizeof(T) == 16 && alignof(T) == 16) ||
-#endif
- (sizeof(T) == 8 && alignof(T) == 8)) &&
-#endif
- std::is_trivially_copyable<T>::value;
-};
-} // namespace Impl
-
-#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
-// clang-format off
-// Disable warning for large atomics on clang 7 and up (checked with godbolt)
-// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
-// https://godbolt.org/z/G7YhqhbG6
-// clang-format on
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Watomic-alignment"
-#endif
-template <class MemoryOrder, class MemoryScope>
-void atomic_thread_fence(MemoryOrder, MemoryScope) {
- __atomic_thread_fence(GCCMemoryOrder<MemoryOrder>::value);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T> atomic_exchange(
- T* dest, T value, MemoryOrder, MemoryScope) {
- T return_val;
- __atomic_exchange(dest, &value, &return_val, GCCMemoryOrder<MemoryOrder>::value);
- return return_val;
-}
-
-// The failure memory order for atomic_compare_exchange_n cannot be RELEASE nor
-// ACQREL, so those two are handled separately.
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
-atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
- (void)__atomic_compare_exchange(dest,
- &compare,
- &value,
- false,
- GCCMemoryOrder<MemoryOrder>::value,
- GCCMemoryOrder<MemoryOrder>::value);
- return compare;
-}
-
-template <typename T, class MemoryScope>
-std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
-atomic_compare_exchange(T* dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
- (void)__atomic_compare_exchange(
- dest, &compare, &value, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- return compare;
-}
-
-template <typename T, class MemoryScope>
-std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
-atomic_compare_exchange(T* dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
- (void)__atomic_compare_exchange(
- dest, &compare, &value, false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
- return compare;
-}
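-
-// Usage sketch (editor's illustration, not part of the original header): the
-// overloads above are selected by tag dispatch on the memory-order type, e.g.
-//
-//   int x = 0;
-//   int prev = desul::atomic_compare_exchange(
-//       &x, /*compare=*/0, /*value=*/42,
-//       desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
-//   // prev == 0 and x == 42, provided no other thread changed x in between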
-
-#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
-#pragma GCC diagnostic pop
-#endif
-} // namespace desul
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
-#include "desul/atomics/Common.hpp"
-#include "desul/atomics/Lock_Array_HIP.hpp"
-
-#ifdef DESUL_HAVE_HIP_ATOMICS
-namespace desul {
-inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
- __threadfence();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
- __threadfence();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
- __threadfence();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
- __threadfence();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
- __threadfence_block();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
- __threadfence_block();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
- __threadfence_block();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
- __threadfence_block();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode) {
- __threadfence_system();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode) {
- __threadfence_system();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeNode) {
- __threadfence_system();
-}
-inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeNode) {
- __threadfence_system();
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- unsigned int return_val = atomicCAS(reinterpret_cast<unsigned int*>(dest),
- reinterpret_cast<unsigned int&>(compare),
- reinterpret_cast<unsigned int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- unsigned long long int return_val =
- atomicCAS(reinterpret_cast<unsigned long long int*>(dest),
- reinterpret_cast<unsigned long long int&>(compare),
- reinterpret_cast<unsigned long long int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderAcquire, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- unsigned int return_val = atomicExch(reinterpret_cast<unsigned int*>(dest),
- reinterpret_cast<unsigned int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
- T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- unsigned long long int return_val =
- atomicExch(reinterpret_cast<unsigned long long int*>(dest),
- reinterpret_cast<unsigned long long int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderRelease, MemoryScope) {
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderAcquire, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderAcqRel, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_exchange(T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryScope>
-__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T return_val = atomic_compare_exchange(
- dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
- // This is a way to avoid deadlock within a warp or wavefront
- T return_val;
- int done = 0;
- unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
- unsigned long long int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip((void*)dest, scope)) {
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- if (return_val == compare) {
- *dest = value;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- }
- Impl::unlock_address_hip((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(done);
- }
- return return_val;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
-atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
- // This is a way to avoid deadlock within a warp or wavefront
- T return_val;
- int done = 0;
- unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
- unsigned long long int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip((void*)dest, scope)) {
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- *dest = value;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_hip((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(done);
- }
- return return_val;
-}
-} // namespace desul
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
-#include <type_traits>
-
-#include "desul/atomics/Common.hpp"
-#ifdef DESUL_HAVE_MSVC_ATOMICS
-
-#ifndef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
-#define DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
-#endif
-
-namespace desul {
-
-// Forward-declare these functions. Their definitions use compare_exchange
-// themselves, so the header that provides them is included after this file.
-namespace Impl {
-template <typename MemoryScope>
-inline bool lock_address(void* ptr, MemoryScope ms);
-
-template <typename MemoryScope>
-void unlock_address(void* ptr, MemoryScope ms);
-} // namespace Impl
-
-template <class MemoryOrder, class MemoryScope>
-void atomic_thread_fence(MemoryOrder, MemoryScope) {
- std::atomic_thread_fence(CXXMemoryOrder<MemoryOrder>::value);
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 1, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderRelaxed,
- MemoryScope) {
- char return_val = _InterlockedExchange8((char*)dest, *((char*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 2, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderRelaxed,
- MemoryScope) {
- short return_val = _InterlockedExchange16((short*)dest, *((short*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderRelaxed,
- MemoryScope) {
- long return_val = _InterlockedExchange((long*)dest, *((long*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderRelaxed,
- MemoryScope) {
- __int64 return_val = _InterlockedExchange64((__int64*)dest, *((__int64*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 1, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderSeqCst,
- MemoryScope) {
- char return_val = _InterlockedExchange8((char*)dest, *((char*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 2, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderSeqCst,
- MemoryScope) {
- short return_val = _InterlockedExchange16((short*)dest, *((short*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderSeqCst,
- MemoryScope) {
- long return_val = _InterlockedExchange((long*)dest, *((long*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
- T val,
- MemoryOrderSeqCst,
- MemoryScope) {
- __int64 return_val = _InterlockedExchange64((__int64*)dest, *((__int64*)&val));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 &&
- sizeof(T) != 8),
- T>::type
-atomic_exchange(T* const dest, T val, MemoryOrder, MemoryScope scope) {
- while (!Impl::lock_address((void*)dest, scope)) {
- }
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = *dest;
- *dest = val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
-
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-}
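-
-// Editor's note (interpretation, not from the original source): for
-// MemoryOrderSeqCst an extra release fence is issued before the acquire
-// fence, approximating a full barrier around the locked critical section.
-// In std::atomic terms the sequence above is roughly
-//
-//   std::atomic_thread_fence(std::memory_order_release);  // SeqCst only
-//   std::atomic_thread_fence(std::memory_order_acquire);
-//   /* read-modify-write of *dest while holding the lock */
-//   std::atomic_thread_fence(std::memory_order_release);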
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 1, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
- char return_val =
- _InterlockedCompareExchange8((char*)dest, *((char*)&val), *((char*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 2, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
- short return_val =
- _InterlockedCompareExchange16((short*)dest, *((short*)&val), *((short*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
- long return_val =
- _InterlockedCompareExchange((long*)dest, *((long*)&val), *((long*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
- __int64 return_val = _InterlockedCompareExchange64(
- (__int64*)dest, *((__int64*)&val), *((__int64*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 16, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
- Dummy16ByteValue* val16 = reinterpret_cast<Dummy16ByteValue*>(&val);
- (void)_InterlockedCompareExchange128(reinterpret_cast<__int64*>(dest),
- val16->value2,
- val16->value1,
- (reinterpret_cast<__int64*>(&compare)));
- return compare;
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 1, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
- char return_val =
- _InterlockedCompareExchange8((char*)dest, *((char*)&val), *((char*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 2, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
- short return_val =
- _InterlockedCompareExchange16((short*)dest, *((short*)&val), *((short*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
- long return_val =
- _InterlockedCompareExchange((long*)dest, *((long*)&val), *((long*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
- __int64 return_val = _InterlockedCompareExchange64(
- (__int64*)dest, *((__int64*)&val), *((__int64*)&compare));
- return *(reinterpret_cast<T*>(&return_val));
-}
-
-template <typename T, class MemoryScope>
-typename std::enable_if<sizeof(T) == 16, T>::type atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
- Dummy16ByteValue* val16 = reinterpret_cast<Dummy16ByteValue*>(&val);
- (void)_InterlockedCompareExchange128(reinterpret_cast<__int64*>(dest),
- val16->value2,
- val16->value1,
- (reinterpret_cast<__int64*>(&compare)));
- return compare;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 &&
- sizeof(T) != 8 && sizeof(T) != 16),
- T>::type
-atomic_compare_exchange(
- T* const dest, T compare, T val, MemoryOrder, MemoryScope scope) {
- while (!Impl::lock_address((void*)dest, scope)) {
- }
- if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
- atomic_thread_fence(MemoryOrderRelease(), scope);
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = *dest;
- if (return_val == compare) {
- *dest = val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- }
-
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-}
-
-} // namespace desul
-
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
-#include <omp.h>
-
-#include "desul/atomics/Common.hpp"
-
-#ifdef DESUL_HAVE_OPENMP_ATOMICS
-namespace desul {
-
-#if _OPENMP > 201800
-// atomic_thread_fence for Core Scope
-inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
-// OpenMP has no seq_cst flush; for a fence, acq_rel should be equivalent.
-#pragma omp flush acq_rel
-}
-inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
-#pragma omp flush acq_rel
-}
-inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
-#pragma omp flush release
-}
-inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
-#pragma omp flush acquire
-}
-// atomic_thread_fence for Device Scope
-inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
-// OpenMP has no seq_cst flush; for a fence, acq_rel should be equivalent.
-#pragma omp flush acq_rel
-}
-inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
-#pragma omp flush acq_rel
-}
-inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
-#pragma omp flush release
-}
-inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
-#pragma omp flush acquire
-}
-#else
-// atomic_thread_fence for Core Scope
-inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
-#pragma omp flush
-}
-// atomic_thread_fence for Device Scope
-inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
-#pragma omp flush
-}
-inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
-#pragma omp flush
-}
-#endif
-
-template <typename T, class MemoryOrder, class MemoryScope>
-T atomic_exchange(T* dest, T value, MemoryOrder, MemoryScope) {
- T return_val;
- if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value)
- atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
- T& x = *dest;
-#pragma omp atomic capture
- {
- return_val = x;
- x = value;
- }
- if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value)
- atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
- return return_val;
-}
-
-// OpenMP does not provide a compare-exchange, so we use the GCC built-ins and
-// rely on testing that this works. Note that this means we exercise these
-// built-ins in OpenMPTarget offload regions!
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<Impl::atomic_always_lock_free(sizeof(T)), T> atomic_compare_exchange(
- T* dest, T compare, T value, MemoryOrder, MemoryScope) {
- using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(T)>::type;
- cas_t retval = __sync_val_compare_and_swap(reinterpret_cast<volatile cas_t*>(dest),
- reinterpret_cast<cas_t&>(compare),
- reinterpret_cast<cas_t&>(value));
- return reinterpret_cast<T&>(retval);
-}
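-
-// Editor's sketch of what the bit-cast CAS above amounts to for a 4-byte T,
-// assuming sizeof(float) == sizeof(unsigned int):
-//
-//   float x = 1.0f, expected = 1.0f, desired = 2.0f;
-//   unsigned int prev = __sync_val_compare_and_swap(
-//       reinterpret_cast<volatile unsigned int*>(&x),
-//       reinterpret_cast<unsigned int&>(expected),
-//       reinterpret_cast<unsigned int&>(desired));
-//   // prev holds the previous bit pattern of x; reinterpret it back to float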
-
-#if defined(__clang__) && (__clang_major__ >= 7)
-// Disable warning for large atomics on clang 7 and up (checked with godbolt)
-// clang-format off
-// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
-// clang-format on
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Watomic-alignment"
-#endif
-
-// Make 16-byte CAS work at least on the host
-#pragma omp begin declare variant match(device = {kind(host)})
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<!Impl::atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
-atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
- (void)__atomic_compare_exchange(dest,
- &compare,
- &value,
- false,
- GCCMemoryOrder<MemoryOrder>::value,
- GCCMemoryOrder<MemoryOrder>::value);
- return compare;
-}
-#pragma omp end declare variant
-
-#pragma omp begin declare variant match(device = {kind(nohost)})
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<!Impl::atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
-atomic_compare_exchange(T* /*dest*/, T /*compare*/, T value, MemoryOrder, MemoryScope) {
- // FIXME make sure this never gets called
- return value;
-}
-#pragma omp end declare variant
-
-#if defined(__clang__) && (__clang_major__ >= 7)
-#pragma GCC diagnostic pop
-#endif
-
-} // namespace desul
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
-
-// clang-format off
-#include "desul/atomics/SYCLConversions.hpp"
-#include "desul/atomics/Common.hpp"
-
-#include <CL/sycl.hpp>
-// clang-format on
-
-#ifdef DESUL_HAVE_SYCL_ATOMICS
-
-namespace desul {
-
-template <class MemoryOrder, class MemoryScope>
-inline void atomic_thread_fence(MemoryOrder, MemoryScope) {
- sycl::atomic_fence(
- Impl::DesulToSYCLMemoryOrder<MemoryOrder, /*extended namespace*/ false>::value,
- Impl::DesulToSYCLMemoryScope<MemoryScope, /*extended namespace*/ false>::value);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- Impl::sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
- *reinterpret_cast<unsigned int*>(dest));
- dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned int*>(&compare),
- *reinterpret_cast<unsigned int*>(&value));
- return compare;
-}
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- Impl::sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
- *reinterpret_cast<unsigned long long int*>(dest));
- dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned long long int*>(&compare),
- *reinterpret_cast<unsigned long long int*>(&value));
- return compare;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
- T value,
- MemoryOrder,
- MemoryScope) {
- static_assert(sizeof(unsigned int) == 4,
- "this function assumes an unsigned int is 32-bit");
- Impl::sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
- *reinterpret_cast<unsigned int*>(dest));
- unsigned int return_val = dest_ref.exchange(*reinterpret_cast<unsigned int*>(&value));
- return reinterpret_cast<T&>(return_val);
-}
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
- T value,
- MemoryOrder,
- MemoryScope) {
- static_assert(sizeof(unsigned long long int) == 8,
- "this function assumes an unsigned long long is 64-bit");
- Impl::sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
- *reinterpret_cast<unsigned long long int*>(dest));
- unsigned long long int return_val =
- dest_ref.exchange(reinterpret_cast<unsigned long long int&>(value));
- return reinterpret_cast<T&>(return_val);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
-atomic_compare_exchange(
- T* const /*dest*/, T compare, T /*value*/, MemoryOrder, MemoryScope) {
- // FIXME_SYCL not implemented
- assert(false);
- return compare;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type atomic_exchange(
- T* const /*dest*/, T value, MemoryOrder, MemoryScope) {
- // FIXME_SYCL not implemented
- assert(false);
- return value;
-}
-
-} // namespace desul
-
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SERIAL_HPP_
-#define DESUL_ATOMICS_COMPARE_EXCHANGE_SERIAL_HPP_
-
-#ifdef DESUL_HAVE_SERIAL_ATOMICS
-namespace desul {
-template <class MemoryScope>
-void atomic_thread_fence(MemoryOrderAcquire, MemoryScope) {}
-
-template <class MemoryScope>
-void atomic_thread_fence(MemoryOrderRelease, MemoryScope) {}
-
-template <typename T, class MemoryScope>
-T atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
- T old = *dest;
- if (old == compare) {
- *dest = value;
- }
- return old;
-}
-template <typename T, class MemoryScope>
-T atomic_compare_exchange(
- T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
- T old = *dest;
- if (old == compare) {
- *dest = value;
- }
- return old;
-}
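-
-// Usage sketch (editor's illustration) of the CAS contract implemented above:
-//
-//   int x = 5;
-//   int prev = desul::atomic_compare_exchange(
-//       &x, /*compare=*/5, /*value=*/7,
-//       desul::MemoryOrderRelaxed(), desul::MemoryScopeCaller());
-//   // prev == 5 and x == 7; a second call with compare == 5 returns 7
-//   // and leaves x unchanged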
-} // namespace desul
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_GCC_HPP_
-#define DESUL_ATOMICS_GCC_HPP_
-
-#ifdef DESUL_HAVE_GCC_ATOMICS
-
-#include <type_traits>
-/*
-Built-in Function: type __atomic_add_fetch (type *ptr, type val, int memorder)
-Built-in Function: type __atomic_sub_fetch (type *ptr, type val, int memorder)
-Built-in Function: type __atomic_and_fetch (type *ptr, type val, int memorder)
-Built-in Function: type __atomic_xor_fetch (type *ptr, type val, int memorder)
-Built-in Function: type __atomic_or_fetch (type *ptr, type val, int memorder)
-Built-in Function: type __atomic_nand_fetch (type *ptr, type val, int memorder)
-*/
-
-#define DESUL_GCC_INTEGRAL_OP_ATOMICS(MEMORY_ORDER, MEMORY_SCOPE) \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_add( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_add(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_sub( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_sub(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_and( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_and(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_or( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_or(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_xor( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_xor(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_nand( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_fetch_nand(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_add_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_add_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_sub_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_sub_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_and_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_and_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_or_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_or_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_xor_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_xor_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- } \
- template <typename T> \
- typename std::enable_if<std::is_integral<T>::value, T>::type atomic_nand_fetch( \
- T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
- return __atomic_nand_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
- }
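-
-// Editor's note: each invocation below stamps out the full set of integral
-// fetch-op / op-fetch overloads for one (memory order, memory scope) pair.
-// For example, DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed,
-// MemoryScopeDevice) produces, among others,
-//
-//   template <typename T>
-//   typename std::enable_if<std::is_integral<T>::value, T>::type
-//   atomic_fetch_add(T* const dest, T value, MemoryOrderRelaxed, MemoryScopeDevice) {
-//     return __atomic_fetch_add(dest, value, GCCMemoryOrder<MemoryOrderRelaxed>::value);
-//   }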
-
-namespace desul {
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeNode)
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeDevice)
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeCore)
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeNode)
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeDevice)
-DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeCore)
-
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<!Impl::atomic_exchange_available_gcc<T>::value, T> atomic_exchange(
- T* const dest,
- Impl::dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScope scope) {
- // Acquire a lock for the address
- // clang-format off
- while (!Impl::lock_address((void*)dest, scope)) {}
- // clang-format on
-
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = *dest;
- *dest = val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-std::enable_if_t<!Impl::atomic_exchange_available_gcc<T>::value, T>
-atomic_compare_exchange(T* const dest,
- Impl::dont_deduce_this_parameter_t<const T> compare,
- Impl::dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScope scope) {
- // Acquire a lock for the address
- // clang-format off
- while (!Impl::lock_address((void*)dest, scope)) {}
- // clang-format on
-
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = *dest;
- if (return_val == compare) {
- *dest = val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- }
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-}
-} // namespace desul
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_GENERIC_HPP_
-#define DESUL_ATOMICS_GENERIC_HPP_
-
-#include <type_traits>
-#if defined(__GNUC__) && (!defined(__clang__))
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wstrict-aliasing"
-#endif
-#include "desul/atomics/Common.hpp"
-#include "desul/atomics/Compare_Exchange.hpp"
-#include "desul/atomics/Lock_Array.hpp"
-#include "desul/atomics/Macros.hpp"
-// Combination operations to be used in a compare-and-exchange-based atomic
-// operation
-namespace desul {
-namespace Impl {
-
-template <class Scalar1, class Scalar2>
-struct MaxOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return (val1 > val2 ? val1 : val2);
- }
- DESUL_FORCEINLINE_FUNCTION
- static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
- return val1 > val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct MinOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return (val1 < val2 ? val1 : val2);
- }
- DESUL_FORCEINLINE_FUNCTION
- static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
- return val1 < val2;
- }
-};
-
-template <typename Op, typename Scalar1, typename Scalar2, typename = bool>
-struct may_exit_early : std::false_type {};
-
-// This early-exit optimization triggers compiler errors with MSVC 2019
-#ifndef DESUL_HAVE_MSVC_ATOMICS
-template <typename Op, typename Scalar1, typename Scalar2>
-struct may_exit_early<Op,
- Scalar1,
- Scalar2,
- decltype(Op::check_early_exit(std::declval<Scalar1 const&>(),
- std::declval<Scalar2 const&>()))>
- : std::true_type {};
-#endif
-
-template <typename Op, typename Scalar1, typename Scalar2>
-constexpr DESUL_FUNCTION
- typename std::enable_if<may_exit_early<Op, Scalar1, Scalar2>::value, bool>::type
- check_early_exit(Op const&, Scalar1 const& val1, Scalar2 const& val2) {
- return Op::check_early_exit(val1, val2);
-}
-
-template <typename Op, typename Scalar1, typename Scalar2>
-constexpr DESUL_FUNCTION
- typename std::enable_if<!may_exit_early<Op, Scalar1, Scalar2>::value, bool>::type
- check_early_exit(Op const&, Scalar1 const&, Scalar2 const&) {
- return false;
-}
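-
-// Editor's note: may_exit_early is a detection idiom. The partial
-// specialization above participates only when Op::check_early_exit(val1, val2)
-// is well formed and returns bool, matching the "typename = bool" default
-// argument. MaxOper and MinOper define check_early_exit, so (outside MSVC,
-// where the specialization is disabled)
-//
-//   static_assert(may_exit_early<MaxOper<int, int>, int, int>::value, "");
-//
-// holds, while operators without it, such as AddOper below, fall back to the
-// false_type primary template and never skip the atomic update.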
-
-template <class Scalar1, class Scalar2>
-struct AddOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 + val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct SubOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 - val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct MulOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 * val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct DivOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 / val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct ModOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 % val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct AndOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 & val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct OrOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 | val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct XorOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 ^ val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct NandOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return ~(val1 & val2);
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct LShiftOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 << val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct RShiftOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return val1 >> val2;
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct IncModOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return ((val1 >= val2) ? Scalar1(0) : val1 + Scalar1(1));
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct DecModOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
- return (((val1 == Scalar1(0)) | (val1 > val2)) ? val2 : (val1 - Scalar1(1)));
- }
-};
-
-template <class Scalar1, class Scalar2>
-struct StoreOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1&, const Scalar2& val2) { return val2; }
-};
-
-template <class Scalar1, class Scalar2>
-struct LoadOper {
- DESUL_FORCEINLINE_FUNCTION
- static Scalar1 apply(const Scalar1& val1, const Scalar2&) { return val1; }
-};
-
-template <class Oper,
- typename T,
- class MemoryOrder,
- class MemoryScope,
- // equivalent to:
- // requires atomic_always_lock_free(sizeof(T))
- std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0>
-DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder order,
- MemoryScope scope) {
- using cas_t = typename atomic_compare_exchange_type<sizeof(T)>::type;
- cas_t oldval = reinterpret_cast<cas_t&>(*dest);
- cas_t assume = oldval;
-
- do {
- if (Impl::check_early_exit(op, reinterpret_cast<T&>(oldval), val))
- return reinterpret_cast<T&>(oldval);
- assume = oldval;
- T newval = op.apply(reinterpret_cast<T&>(assume), val);
- oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),
- assume,
- reinterpret_cast<cas_t&>(newval),
- order,
- scope);
- } while (assume != oldval);
-
- return reinterpret_cast<T&>(oldval);
-}
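-
-// Editor's walk-through of the CAS loop above, e.g. atomic_fetch_add on int:
-//
-//   int x = 10;
-//   // thread A reads oldval = 10 and computes newval = 15
-//   // thread B concurrently stores 11 into x
-//   // A's CAS (10 -> 15) fails and returns 11; A retries with oldval = 11
-//   // A's CAS (11 -> 16) succeeds; atomic_fetch_add returns 11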
-
-template <class Oper,
- typename T,
- class MemoryOrder,
- class MemoryScope,
- // equivalent to:
- // requires atomic_always_lock_free(sizeof(T))
- std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0>
-DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder order,
- MemoryScope scope) {
- using cas_t = typename atomic_compare_exchange_type<sizeof(T)>::type;
- cas_t oldval = reinterpret_cast<cas_t&>(*dest);
- T newval = val;
- cas_t assume = oldval;
- do {
- if (Impl::check_early_exit(op, reinterpret_cast<T&>(oldval), val))
- return reinterpret_cast<T&>(oldval);
- assume = oldval;
- newval = op.apply(reinterpret_cast<T&>(assume), val);
- oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),
- assume,
- reinterpret_cast<cas_t&>(newval),
- order,
- scope);
- } while (assume != oldval);
-
- return newval;
-}
-
-template <class Oper,
- typename T,
- class MemoryOrder,
- class MemoryScope,
- // equivalent to:
- // requires !atomic_always_lock_free(sizeof(T))
- std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
-DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScope scope) {
-#if defined(DESUL_HAVE_FORWARD_PROGRESS)
- // Acquire a lock for the address
- while (!Impl::lock_address((void*)dest, scope)) {
- }
-
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = *dest;
- *dest = op.apply(return_val, val);
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-#elif defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
- // This is a way to avoid deadlock within a warp or wavefront
- T return_val;
- int done = 0;
-#ifdef __HIPCC__
- unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
- unsigned long long int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip((void*)dest, scope)) {
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- *dest = op.apply(return_val, val);
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_hip((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(done);
- }
- return return_val;
-// FIXME_SYCL not implemented
-#elif defined(__SYCL_DEVICE_ONLY__)
- (void)op;
- (void)dest;
- (void)scope;
- (void)return_val;
- (void)done;
-
- assert(false);
- return val;
-#else
- unsigned int mask = DESUL_IMPL_ACTIVEMASK;
- unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda((void*)dest, scope)) {
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = *dest;
- *dest = op.apply(return_val, val);
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_cuda((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
- }
- return return_val;
-#endif
-#else
- static_assert(false, "Unimplemented lock based atomic\n");
- return val;
-#endif
-}
-
-template <class Oper,
- typename T,
- class MemoryOrder,
- class MemoryScope,
- // equivalent to:
- // requires !atomic_always_lock_free(sizeof(T))
- std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
-DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScope scope) {
-#if defined(DESUL_HAVE_FORWARD_PROGRESS)
- // Acquire a lock for the address
- while (!Impl::lock_address((void*)dest, scope)) {
- }
-
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- T return_val = op.apply(*dest, val);
- *dest = return_val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address((void*)dest, scope);
- return return_val;
-#elif defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
- // This is a way to avoid deadlock within a warp or wavefront
- T return_val;
- int done = 0;
-#ifdef __HIPCC__
- unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
- unsigned long long int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_hip((void*)dest, scope)) {
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = op.apply(*dest, val);
- *dest = return_val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_hip((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(done);
- }
- return return_val;
- // FIXME_SYCL not implemented
-#elif defined(__SYCL_DEVICE_ONLY__)
- (void)op;
- (void)dest;
- (void)scope;
- (void)done;
-
- assert(false);
- return val;
-#else
- unsigned int mask = DESUL_IMPL_ACTIVEMASK;
- unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
- unsigned int done_active = 0;
- while (active != done_active) {
- if (!done) {
- if (Impl::lock_address_cuda((void*)dest, scope)) {
- atomic_thread_fence(MemoryOrderAcquire(), scope);
- return_val = op.apply(*dest, val);
- *dest = return_val;
- atomic_thread_fence(MemoryOrderRelease(), scope);
- Impl::unlock_address_cuda((void*)dest, scope);
- done = 1;
- }
- }
- done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
- }
- return return_val;
-#endif
-#else
- static_assert(false, "Unimplemented lock based atomic\n");
- return val;
-#endif
-}
-
-template <class Oper, typename T, class MemoryOrder>
-DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScopeCaller /*scope*/) {
- T oldval = *dest;
- *dest = op.apply(oldval, val);
- return oldval;
-}
-
-template <class Oper, typename T, class MemoryOrder>
-DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
- T* const dest,
- dont_deduce_this_parameter_t<const T> val,
- MemoryOrder /*order*/,
- MemoryScopeCaller /*scope*/) {
- T oldval = *dest;
- T newval = op.apply(oldval, val);
- *dest = newval;
- return newval;
-}
-
-} // namespace Impl
-} // namespace desul
-
-namespace desul {
-
-// Fetch_Oper atomics: return value before operation
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_add(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::AddOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_sub(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::SubOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_max(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::MaxOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_min(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::MinOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_mul(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::MulOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_div(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::DivOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_mod(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::ModOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_and(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::AndOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_or(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::OrOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_xor(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::XorOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_nand(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_fetch_oper(Impl::NandOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_fetch_lshift(T* const dest,
- const unsigned int val,
- MemoryOrder order,
- MemoryScope scope) {
- return Impl::atomic_fetch_oper(
- Impl::LShiftOper<T, const unsigned int>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_fetch_rshift(T* const dest,
- const unsigned int val,
- MemoryOrder order,
- MemoryScope scope) {
- return Impl::atomic_fetch_oper(
- Impl::RShiftOper<T, const unsigned int>(), dest, val, order, scope);
-}
-
-// Oper Fetch atomics: return value after operation
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_add_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::AddOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_sub_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::SubOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_max_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::MaxOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_min_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::MinOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_mul_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::MulOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_div_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::DivOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_mod_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::ModOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_and_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::AndOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_or_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::OrOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_xor_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::XorOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_nand_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
- return Impl::atomic_oper_fetch(Impl::NandOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_lshift_fetch(T* const dest,
- const unsigned int val,
- MemoryOrder order,
- MemoryScope scope) {
- return Impl::atomic_oper_fetch(
- Impl::LShiftOper<T, const unsigned int>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_rshift_fetch(T* const dest,
- const unsigned int val,
- MemoryOrder order,
- MemoryScope scope) {
- return Impl::atomic_oper_fetch(
- Impl::RShiftOper<T, const unsigned int>(), dest, val, order, scope);
-}
-
-// Other atomics
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_load(const T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return Impl::atomic_fetch_oper(
- Impl::LoadOper<T, const T>(), const_cast<T*>(dest), T(), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_store(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)Impl::atomic_fetch_oper(Impl::StoreOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_add(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_add(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_sub(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_sub(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_mul(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_mul(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_div(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_div(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_min(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_min(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_max(T* const dest,
- const T val,
- MemoryOrder order,
- MemoryScope scope) {
- (void)atomic_fetch_max(dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_inc_fetch(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_add_fetch(dest, T(1), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_dec_fetch(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_sub_fetch(dest, T(1), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_fetch_inc(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_fetch_add(dest, T(1), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_inc_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
- static_assert(std::is_unsigned<T>::value,
- "Signed types not supported by atomic_fetch_inc_mod.");
- return Impl::atomic_fetch_oper(
- Impl::IncModOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T atomic_fetch_dec(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_fetch_sub(dest, T(1), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION T
-atomic_fetch_dec_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
- static_assert(std::is_unsigned<T>::value,
- "Signed types not supported by atomic_fetch_dec_mod.");
- return Impl::atomic_fetch_oper(
- Impl::DecModOper<T, const T>(), dest, val, order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_inc(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_add(dest, T(1), order, scope);
-}
-
-template <typename T, class MemoryOrder, class MemoryScope>
-DESUL_INLINE_FUNCTION void atomic_dec(T* const dest,
- MemoryOrder order,
- MemoryScope scope) {
- return atomic_sub(dest, T(1), order, scope);
-}
-
-// FIXME
-template <typename T,
- class SuccessMemoryOrder,
- class FailureMemoryOrder,
- class MemoryScope>
-DESUL_INLINE_FUNCTION bool atomic_compare_exchange_strong(
- T* const dest,
- T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder /*failure*/,
- MemoryScope scope) {
- T const old = atomic_compare_exchange(dest, expected, desired, success, scope);
- if (old != expected) {
- expected = old;
- return false;
- } else {
- return true;
- }
-}
-
-template <typename T,
- class SuccessMemoryOrder,
- class FailureMemoryOrder,
- class MemoryScope>
-DESUL_INLINE_FUNCTION bool atomic_compare_exchange_weak(T* const dest,
- T& expected,
- T desired,
- SuccessMemoryOrder success,
- FailureMemoryOrder failure,
- MemoryScope scope) {
- return atomic_compare_exchange_strong(
- dest, expected, desired, success, failure, scope);
-}
-
-} // namespace desul
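For orientation, here is a minimal sketch of how the compare-exchange layer above is typically consumed, assuming the umbrella header `<desul/atomics.hpp>` and the tag types used throughout this file; the helper `atomic_multiply_by` is hypothetical and not part of the library:

```c++
#include <desul/atomics.hpp>

// Hypothetical CAS loop: atomically multiply *dest by factor using the
// atomic_compare_exchange_strong defined above. On failure, 'expected' is
// refreshed with the value observed in memory, so we recompute and retry.
inline double atomic_multiply_by(double* dest, double factor) {
  double expected = *dest;  // initial (non-atomic) guess
  double desired = expected * factor;
  while (!desul::atomic_compare_exchange_strong(dest,
                                                expected,
                                                desired,
                                                desul::MemoryOrderRelaxed(),
                                                desul::MemoryOrderRelaxed(),
                                                desul::MemoryScopeDevice())) {
    desired = expected * factor;  // retry with the refreshed expected value
  }
  return desired;
}
```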
-
-#include <desul/atomics/CUDA.hpp>
-#include <desul/atomics/GCC.hpp>
-#include <desul/atomics/HIP.hpp>
-#include <desul/atomics/OpenMP.hpp>
-#include <desul/atomics/SYCL.hpp>
-#if defined(__GNUC__) && (!defined(__clang__))
-#pragma GCC diagnostic pop
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_HIP_HPP_
-#define DESUL_ATOMICS_HIP_HPP_
-
-#ifdef __HIP_DEVICE_COMPILE__
-namespace desul {
-
-// This header file is organized as follows:
-// 1/ device-side overload set from atomic functions provided by HIP
-// 2/ fallback implementation on the host side for atomic functions defined in 1/
-//    that are not included in the GCC overload set
-// 3/ fallback implementation on the device side for atomic functions from the GCC
-//    overload set that are not defined in 1/
-
-// clang-format off
-inline __device__ int atomic_fetch_add( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
-inline __device__ unsigned int atomic_fetch_add( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_add(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
-inline __device__ float atomic_fetch_add( float* ptr, float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
-inline __device__ double atomic_fetch_add( double* ptr, double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
-
-inline __device__ int atomic_fetch_sub( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
-inline __device__ unsigned int atomic_fetch_sub( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_sub(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
-inline __device__ float atomic_fetch_sub( float* ptr, float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
-inline __device__ double atomic_fetch_sub( double* ptr, double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
-
-inline __device__ int atomic_fetch_min( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
-inline __device__ unsigned int atomic_fetch_min( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_min(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
-
-inline __device__ int atomic_fetch_max( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
-inline __device__ unsigned int atomic_fetch_max( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_max(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
-
-inline __device__ int atomic_fetch_and( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
-inline __device__ unsigned int atomic_fetch_and( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_and(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
-
-inline __device__ int atomic_fetch_or ( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
-inline __device__ unsigned int atomic_fetch_or ( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
-inline __device__ unsigned long long atomic_fetch_or (unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
-
-inline __device__ int atomic_fetch_xor( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
-inline __device__ unsigned int atomic_fetch_xor( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
-inline __device__ unsigned long long atomic_fetch_xor(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
-
-inline __device__ int atomic_fetch_inc( int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1 ); }
-inline __device__ unsigned int atomic_fetch_inc( unsigned int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1u ); }
-inline __device__ unsigned long long atomic_fetch_inc(unsigned long long* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1ull); }
-
-inline __device__ int atomic_fetch_dec( int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1 ); }
-inline __device__ unsigned int atomic_fetch_dec( unsigned int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1u ); }
-inline __device__ unsigned long long atomic_fetch_dec(unsigned long long* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -1 ); }
-
-inline __device__ unsigned int atomic_fetch_inc_mod( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicInc(ptr, val); }
-inline __device__ unsigned int atomic_fetch_dec_mod( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicDec(ptr, val); }
-// clang-format on
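The memory-order and memory-scope arguments are empty tag types, so overload resolution alone routes a call to the matching hardware intrinsic above. A hypothetical HIP kernel illustrating the calling convention (not part of the library):

```c++
// Hypothetical HIP kernel: the tag arguments select the relaxed,
// device-scope overload of atomic_fetch_add for unsigned int above.
__global__ void count_hits(unsigned int* counter) {
  desul::atomic_fetch_add(counter, 1u,
                          desul::MemoryOrderRelaxed(),
                          desul::MemoryScopeDevice());
}
```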
-
-#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, TYPE) \
- template <class MemoryOrder> \
- inline __device__ TYPE atomic_fetch_##OP( \
- TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeDevice) { \
- __threadfence(); \
- TYPE return_val = \
- atomic_fetch_##OP(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice()); \
- __threadfence(); \
- return return_val; \
- } \
- template <class MemoryOrder> \
- inline __device__ TYPE atomic_fetch_##OP( \
- TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeCore) { \
- return atomic_fetch_##OP(ptr, val, MemoryOrder(), MemoryScopeDevice()); \
- }
-
-#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(OP) \
- DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, int) \
- DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, unsigned int) \
- DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, unsigned long long)
-
-#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(OP) \
- DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, float) \
- DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, double)
-
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(min)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(max)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(and)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(or)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(xor)
-
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(add)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(add)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(sub)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(sub)
-
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(inc)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(dec)
-
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(inc_mod, unsigned int)
-DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(dec_mod, unsigned int)
-
-#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT
-#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL
-#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP
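For reference, `DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(min, int)` expands to roughly the following: stronger memory orders are emulated by bracketing the relaxed hardware atomic with full `__threadfence()` barriers, and core scope simply forwards to device scope (approximate expansion, reformatted for readability):

```c++
template <class MemoryOrder>
inline __device__ int atomic_fetch_min(int* ptr, int val, MemoryOrder, MemoryScopeDevice) {
  __threadfence();  // over-approximates any acquire requirement
  // Resolves to the non-template relaxed hardware overload defined above.
  int return_val = atomic_fetch_min(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice());
  __threadfence();  // over-approximates any release requirement
  return return_val;
}
template <class MemoryOrder>
inline __device__ int atomic_fetch_min(int* ptr, int val, MemoryOrder, MemoryScopeCore) {
  return atomic_fetch_min(ptr, val, MemoryOrder(), MemoryScopeDevice());
}
```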
-
-// 2/ host-side fallback implementation for atomic functions not provided by GCC
-
-#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, TYPE) \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_##OP_LOWERCASE( \
- TYPE* ptr, TYPE val, MemoryOrder order, MemoryScopeDevice scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::OP_PASCAL_CASE##Oper<TYPE, const TYPE>(), ptr, val, order, scope); \
- } \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_##OP_LOWERCASE( \
- TYPE* ptr, TYPE val, MemoryOrder order, MemoryScopeCore scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::OP_PASCAL_CASE##Oper<TYPE, const TYPE>(), ptr, val, order, scope); \
- }
-
-#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(OP_LOWERCASE, OP_PASCAL_CASE) \
- DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, int) \
- DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, unsigned int) \
- DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN( \
- OP_LOWERCASE, OP_PASCAL_CASE, unsigned long long)
-
-#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(OP_LOWERCASE, \
- OP_PASCAL_CASE) \
- DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, float) \
- DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, double)
-
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(min, Min)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(max, Max)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(add, Add)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(sub, Sub)
-
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(inc_mod, IncMod, unsigned int)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(dec_mod, DecMod, unsigned int)
-
-#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT
-#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL
-#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN
-
-#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(TYPE) \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_inc( \
- TYPE* ptr, MemoryOrder order, MemoryScopeDevice scope) { \
- return atomic_fetch_add(ptr, static_cast<TYPE>(1), order, scope); \
- } \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_inc( \
- TYPE* ptr, MemoryOrder order, MemoryScopeCore scope) { \
- return atomic_fetch_add(ptr, static_cast<TYPE>(1), order, scope); \
- } \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_dec( \
- TYPE* ptr, MemoryOrder order, MemoryScopeDevice scope) { \
- return atomic_fetch_sub(ptr, static_cast<TYPE>(1), order, scope); \
- } \
- template <class MemoryOrder> \
- inline __host__ TYPE atomic_fetch_dec( \
- TYPE* ptr, MemoryOrder order, MemoryScopeCore scope) { \
- return atomic_fetch_sub(ptr, static_cast<TYPE>(1), order, scope); \
- }
-
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(int)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(unsigned int)
-DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(unsigned long long)
-
-#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT
-
-// 3/ device-side fallback implementation for atomic functions defined in GCC overload
-// set
-
-#define DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE( \
- OP_LOWERCASE, OP_PASCAL_CASE, MEMORY_ORDER, MEMORY_SCOPE) \
- template <class T> \
- inline __device__ std::enable_if_t<std::is_integral<T>::value, T> \
- atomic_##OP_LOWERCASE##_fetch( \
- T* ptr, T val, MEMORY_ORDER order, MEMORY_SCOPE scope) { \
- return Impl::atomic_oper_fetch( \
- Impl::OP_PASCAL_CASE##Oper<T, const T>(), ptr, val, order, scope); \
- } \
- template <class T> \
- inline __device__ std::enable_if_t<std::is_integral<T>::value, T> \
- atomic_fetch_##OP_LOWERCASE( \
- T* ptr, T val, MEMORY_ORDER order, MEMORY_SCOPE scope) { \
- return Impl::atomic_fetch_oper( \
- Impl::OP_PASCAL_CASE##Oper<T, const T>(), ptr, val, order, scope); \
- }
-
-// clang-format off
-#define DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeNode) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeDevice) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeCore) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst, MemoryScopeNode) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst, MemoryScopeDevice) \
- DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst, MemoryScopeCore)
-// clang-format on
-
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(add, Add)
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(sub, Sub)
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(and, And)
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(or, Or)
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(xor, Xor)
-DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(nand, Nand)
-
-#undef DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN
-#undef DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE
-
-} // namespace desul
-
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_MACROS_HPP_
-#define DESUL_ATOMICS_MACROS_HPP_
-
-// Macros
-
-#if (!defined(__CUDA_ARCH__) || !defined(__NVCC__)) && \
-    (!defined(__HIP_DEVICE_COMPILE__) || !defined(__HIP_PLATFORM_HCC__)) && \
- !defined(__SYCL_DEVICE_ONLY__) && !defined(DESUL_HAVE_OPENMP_ATOMICS) && \
- !defined(DESUL_HAVE_SERIAL_ATOMICS)
-#define DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS
-#endif
-
-// ONLY use GNUC atomics if not compiling for the device
-// and we didn't explicitly say to use OPENMP or SERIAL atomics
-#if defined(__GNUC__) && defined(DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS)
-#define DESUL_HAVE_GCC_ATOMICS
-#endif
-
-// Equivalent to above: if we are compiling for the device we
-// need to use CUDA/HIP/SYCL atomics instead of MSVC atomics
-#if defined(_MSC_VER) && defined(DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS)
-#define DESUL_HAVE_MSVC_ATOMICS
-#endif
-
-#undef DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS
-
-#ifdef __CUDACC__
-#define DESUL_HAVE_CUDA_ATOMICS
-#endif
-
-#ifdef __HIPCC__
-#define DESUL_HAVE_HIP_ATOMICS
-#endif
-
-#ifdef __SYCL_DEVICE_ONLY__
-#define DESUL_HAVE_SYCL_ATOMICS
-#endif
-
-#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) || \
- defined(__SYCL_DEVICE_ONLY__)
-#define DESUL_HAVE_GPU_LIKE_PROGRESS
-#endif
-
-#if defined(DESUL_HAVE_CUDA_ATOMICS) || defined(DESUL_HAVE_HIP_ATOMICS)
-#define DESUL_FORCEINLINE_FUNCTION inline __host__ __device__
-#define DESUL_INLINE_FUNCTION inline __host__ __device__
-#define DESUL_FUNCTION __host__ __device__
-#else
-#define DESUL_FORCEINLINE_FUNCTION inline
-#define DESUL_INLINE_FUNCTION inline
-#define DESUL_FUNCTION
-#endif
-
-#if !defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
-#define DESUL_HAVE_FORWARD_PROGRESS
-#endif
-
-#endif // DESUL_ATOMICS_MACROS_HPP_
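These annotation macros are what the headers above attach to every public function; a one-line sketch of the intended use (the function itself is hypothetical):

```c++
// With CUDA or HIP atomics enabled, DESUL_INLINE_FUNCTION expands to
// 'inline __host__ __device__', otherwise to plain 'inline', so a single
// definition serves both host and device compilation.
DESUL_INLINE_FUNCTION int clamp_to_non_negative(int x) { return x < 0 ? 0 : x; }
```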
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-#ifndef DESUL_ATOMICS_OPENMP_HPP_
-#define DESUL_ATOMICS_OPENMP_HPP_
-
-#ifdef DESUL_HAVE_OPENMP_ATOMICS
-
-#include <desul/atomics/openmp/OpenMP_40.hpp>
-#endif
-#endif
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_SYCL_CONVERSIONS_HPP_
-#define DESUL_ATOMICS_SYCL_CONVERSIONS_HPP_
-#ifdef DESUL_HAVE_SYCL_ATOMICS
-
-// clang-format off
-#include "desul/atomics/Common.hpp"
-
-#include <CL/sycl.hpp>
-// clang-format on
-
-namespace desul {
-namespace Impl {
-
-#ifdef __clang__
-namespace sycl_sync_and_atomics = ::sycl::ext::oneapi;
-#else
-namespace sycl_sync_and_atomics = ::sycl;
-#endif
-
-template <bool extended_namespace>
-using sycl_memory_order = std::conditional_t<extended_namespace,
- sycl_sync_and_atomics::memory_order,
- sycl::memory_order>;
-template <bool extended_namespace>
-using sycl_memory_scope = std::conditional_t<extended_namespace,
- sycl_sync_and_atomics::memory_scope,
- sycl::memory_scope>;
-
-template <class MemoryOrder, bool extended_namespace = true>
-struct DesulToSYCLMemoryOrder;
-template <bool extended_namespace>
-struct DesulToSYCLMemoryOrder<MemoryOrderSeqCst, extended_namespace> {
- static constexpr sycl_memory_order<extended_namespace> value =
- sycl_memory_order<extended_namespace>::seq_cst;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryOrder<MemoryOrderAcquire, extended_namespace> {
- static constexpr sycl_memory_order<extended_namespace> value =
- sycl_memory_order<extended_namespace>::acquire;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryOrder<MemoryOrderRelease, extended_namespace> {
- static constexpr sycl_memory_order<extended_namespace> value =
- sycl_memory_order<extended_namespace>::release;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryOrder<MemoryOrderAcqRel, extended_namespace> {
- static constexpr sycl_memory_order<extended_namespace> value =
- sycl_memory_order<extended_namespace>::acq_rel;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryOrder<MemoryOrderRelaxed, extended_namespace> {
- static constexpr sycl_memory_order<extended_namespace> value =
- sycl_memory_order<extended_namespace>::relaxed;
-};
-
-template <class MemoryScope, bool extended_namespace = true>
-struct DesulToSYCLMemoryScope;
-template <bool extended_namespace>
-struct DesulToSYCLMemoryScope<MemoryScopeCore, extended_namespace> {
- static constexpr sycl_memory_scope<extended_namespace> value =
- sycl_memory_scope<extended_namespace>::work_group;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryScope<MemoryScopeDevice, extended_namespace> {
- static constexpr sycl_memory_scope<extended_namespace> value =
- sycl_memory_scope<extended_namespace>::device;
-};
-template <bool extended_namespace>
-struct DesulToSYCLMemoryScope<MemoryScopeSystem, extended_namespace> {
- static constexpr sycl_memory_scope<extended_namespace> value =
- sycl_memory_scope<extended_namespace>::system;
-};
-
-// FIXME_SYCL generic_space isn't available yet for CUDA.
-#ifdef __NVPTX__
-template <class T, class MemoryOrder, class MemoryScope>
-using sycl_atomic_ref = sycl::atomic_ref<T,
- DesulToSYCLMemoryOrder<MemoryOrder>::value,
- DesulToSYCLMemoryScope<MemoryScope>::value,
- sycl::access::address_space::global_space>;
-#else
-template <class T, class MemoryOrder, class MemoryScope>
-using sycl_atomic_ref = sycl::atomic_ref<T,
- DesulToSYCLMemoryOrder<MemoryOrder>::value,
- DesulToSYCLMemoryScope<MemoryScope>::value,
- sycl::access::address_space::generic_space>;
-#endif
-} // namespace Impl
-} // namespace desul
-
-#endif
-#endif
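A sketch of how these conversion traits are meant to be consumed (hypothetical helper, assumed to run inside a SYCL kernel): wrap the destination in the `sycl_atomic_ref` alias and call the corresponding `sycl::atomic_ref` member operation.

```c++
// Hypothetical helper: map desul tag types to a sycl::atomic_ref and
// perform the operation through its member function.
template <class T, class MemoryOrder, class MemoryScope>
T example_fetch_add(T* dest, T val, MemoryOrder, MemoryScope) {
  desul::Impl::sycl_atomic_ref<T, MemoryOrder, MemoryScope> ref(*dest);
  return ref.fetch_add(val);
}
```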
+++ /dev/null
-#include<limits>
-namespace desul {
-#if defined(__CUDA_ARCH__) || (defined(__clang__) && !defined(__NVCC__))
-// Choose which variant of the atomics implementation the includes below will use
-#if !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC) && \
- !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE) && \
- !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL) && \
- !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL)
-#if (__CUDACC_VER_MAJOR__ > 11) || ((__CUDACC_VER_MAJOR__==11) && (__CUDACC_VER_MINOR__>1))
-#define DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
-#else
-#define DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
-#endif
-#endif
-#include<desul/atomics/cuda/cuda_cc7_asm.inc>
-
-#endif
-}
+++ /dev/null
-#include<limits>
-namespace desul {
-#if defined(__CUDA_ARCH__) || (defined(__clang__) && !defined(__NVCC__))
-
-#include<desul/atomics/cuda/cuda_cc7_asm_exchange.inc>
-
-#endif
-}
+++ /dev/null
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC
-#include "cuda_cc7_asm_atomic_fetch_op.inc_generic"
-#endif
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
-#include "cuda_cc7_asm_atomic_fetch_op.inc_isglobal"
-#endif
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
-#include "cuda_cc7_asm_atomic_fetch_op.inc_predicate"
-#endif
-
-// This version is not generally safe
-// Only here for performance comparison purposes
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL
-#include "cuda_cc7_asm_atomic_fetch_op.inc_forceglobal"
-#endif
-
+++ /dev/null
-
-// Inline PTX register constraints: h = u16, r = u32, l = u64, f = f32, d = f64
-// Ops:
-
-// binary operations
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-// Fetch atomics
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- ctype neg_value = -value; \
- asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
- return result; \
-}
-
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
- asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-} \
-inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
- asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-} \
-inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-}
-
-// Group ops for integer ctypes
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
-
-
-// Instantiate Functions
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
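After substitution of the order/scope macros (assumed here to expand to `.relaxed` and `.gpu` for relaxed order at device scope), the float instantiation above reduces to approximately:

```c++
inline __device__ float atomic_fetch_add(float* dest, float value,
                                         MemoryOrderRelaxed, MemoryScopeDevice) {
  float result = 0;
  // atom.add with explicit .global state space plus the concatenated
  // order/scope/type suffixes produced by the macros above
  asm volatile("atom.add.global.relaxed.gpu.f32 %0,[%1],%2;"
               : "=f"(result)
               : "l"(dest), "f"(value)
               : "memory");
  return result;
}
```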
-
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
-
+++ /dev/null
-
-// Inline PTX register constraints: h = u16, r = u32, l = u64, f = f32, d = f64
-// Ops:
-
-// binary operations
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.and" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.and" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.or" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.or" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
- uint32_t asm_result = 0u; \
- asm volatile("atom.xor" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-} \
-template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
- uint64_t asm_result = 0u; \
- asm volatile("atom.xor" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
- return reinterpret_cast<ctype&>(asm_result); \
-}
-
-// Fetch atomics
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- ctype neg_value = -value; \
- asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.min" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result=0; \
- asm volatile("atom.max" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
- asm volatile("atom.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-} \
-inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- asm volatile("atom.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
- asm volatile("atom.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-} \
-inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- ctype result = 0; \
- asm volatile("atom.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
- return result; \
-}
-// Group ops for integer ctypes
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
-
-
-// Instantiate Functions
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
-
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
-#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
-
+++ /dev/null
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC
-#include "cuda_cc7_asm_atomic_op.inc_generic"
-#endif
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
-#include "cuda_cc7_asm_atomic_op.inc_isglobal"
-#endif
-
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
-#include "cuda_cc7_asm_atomic_op.inc_predicate"
-#endif
-
-// This version is not generally safe
-// Only here for performance comparison purposes
-#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL
-#include "cuda_cc7_asm_atomic_op.inc_forceglobal"
-#endif
-
+++ /dev/null
-
-// Inline PTX register constraints: h = u16, r = u32, l = u64, f = f32, d = f64
-// Ops:
-
-// Non-Returning Atomic Operations
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type neg_value = -value; \
- asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type limit = desul::Impl::numeric_limits_max<type>::value; \
- asm volatile("red.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
-inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type limit = desul::Impl::numeric_limits_max<type>::value; \
- asm volatile("red.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
-}
-
-// Group ops for integer types
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
-
-// Instantiate Functions
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
+++ /dev/null
-
-// Inline PTX register constraints: h = u16, r = u32, l = u64, f = f32, d = f64
-// Ops:
-
-// Non-Returning Atomic Operations
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type neg_value = -value; \
- asm volatile("red.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.min" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- asm volatile("red.max" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type limit = desul::Impl::numeric_limits_max<type>::value; \
- asm volatile("red.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
-}
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
-inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
- type limit = desul::Impl::numeric_limits_max<type>::value; \
- asm volatile("red.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
-}
-
-// Group ops for integer types
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
-
-// Instantiate Functions
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
-
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
+++ /dev/null
-/*
-Copyright (c) 2019, Lawrence Livermore National Security, LLC
-and DESUL project contributors. See the COPYRIGHT file for details.
-Source: https://github.com/desul/desul
-
-SPDX-License-Identifier: (BSD-3-Clause)
-*/
-
-#ifndef DESUL_ATOMICS_OPENMP40_HPP_
-#define DESUL_ATOMICS_OPENMP40_HPP_
-#include<type_traits>
-
-namespace desul {
-namespace Impl {
- template<class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_pre_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_pre_capture_flush(MemoryOrderAcquire, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderAcquire(), MEMORY_SCOPE_TMP());
- }
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_pre_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
- }
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_pre_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
- }
-
- template<class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_post_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_post_capture_flush(MemoryOrderRelease, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderRelease(), MEMORY_SCOPE_TMP());
- }
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_post_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
- }
- template<class MEMORY_SCOPE_TMP>
- void openmp_maybe_call_post_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
- atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
- }
-
- template<class T>
- struct is_openmp_atomic_type_t {
- static constexpr bool value = std::is_arithmetic<T>::value;
- };
- template<class T>
- constexpr bool is_openmp_atomic_type_v = is_openmp_atomic_type_t<T>::value;
-}
-}
-
-namespace desul {
-// We can't use a macro approach to get all definitions since the ops include
-// #pragma omp, so we instead include the same code snippet multiple times
-// (a sketch of what each inclusion stamps out follows the blocks below).
-
-// We can't do node-level atomics this way with OpenMP Target, but we could
-// have a define which says whether or not device level IS node level (e.g. for a pure CPU node).
-
-#define MEMORY_ORDER MemoryOrderRelaxed
-// #define MEMORY_SCOPE MemoryScopeNode
-// #include<desul/atomics/openmp/OpenMP_40_op.inc>
-// #undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeDevice
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeCore
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#undef MEMORY_ORDER
-
-#define MEMORY_ORDER MemoryOrderAcqRel
-// #define MEMORY_SCOPE MemoryScopeNode
-// #include<desul/atomics/openmp/OpenMP_40_op.inc>
-// #undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeDevice
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeCore
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#undef MEMORY_ORDER
-
-#define MEMORY_ORDER MemoryOrderSeqCst
-// #define MEMORY_SCOPE MemoryScopeNode
-// #include<desul/atomics/openmp/OpenMP_40_op.inc>
-// #undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeDevice
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#define MEMORY_SCOPE MemoryScopeCore
-#include<desul/atomics/openmp/OpenMP_40_op.inc>
-#undef MEMORY_SCOPE
-#undef MEMORY_ORDER
-} // namespace desul
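The contents of `OpenMP_40_op.inc` are not part of this excerpt, but each inclusion is expected to stamp out overloads shaped roughly like the following hypothetical sketch, using the `MEMORY_ORDER`/`MEMORY_SCOPE` macros defined around each inclusion and the flush helpers defined above:

```c++
template <class T>
std::enable_if_t<Impl::is_openmp_atomic_type_v<T>, T>
atomic_fetch_add(T* const dest, const T val, MEMORY_ORDER, MEMORY_SCOPE) {
  T old_value;
  // Emit a fence only for orders with acquire semantics.
  Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
#pragma omp atomic capture
  { old_value = *dest; *dest += val; }
  // Emit a fence only for orders with release semantics.
  Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
  return old_value;
}
```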
-#endif
--- /dev/null
+# CHANGELOG
+
+## 4.5.01
+
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.5.00...4.5.01)
+
+### Bug Fixes
+
+* Fix re-builds after cleaning the binary tree when doing `add_subdirectory` on the Kokkos source [\#7557](https://github.com/kokkos/kokkos/pull/7557)
+* Update mdspan to include fix for submdspan and bracket operator with clang 15&16 [\#7559](https://github.com/kokkos/kokkos/pull/7559)
+* Fix DynRankView performance regression by re-introducing shortcut operator() impls [\#7606](https://github.com/kokkos/kokkos/pull/7606)
+* Add missing MI300A (`GFX942_APU`) option to the Makefile build system
+
+## 4.5.00
+
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.4.01...4.5.00)
+
+### Features
+
+* SYCL backend graduated to production ready
+* Introduce new `SequentialHostInit` view allocation property [\#7229](https://github.com/kokkos/kokkos/pull/7229) (backported in 4.4.01; usage sketch below)
+* Support building with Run-Time Type Information (RTTI) disabled
+* Add new `KOKKOS_RELOCATABLE_FUNCTION` function annotation macro [\#5993](https://github.com/kokkos/kokkos/pull/5993)
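
As an illustration of the `SequentialHostInit` property mentioned above, a minimal sketch (the element type and label are hypothetical; see the Kokkos documentation for the authoritative API):

```c++
#include <Kokkos_Core.hpp>

struct Element {  // hypothetical type with a non-trivial constructor
  double values[8] = {};
};

// Construct the view's elements sequentially on the host rather than
// inside a parallel region.
Kokkos::View<Element*, Kokkos::HostSpace> make_elements(int n) {
  return Kokkos::View<Element*, Kokkos::HostSpace>(
      Kokkos::view_alloc(Kokkos::SequentialHostInit, "elements"), n);
}
```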
+
+### Backend and Architecture Enhancements
+
+#### CUDA
+
+* Adding occupancy tuning for CUDA architectures [\#6788](https://github.com/kokkos/kokkos/pull/6788)
+* By default disable `cudaMallocAsync` (i.e., revert the change made in version 4.2) [\#7353](https://github.com/kokkos/kokkos/pull/7353)
+
+#### HIP
+
+* Add support for AMD Phoenix APUs with Radeon 740M/760M/780M/880M/890M [\#7162](https://github.com/kokkos/kokkos/pull/7162)
+* Update maximum waves per CU values for consumer cards [\#7347](https://github.com/kokkos/kokkos/pull/7347)
+* Check that Kokkos is running on the architecture it was compiled for [\#7379](https://github.com/kokkos/kokkos/pull/7379)
+* Add opt-in option to use `hipMallocAsync` instead of `hipMalloc` [\#7324](https://github.com/kokkos/kokkos/pull/7324)
+* Introduce new architecture option `AMD_GFX942_APU` for MI300A [\#7462](https://github.com/kokkos/kokkos/pull/7462)
+
+#### SYCL
+
+* Move the `SYCL` backend out of the `Experimental` namespace [\#7171](https://github.com/kokkos/kokkos/pull/7171)
+* Introduce `KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE` as CMake option [\#5993](https://github.com/kokkos/kokkos/pull/5993)
+
+#### OpenACC
+
+* Add support for building with the Clacc compiler [\#7198](https://github.com/kokkos/kokkos/pull/7198)
+* Workaround NVHPC collapse clause bug for `MDRangePolicy` [\#7425](https://github.com/kokkos/kokkos/pull/7425)
+
+#### HPX
+
+* Implement `Experimental::partition_space` to produce truly independent execution spaces [\#7287](https://github.com/kokkos/kokkos/pull/7287)
+
+#### Threads
+
+* Fix compilation for `parallel_reduce` `MDRange` with `Dynamic` scheduling [\#7478](https://github.com/kokkos/kokkos/pull/7478)
+* Fix race conditions on ARM architectures [\#7498](https://github.com/kokkos/kokkos/pull/7498)
+
+#### OpenMP
+
+* Fix run time behavior when compiling with `-fvisibility=hidden` [\#7284](https://github.com/kokkos/kokkos/pull/7284) (backported in 4.4.01)
+* Fix linking with Cray Clang compiler [\#7341](https://github.com/kokkos/kokkos/pull/7341)
+
+#### Serial
+
+* Allow `Kokkos_ENABLE_ATOMICS_BYPASS` to skip mutexes to remediate performance regression in 4.4 [\#7369](https://github.com/kokkos/kokkos/pull/7369)
+
+### General Enhancements
+
+* Improve `View` initialization/destruction for non-scalar trivial and trivially-destructible types [\#7219](https://github.com/kokkos/kokkos/pull/7219) [\#7225](https://github.com/kokkos/kokkos/pull/7225)
+* Add getters for default tile sizes used in `MDRangePolicy` [\#6839](https://github.com/kokkos/kokkos/pull/6839)
+* Improve performance of `Kokkos::sort` when `std::sort` is used [\#7264](https://github.com/kokkos/kokkos/pull/7264)
+* Add range-based for loop support for `Array<T, N>` [\#7293](https://github.com/kokkos/kokkos/pull/7293)
+* Allow functors as reducers for nested team parallel reduce [\#6921](https://github.com/kokkos/kokkos/pull/6921)
+* Avoid making copies of string rvalue reference arguments to `view_alloc()` [\#7364](https://github.com/kokkos/kokkos/pull/7364)
+* Add `atomic_{mod,xor,nand,lshift,rshift}` (see the sketch after this list) [\#7458](https://github.com/kokkos/kokkos/pull/7458)
+* Allow using `SequentialHostInit` with `Kokkos::DualView` [\#7456](https://github.com/kokkos/kokkos/pull/7456)
+* Add `Graph::instantiate()` [\#7240](https://github.com/kokkos/kokkos/pull/7240)
+* Allow an arbitrary execution space instance to be used in `Kokkos::Graph::submit()` [\#7249](https://github.com/kokkos/kokkos/pull/7249)
+* Enable compile-time diagnostic of illegal reduction target for graphs [\#7460](https://github.com/kokkos/kokkos/pull/7460)
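+
+A minimal sketch of the new atomics, assuming they follow the void-returning style of the existing `atomic_and`/`atomic_or` free functions:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+KOKKOS_FUNCTION void update(int* p) {
+  Kokkos::atomic_xor(p, 0b1010);  // *p ^= 0b1010, atomically
+  Kokkos::atomic_lshift(p, 1);    // *p <<= 1, atomically
+  Kokkos::atomic_mod(p, 7);       // *p %= 7, atomically
+}
+```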
+
+### Build System Changes
+
+* Make sure backend-specific options such as `IMPL_CUDA_MALLOC_ASYNC` only show when that backend is actually enabled [\#7228](https://github.com/kokkos/kokkos/pull/7228)
+* Major refactoring removing `TriBITS` paths [\#6164](https://github.com/kokkos/kokkos/pull/6164)
+* Add support for SpacemiT K60 (RISC-V) [\#7160](https://github.com/kokkos/kokkos/pull/7160)
+
+### Deprecations
+
+* Deprecate Tasking interface [\#7393](https://github.com/kokkos/kokkos/pull/7393)
+* Deprecate `atomic_query_version`, `atomic_assign`, `atomic_compare_exchange_strong`, `atomic_{inc, dec}rement` [\#7458](https://github.com/kokkos/kokkos/pull/7458)
+* Deprecate `{OpenMP,HPX}::is_asynchronous()` [\#7322](https://github.com/kokkos/kokkos/pull/7322)
+
+### Bug Fixes
+
+* Fix undefined behavior in `BinSort` when sorting within bins on host [\#7223](https://github.com/kokkos/kokkos/pull/7223)
+* Use CUDA limits to set extents for blocks and grids [\#7235](https://github.com/kokkos/kokkos/pull/7235)
+* Fix `deep_copy (serial_exec, dst, src)` with multiple host backends [\#7245](https://github.com/kokkos/kokkos/pull/7245)
+* Skip `RangePolicy` bounds conversion checks if roundtrip convertibility is not provided [\#7172](https://github.com/kokkos/kokkos/pull/7172)
+* Allow extracting host and device views from `DualView` with `const` value type [\#7242](https://github.com/kokkos/kokkos/pull/7242)
+* Fix `TeamPolicy` array reduction for CUDA and HIP [\#6296](https://github.com/kokkos/kokkos/pull/6296)
+* Fix implicit copy assignment operators of a few AVX2 masks being deleted [\#7296](https://github.com/kokkos/kokkos/pull/7296)
+* Fix configuring without architecture flags for SYCL [\#7303](https://github.com/kokkos/kokkos/pull/7303)
+* Set an initial value index during join of `MinLoc`, `MaxLoc` or `MinMaxLoc` [\#7330](https://github.com/kokkos/kokkos/pull/7330)
+* Fix storage lifetime of driver for global launch of graph nodes for CUDA and HIP [\#7365](https://github.com/kokkos/kokkos/pull/7365)
+* Make `value_type` for `RandomAccessIterator` non-`const` [\#7485](https://github.com/kokkos/kokkos/pull/7485)
+
+## [4.4.01](https://github.com/kokkos/kokkos/tree/4.4.01)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.4.00...4.4.01)
+
+### Features:
+* Introduce new SequentialHostInit view allocation property [\#7229](https://github.com/kokkos/kokkos/pull/7229)
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+* Experimental support for unified memory mode (intended for Grace-Hopper etc.) [\#6823](https://github.com/kokkos/kokkos/pull/6823)
+
+### Bug Fixes
+* OpenMP: Fix issue related to the visibility of an internal symbol with shared libraries that affected `ScatterView` in particular [\#7284](https://github.com/kokkos/kokkos/pull/7284)
+* Fix implicit copy assignment operators of a few AVX2 masks being deleted [\#7296](https://github.com/kokkos/kokkos/pull/7296)
+
+## [4.4.00](https://github.com/kokkos/kokkos/tree/4.4.00)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.3.01...4.4.00)
+
+### Features:
+* Add `Kokkos::View` conversions from and to [`std::mdspan`](https://en.cppreference.com/w/cpp/container/mdspan) [\#6830](https://github.com/kokkos/kokkos/pull/6830) [\#7069](https://github.com/kokkos/kokkos/pull/7069)
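+
+A sketch of the round trip, assuming a standard library (or Kokkos' bundled implementation) that provides `mdspan`:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void roundtrip() {
+  Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace> v("v", 3, 4);
+  // View -> mdspan: a non-owning handle over the same allocation.
+  auto m = v.to_mdspan();
+  // mdspan -> View: converting constructors produce an unmanaged view.
+  Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace> w(m);
+}
+```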
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+* `nvcc_wrapper`: add ability to process the `--disable-warnings` flag [\#6936](https://github.com/kokkos/kokkos/issues/6936)
+* Use recommended/max team size functions in Cuda ParallelFor and Reduce constructors [\#6891](https://github.com/kokkos/kokkos/issues/6891)
+* Improve compile-times when building with `Kokkos_ENABLE_DEBUG_BOUNDS_CHECK` in Cuda [\#7013](https://github.com/kokkos/kokkos/pull/7013)
+
+#### HIP:
+* Use HIP builtin atomics [\#6882](https://github.com/kokkos/kokkos/pull/6882) [\#7000](https://github.com/kokkos/kokkos/pull/7000)
+* Enable user-specified compiler and linker flags for AMD GPUs [\#7127](https://github.com/kokkos/kokkos/pull/7127)
+
+#### SYCL:
+* Add support for Graphs [\#6912](https://github.com/kokkos/kokkos/pull/6912)
+* Fix multi-GPU support [\#6887](https://github.com/kokkos/kokkos/pull/6887)
+* Improve performance of reduction and scan operations [\#6562](https://github.com/kokkos/kokkos/pull/6562), [\#6750](https://github.com/kokkos/kokkos/pull/6750)
+* Fix lock for guarding scratch space in `TeamPolicy` `parallel_reduce` [\#6988](https://github.com/kokkos/kokkos/pull/6988)
+* Include submission command queue property information into `SYCL::print_configuration()` [\#7004](https://github.com/kokkos/kokkos/pull/7004)
+
+#### OpenACC:
+* Make `TeamPolicy` `parallel_for` execute on the correct async queue [\#7012](https://github.com/kokkos/kokkos/pull/7012)
+
+#### OpenMPTarget:
+* Honor user-requested loop ordering in `MDRange` policy [\#6925](https://github.com/kokkos/kokkos/pull/6925)
+* Prevent data races by guarding the scratch space used in `parallel_scan` [\#6998](https://github.com/kokkos/kokkos/pull/6998)
+
+#### HPX:
+* Workaround issue with template argument deduction to support compilation with NVCC [\#7015](https://github.com/kokkos/kokkos/pull/7015)
+
+### General Enhancements
+* Improve performance of view copies in host parallel regions [\#6730](https://github.com/kokkos/kokkos/pull/6730)
+* Harmonize convertibility rules of `Kokkos::RandomAccessIterator` with `View`s [\#6929](https://github.com/kokkos/kokkos/pull/6929)
+* Add a debug-mode precondition check that ranges do not overlap in the `adjacent_difference` algorithm [\#6922](https://github.com/kokkos/kokkos/pull/6922)
+* Add deduction guides for `TeamPolicy` [\#7030](https://github.com/kokkos/kokkos/pull/7030)
+* SIMD: Allow flexible vector width for 32 bit types [\#6802](https://github.com/kokkos/kokkos/pull/6802)
+* Updates for `Kokkos::Array`: add `kokkos_swap(Array<T, N>)` specialization [\#6943](https://github.com/kokkos/kokkos/pull/6943), add `Kokkos::to_array` [\#6375](https://github.com/kokkos/kokkos/pull/6375), make `Kokkos::Array` equality-comparable [\#7148](https://github.com/kokkos/kokkos/pull/7148)
+* Structured binding support for `Kokkos::complex` (see the sketch after this list) [\#7040](https://github.com/kokkos/kokkos/pull/7040)
+* Introduce `KOKKOS_DEDUCTION_GUIDE` macro to allow for portable user-defined deduction guides [\#6954](https://github.com/kokkos/kokkos/pull/6954)
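+
+A short sketch of the `Kokkos::Array` updates and the structured-binding support for `Kokkos::complex` (the real-then-imaginary binding order is assumed here):
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void bindings() {
+  Kokkos::complex<double> z(3.0, 4.0);
+  auto [re, im] = z;  // re == 3.0, im == 4.0
+
+  Kokkos::Array<int, 2> a{1, 2};
+  Kokkos::Array<int, 2> b{3, 4};
+  Kokkos::kokkos_swap(a, b);  // Array specialization of kokkos_swap
+}
+```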
+
+### Build System Changes
+* Do not require OpenMP support for languages other than CXX [\#6965](https://github.com/kokkos/kokkos/pull/6965)
+* Update Intel GPU architectures in Makefile [\#6895](https://github.com/kokkos/kokkos/pull/6895)
+* Fix use of OpenMP with Cuda or HIP as compile language [\#6972](https://github.com/kokkos/kokkos/pull/6972)
+* Define and enforce new minimum compiler versions for C++20 support [\#7128](https://github.com/kokkos/kokkos/pull/7128), [\#7123](https://github.com/kokkos/kokkos/pull/7123)
+* Add NVIDIA Grace CPU architecture: `Kokkos_ARCH_ARMV9_GRACE` [\#7158](https://github.com/kokkos/kokkos/pull/7158)
+* Fix Makefile.kokkos for Threads [\#6896](https://github.com/kokkos/kokkos/pull/6896)
+* Remove support for NVHPC as CUDA device compiler [\#6987](https://github.com/kokkos/kokkos/pull/6987)
+* Fix using CUDAToolkit for CMake 3.28.4 and higher [\#7062](https://github.com/kokkos/kokkos/pull/7062)
+
+### Incompatibilities (i.e. breaking changes)
+* Drop `Kokkos::Array` special treatment in `View`s [\#6906](https://github.com/kokkos/kokkos/pull/6906)
+* Drop `Experimental::RawMemoryAllocationFailure` [\#7145](https://github.com/kokkos/kokkos/pull/7145)
+
+### Deprecations
+* Remove `Experimental::LayoutTiled` class template and deprecate `is_layouttiled` trait [\#6907](https://github.com/kokkos/kokkos/pull/6907)
+* Deprecate `Kokkos::layout_iterate_type_selector` [\#7076](https://github.com/kokkos/kokkos/pull/7076)
+* Deprecate specialization of `Kokkos::pair` for a single element [\#6947](https://github.com/kokkos/kokkos/pull/6947)
+* Deprecate `deep_copy` of `UnorderedMap` of different size [\#6812](https://github.com/kokkos/kokkos/pull/6812)
+* Deprecate trailing `Proxy` template argument of `Kokkos::Array` [\#6934](https://github.com/kokkos/kokkos/pull/6934)
+* Deprecate implicit conversions of integers to `ChunkSize` [\#7151](https://github.com/kokkos/kokkos/pull/7151)
+* Deprecate implicit conversions to execution spaces [\#7156](https://github.com/kokkos/kokkos/pull/7156)
+
+### Bug Fixes
+* Do not return a copy of the input functor in `Experimental::for_each` [\#6910](https://github.com/kokkos/kokkos/pull/6910)
+* Fix `realloc` on views of non-default constructible element types [\#6993](https://github.com/kokkos/kokkos/pull/6993)
+* Fix undefined behavior in `View` initialization or fill with zeros [\#7014](https://github.com/kokkos/kokkos/pull/7014)
+* Fix `sort_by_key` on host execution spaces when building with NVCC [\#7059](https://github.com/kokkos/kokkos/pull/7059)
+* Fix using shared libraries and -fvisibility=hidden [\#7065](https://github.com/kokkos/kokkos/pull/7065)
+* Fix view reference counting when functor copy constructor throws in parallel dispatch [\#6289](https://github.com/kokkos/kokkos/pull/6289)
+* Fix `initialize(InitializationSetting)` for handling `print_configuration` setting [\#7098](https://github.com/kokkos/kokkos/pull/7098)
+* Thread safety fixes for the Serial and OpenMP backend [\#7080](https://github.com/kokkos/kokkos/pull/7080), [\#6151](https://github.com/kokkos/kokkos/pull/6151)
+
+## [4.3.01](https://github.com/kokkos/kokkos/tree/4.3.01)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.3.00...4.3.01)
+
+### Backend and Architecture Enhancements:
+
+#### HIP:
+* Support unified memory on MI300 [\#6877](https://github.com/kokkos/kokkos/pull/6877)
+
+### Bug Fixes
+* Serial: Use the provided execution space instance in TeamPolicy [\#6951](https://github.com/kokkos/kokkos/pull/6951)
+* `nvcc_wrapper`: bring back support for `--fmad` option [\#6931](https://github.com/kokkos/kokkos/pull/6931)
+* Fix CUDA reduction overflow for `RangePolicy` [\#6578](https://github.com/kokkos/kokkos/pull/6578)
+
+## [4.3.00](https://github.com/kokkos/kokkos/tree/4.3.00) (2024-03-19)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.2.01...4.3.00)
+
+### Features:
+* Add `Experimental::sort_by_key(exec, keys, values)` algorithm [\#6801](https://github.com/kokkos/kokkos/pull/6801)
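+
+A minimal sketch of the new algorithm (the function name and views are illustrative):
+
+```c++
+#include <Kokkos_Core.hpp>
+#include <Kokkos_Sort.hpp>
+
+void sort_pairs(Kokkos::View<int*> keys, Kokkos::View<double*> values) {
+  // Sorts `keys` and applies the same permutation to `values`.
+  Kokkos::Experimental::sort_by_key(
+      Kokkos::DefaultExecutionSpace{}, keys, values);
+}
+```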
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+* Experimental multi-GPU support (from the same process) [\#6782](https://github.com/kokkos/kokkos/pull/6782)
+* Link against CUDA libraries even with KOKKOS_ENABLE_COMPILE_AS_CMAKE_LANGUAGE [\#6701](https://github.com/kokkos/kokkos/pull/6701)
+* Don't use the compiler launcher script if the CMake compile language is CUDA. [\#6704](https://github.com/kokkos/kokkos/pull/6704)
+* `nvcc_wrapper`: adding "long" and "short" versions for all flags [\#6615](https://github.com/kokkos/kokkos/pull/6615)
+
+#### HIP:
+ * Fix compilation when using amdclang (with ROCm >= 5.7) and RDC [\#6857](https://github.com/kokkos/kokkos/pull/6857)
+ * Use rocthrust for sorting, when available [\#6793](https://github.com/kokkos/kokkos/pull/6793)
+
+#### SYCL:
+* Only the oneAPI SYCL implementation is supported: add checks during initialization
+ * Error out on initialization if the backend is different from `ext_oneapi_*` [\#6784](https://github.com/kokkos/kokkos/pull/6784)
+  * Filter GPU devices for `ext_oneapi_*` GPU devices [\#6758](https://github.com/kokkos/kokkos/pull/6758)
+* Performance Improvements
+ * Avoid unnecessary zero-memset of the scratch flags in SYCL [\#6739](https://github.com/kokkos/kokkos/pull/6739)
+ * Use host-pinned memory to copy reduction/scan result [\#6500](https://github.com/kokkos/kokkos/pull/6500)
+* Address deprecations after oneAPI 2023.2.0 [\#6577](https://github.com/kokkos/kokkos/pull/6577)
+* Make sure to call find_dependency for oneDPL if necessary [\#6870](https://github.com/kokkos/kokkos/pull/6870)
+
+#### OpenMPTarget:
+* Use LLVM extensions for dynamic shared memory [\#6380](https://github.com/kokkos/kokkos/pull/6380)
+* Guard scratch memory usage in ParallelReduce [\#6585](https://github.com/kokkos/kokkos/pull/6585)
+* Update linker flags for Intel GPUs [\#6735](https://github.com/kokkos/kokkos/pull/6735)
+* Improve handling of printf on Intel GPUs [\#6652](https://github.com/kokkos/kokkos/pull/6652)
+
+#### OpenACC:
+* Add atomics support [\#6446](https://github.com/kokkos/kokkos/pull/6446)
+* Make the OpenACC backend asynchronous [\#6772](https://github.com/kokkos/kokkos/pull/6772)
+
+#### Threads:
+* Add missing broadcast to TeamThreadRange parallel_scan [\#6601](https://github.com/kokkos/kokkos/pull/6601)
+
+#### OpenMP:
+* Improve performance of view initializations and filling with zeros [\#6573](https://github.com/kokkos/kokkos/pull/6573)
+
+### General Enhancements
+
+* Improve performance of random number generation when using a normal distribution on GPUs [\#6556](https://github.com/kokkos/kokkos/pull/6556)
+* Allocate temporary view with the user-provided execution space instance and do not initialize in `unique` algorithm [\#6598](https://github.com/kokkos/kokkos/pull/6598)
+* Add deduction guide for `Kokkos::Array` [\#6373](https://github.com/kokkos/kokkos/pull/6373)
+* Provide new public headers `<Kokkos_Clamp.hpp>` and `<Kokkos_MinMax.hpp>` [\#6687](https://github.com/kokkos/kokkos/pull/6687)
+* Fix/improvement to `remove_if` parallel algorithm: use the provided execution space instance for temporary allocations, drop unnecessary initialization, and avoid evaluating the predicate twice during the final pass [\#6747](https://github.com/kokkos/kokkos/pull/6747)
+* Add runtime function to query the number of devices and make device ID consistent with `KOKKOS_VISIBLE_DEVICES` [\#6713](https://github.com/kokkos/kokkos/pull/6713)
+* simd: support `vector_aligned_tag` [\#6243](https://github.com/kokkos/kokkos/pull/6243)
+* Avoid unnecessary allocation when default constructing Bitset [\#6524](https://github.com/kokkos/kokkos/pull/6524)
+* Fix constness for views in std algorithms [\#6813](https://github.com/kokkos/kokkos/pull/6813)
+* Improve error message on unsafe implicit conversion in MDRangePolicy [\#6855](https://github.com/kokkos/kokkos/pull/6855)
+* CTAD (deduction guides) for RangePolicy [\#6850](https://github.com/kokkos/kokkos/pull/6850)
+* CTAD (deduction guides) for MDRangePolicy [\#5516](https://github.com/kokkos/kokkos/pull/5516)
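+
+With the new deduction guides, policies can be spelled without explicit template arguments; a sketch (names illustrative):
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void make_policies(int n, int nx, int ny) {
+  Kokkos::RangePolicy range(0, n);                  // deduces RangePolicy<>
+  Kokkos::MDRangePolicy mdrange({0, 0}, {nx, ny});  // rank deduced as 2
+  // both can be passed to parallel_for/parallel_reduce as usual
+}
+```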
+
+### Build System Changes
+* Require `Kokkos_ENABLE_ATOMICS_BYPASS` option to bypass atomic operation for Serial backend only builds [\#6692](https://github.com/kokkos/kokkos/pull/6692)
+* Add support for RISC-V and the Milk-V Pioneer [\#6773](https://github.com/kokkos/kokkos/pull/6773)
+* Add C++26 standard to CMake setup [\#6733](https://github.com/kokkos/kokkos/pull/6733)
+* Fix Makefile when using gnu_generate_makefile.sh and make >= 4.3 [\#6606](https://github.com/kokkos/kokkos/pull/6606)
+* Cuda: Fix configuring with CMake >= 3.28.4 - temporary fallback to internal CudaToolkit.cmake [\#6898](https://github.com/kokkos/kokkos/pull/6898)
+
+### Incompatibilities (i.e. breaking changes)
+* Remove the `DEPRECATED_CODE_3` option and all code that was guarded by it [\#6523](https://github.com/kokkos/kokkos/pull/6523)
+* Drop guards to accommodate external code defining `KOKKOS_ASSERT` [\#6665](https://github.com/kokkos/kokkos/pull/6665)
+* `Profiling::ProfilingSection(std::string)` constructor marked explicit and nodiscard [\#6690](https://github.com/kokkos/kokkos/pull/6690)
+* Add bound check preconditions for `RangePolicy` and `MDRangePolicy` [\#6617](https://github.com/kokkos/kokkos/pull/6617) [\#6726](https://github.com/kokkos/kokkos/pull/6726)
+* Add checks for unsafe implicit conversions in RangePolicy [\#6754](https://github.com/kokkos/kokkos/pull/6754)
+* Remove Kokkos::[b]half_t volatile overloads [\#6579](https://github.com/kokkos/kokkos/pull/6579)
+* Remove KOKKOS_IMPL_DO_NOT_USE_PRINTF [\#6593](https://github.com/kokkos/kokkos/pull/6593)
+* Check matching static extents in View constructor [\#5190](https://github.com/kokkos/kokkos/pull/5190)
+* Tools(profiling): fix typo Kokkos_Tools_Optim[i]zationGoal [\#6642](https://github.com/kokkos/kokkos/pull/6642)
+* Remove variadic range policy constructor (disallow passing multiple trailing chunk size arguments) [\#6845](https://github.com/kokkos/kokkos/pull/6845)
+* Improve message on view out of bounds access and always abort [\#6861](https://github.com/kokkos/kokkos/pull/6861)
+* Drop `KOKKOS_ENABLE_INTEL_MM_ALLOC` macro [\#6797](https://github.com/kokkos/kokkos/pull/6797)
+* Remove `Kokkos::Experimental::LogicalMemorySpace` (without going through deprecation) [\#6557](https://github.com/kokkos/kokkos/pull/6557)
+* Remove `Experimental::HBWSpace` and support for linking against memkind [\#6791](https://github.com/kokkos/kokkos/pull/6791)
+* Drop librt TPL and associated `KOKKOS_ENABLE_LIBRT` macro [\#6798](https://github.com/kokkos/kokkos/pull/6798)
+* Drop support for old CPU architectures (`ARCH_BGQ`, `ARCH_POWER7`, `ARCH_WSM` and associated `ARCH_SSE4` macro) [\#6806](https://github.com/kokkos/kokkos/pull/6806)
+* Drop support for deprecated command-line arguments and environment variables [\#6744](https://github.com/kokkos/kokkos/pull/6744)
+
+### Deprecations
+* Provide kokkos_swap as part of Core and deprecate Experimental::swap in Algorithms [\#6697](https://github.com/kokkos/kokkos/pull/6697)
+* Deprecate {Cuda,HIP}::detect_device_count() and Cuda::[detect_]device_arch() [\#6710](https://github.com/kokkos/kokkos/pull/6710)
+* Deprecate `ExecutionSpace::in_parallel()` [\#6582](https://github.com/kokkos/kokkos/pull/6582)
+
+### Bug Fixes
+* Fix team-level MDRange reductions: [\#6511](https://github.com/kokkos/kokkos/pull/6511)
+* Fix CUDA and SYCL small value type (16-bit) team reductions [\#5334](https://github.com/kokkos/kokkos/pull/5334)
+* Enable `{transform_}exclusive_scan` in place [\#6667](https://github.com/kokkos/kokkos/pull/6667)
+* `fill_random` overloads that do not take an execution space instance argument should fence [\#6658](https://github.com/kokkos/kokkos/pull/6658)
+* HIP, Cuda, OpenMPTarget: fix to use the provided execution space when copying a host-inaccessible reduction result [\#6777](https://github.com/kokkos/kokkos/pull/6777)
+* Fix typo in `cuda_func_set_attribute[s]_wrapper` preventing proper setting of desired occupancy [\#6786](https://github.com/kokkos/kokkos/pull/6786)
+* Avoid undefined behavior due to conversion between signed and unsigned integers in shift_{right, left}_team_impl [\#6821](https://github.com/kokkos/kokkos/pull/6821)
+* Fix a bug in Makefile.kokkos when using AMD GPU architectures as `AMD_GFXYYY` [\#6892](https://github.com/kokkos/kokkos/pull/6892)
+
+## [4.2.01](https://github.com/kokkos/kokkos/tree/4.2.01) (2023-12-07)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.2.00...4.2.01)
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+- Add warp sync for `parallel_reduce` to avoid race condition [\#6630](https://github.com/kokkos/kokkos/pull/6630), [\#6746](https://github.com/kokkos/kokkos/pull/6746)
+
+#### HIP:
+- Fix Graph "multiple definition of" linking error (missing `inline` specifier) [\#6624](https://github.com/kokkos/kokkos/pull/6624)
+- Add support for gfx940 (AMD Instinct MI300 GPU) [\#6671](https://github.com/kokkos/kokkos/pull/6671)
+
+### Build System
+- CMake: Don't let Kokkos set `CMAKE_CXX_FLAGS` for Trilinos builds [\#6742](https://github.com/kokkos/kokkos/pull/6742)
+
+### Bug Fixes
+- Remove deprecation warning for `AllocationMechanism` for GCC <11.0 [\#6653](https://github.com/kokkos/kokkos/pull/6653)
+- Fix bug where tools were finalized early with non-default host execution instances [\#6635](https://github.com/kokkos/kokkos/pull/6635)
+- Fix various issues for MSVC CUDA builds [\#6659](https://github.com/kokkos/kokkos/pull/6659)
+- Fix "extra `;`" warning with `-pedantic` flag in `<Kokkos_SIMD_Scalar.hpp>` [\#6510](https://github.com/kokkos/kokkos/pull/6510)
+
+## [4.2.00](https://github.com/kokkos/kokkos/tree/4.2.00) (2023-11-06)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.1.00...4.2.00)
+
+### Features:
+- SIMD: significant improvements to SIMD support and alignment with C++26 SIMD
+ - add `Kokkos::abs` overload for SIMD types [\#6069](https://github.com/kokkos/kokkos/pull/6069)
+ - add generator constructors [\#6347](https://github.com/kokkos/kokkos/pull/6347)
+ - convert binary operators to hidden friends [\#6320](https://github.com/kokkos/kokkos/pull/6320)
+ - add shift operators [\#6109](https://github.com/kokkos/kokkos/pull/6109)
+ - add `float` support [\#6177](https://github.com/kokkos/kokkos/pull/6177)
+ - add remaining `gather_from` and `scatter_to` overloads [\#6220](https://github.com/kokkos/kokkos/pull/6220)
+ - define simd math function overloads in the Kokkos namespace [\#6465](https://github.com/kokkos/kokkos/pull/6465), [\#6487](https://github.com/kokkos/kokkos/pull/6487)
+  - `Kokkos_ARCH_NATIVE=ON` autodetects the supported SIMD types [\#6188](https://github.com/kokkos/kokkos/pull/6188)
+ - fix AVX2 SIMD support for ZEN2 AMD CPU [\#6238](https://github.com/kokkos/kokkos/pull/6238)
+- `Kokkos::printf` (see the sketch after this list) [\#6083](https://github.com/kokkos/kokkos/pull/6083)
+- `Kokkos::sort`: support custom comparator [\#6253](https://github.com/kokkos/kokkos/pull/6253)
+- `half_t` and `bhalf_t` numeric traits [\#5778](https://github.com/kokkos/kokkos/pull/5778)
+- `half_t` and `bhalf_t` mixed comparisons [\#6407](https://github.com/kokkos/kokkos/pull/6407)
+- `half_t` and `bhalf_t` mathematical functions [\#6124](https://github.com/kokkos/kokkos/pull/6124)
+- `TeamThreadRange` `parallel_scan` with return value [\#6090](https://github.com/kokkos/kokkos/pull/6090), [\#6301](https://github.com/kokkos/kokkos/pull/6301), [\#6302](https://github.com/kokkos/kokkos/pull/6302), [\#6303](https://github.com/kokkos/kokkos/pull/6303), [\#6307](https://github.com/kokkos/kokkos/pull/6307)
+- `ThreadVectorRange` `parallel_scan` with return value [\#6235](https://github.com/kokkos/kokkos/pull/6235), [\#6242](https://github.com/kokkos/kokkos/pull/6242), [\#6308](https://github.com/kokkos/kokkos/pull/6308), [\#6305](https://github.com/kokkos/kokkos/pull/6305), [\#6292](https://github.com/kokkos/kokkos/pull/6292)
+- Add team-level std algorithms [\#6200](https://github.com/kokkos/kokkos/pull/6200), [\#6205](https://github.com/kokkos/kokkos/pull/6205), [\#6207](https://github.com/kokkos/kokkos/pull/6207), [\#6208](https://github.com/kokkos/kokkos/pull/6208), [\#6209](https://github.com/kokkos/kokkos/pull/6209), [\#6210](https://github.com/kokkos/kokkos/pull/6210), [\#6211](https://github.com/kokkos/kokkos/pull/6211), [\#6212](https://github.com/kokkos/kokkos/pull/6212), [\#6213](https://github.com/kokkos/kokkos/pull/6213), [\#6256](https://github.com/kokkos/kokkos/pull/6256), [\#6258](https://github.com/kokkos/kokkos/pull/6258), [\#6350](https://github.com/kokkos/kokkos/pull/6350), [\#6351](https://github.com/kokkos/kokkos/pull/6351)
+- Serial: Allow for distinct execution space instances [\#6441](https://github.com/kokkos/kokkos/pull/6441)
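+
+A minimal sketch of `Kokkos::printf`, which is callable from device code on every backend:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void hello(int n) {
+  Kokkos::parallel_for(
+      "hello", n, KOKKOS_LAMBDA(const int i) {
+        Kokkos::printf("hello from iteration %d\n", i);
+      });
+}
+```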
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+- Fixed potential data race in Cuda `parallel_reduce` [\#6236](https://github.com/kokkos/kokkos/pull/6236)
+- Use `cudaMallocAsync` by default [\#6402](https://github.com/kokkos/kokkos/pull/6402)
+- Bugfix for using Kokkos from a thread of execution [\#6299](https://github.com/kokkos/kokkos/pull/6299)
+
+#### HIP:
+- New naming convention for AMD GPUs: VEGA906, VEGA908, VEGA90A, NAVI1030 become AMD_GFX906, AMD_GFX908, AMD_GFX90A, AMD_GFX1030 [\#6266](https://github.com/kokkos/kokkos/pull/6266)
+- Add initial support for gfx942: [\#6358](https://github.com/kokkos/kokkos/pull/6358)
+- Improve reduction performance [\#6229](https://github.com/kokkos/kokkos/pull/6229)
+- Deprecate `HIP(hipStream_t,bool)` constructor [\#6401](https://github.com/kokkos/kokkos/pull/6401)
+- Add support for Graph [\#6370](https://github.com/kokkos/kokkos/pull/6370)
+- Improve reduction performance when using Teams [\#6284](https://github.com/kokkos/kokkos/pull/6284)
+- Fix concurrency calculation [\#6479](https://github.com/kokkos/kokkos/pull/6479)
+- Fix potential data race in HIP `parallel_reduce` [\#6429](https://github.com/kokkos/kokkos/pull/6429)
+
+#### SYCL:
+- Enforce external `sycl::queues` to be in-order [\#6246](https://github.com/kokkos/kokkos/pull/6246)
+- Improve reduction performance: [\#6272](https://github.com/kokkos/kokkos/pull/6272) [\#6271](https://github.com/kokkos/kokkos/pull/6271) [\#6270](https://github.com/kokkos/kokkos/pull/6270) [\#6264](https://github.com/kokkos/kokkos/pull/6264)
+- Allow using the SYCL execution space on AMD GPUs [\#6321](https://github.com/kokkos/kokkos/pull/6321)
+- Allow sorting via native oneDPL to support Views with stride=1 [\#6322](https://github.com/kokkos/kokkos/pull/6322)
+- Make in-order queues the default via macro [\#6189](https://github.com/kokkos/kokkos/pull/6189)
+
+#### OpenACC:
+- Support Clacc compiler [\#6250](https://github.com/kokkos/kokkos/pull/6250)
+
+### General Enhancements
+- Add missing `is_*_view` traits and `is_*_view_v` helper variable templates for `DynRankView`, `DynamicView`, `OffsetView`, `ScatterView` containers [\#6195](https://github.com/kokkos/kokkos/pull/6195)
+- Make `nvcc_wrapper` and `compiler_launcher` scripts more portable by switching to a `#!/usr/bin/env` shebang [\#6357](https://github.com/kokkos/kokkos/pull/6357)
+- Add an improved `Kokkos::malloc` / `Kokkos::free` performance test [\#6377](https://github.com/kokkos/kokkos/pull/6377)
+- Ensure `Views` with `size==0` can be used with `deep_copy` [\#6273](https://github.com/kokkos/kokkos/pull/6273)
+- `Kokkos::abort` is moved to header `Kokkos_Abort.hpp` [\#6445](https://github.com/kokkos/kokkos/pull/6445)
+- `KOKKOS_ASSERT`, `KOKKOS_EXPECTS`, `KOKKOS_ENSURES` are moved to header `Kokkos_Assert.hpp` [\#6445](https://github.com/kokkos/kokkos/pull/6445)
+- Add a permuted-index mode to the gups benchmark [\#6378](https://github.com/kokkos/kokkos/pull/6378)
+- Check for overflow during backend initialization [\#6159](https://github.com/kokkos/kokkos/pull/6159)
+- Make constraints on `Kokkos::sort` more visible [\#6234](https://github.com/kokkos/kokkos/pull/6234) and cleanup API [\#6239](https://github.com/kokkos/kokkos/pull/6239)
+- Add converting assignment to `DualView`: [\#6474](https://github.com/kokkos/kokkos/pull/6474)
+
+
+### Build System Changes
+
+- Export `Kokkos_CXX_COMPILER_VERSION` [\#6282](https://github.com/kokkos/kokkos/pull/6282)
+- Disable default oneDPL support in Trilinos [\#6342](https://github.com/kokkos/kokkos/pull/6342)
+
+### Incompatibilities (i.e. breaking changes)
+ - Ensure that `Kokkos::complex` only gets instantiated for cv-unqualified floating-point types [\#6251](https://github.com/kokkos/kokkos/pull/6251)
+ - Removed (deprecated-3) support for volatile join operators in reductions [\#6385](https://github.com/kokkos/kokkos/pull/6385)
+ - Enforce `ViewCtorArgs` restrictions for `create_mirror_view` [\#6304](https://github.com/kokkos/kokkos/pull/6304)
+ - SIMD types for ARM NEON are not autodetected anymore but need `Kokkos_ARCH_ARM_NEON` or `Kokkos_ARCH_NATIVE=ON` [\#6394](https://github.com/kokkos/kokkos/pull/6394)
+ - Remove `#include <iostream>` from headers where possible [\#6482](https://github.com/kokkos/kokkos/pull/6482)
+
+### Deprecations
+- Deprecated `Kokkos::vector` [\#6252](https://github.com/kokkos/kokkos/pull/6252)
+- All host allocation mechanisms except for `STD_MALLOC` have been deprecated [\#6341](https://github.com/kokkos/kokkos/pull/6341)
+
+### Bug Fixes
+ - Missing memory fence in `RandomPool::free_state` functions [\#6290](https://github.com/kokkos/kokkos/pull/6290)
+ - Fix for corner case in `Kokkos::Experimental::is_partitioned` algorithm [\#6257](https://github.com/kokkos/kokkos/pull/6257)
+ - Fix initialization of scratch lock variables in the `Cuda` backend [\#6433](https://github.com/kokkos/kokkos/pull/6433)
+ - Fixes for `Kokkos::Array` [\#6372](https://github.com/kokkos/kokkos/pull/6372)
+ - Fixed symlink configure issue for Windows [\#6241](https://github.com/kokkos/kokkos/pull/6241)
+ - OpenMPTarget init-join fix [\#6444](https://github.com/kokkos/kokkos/pull/6444)
+ - Fix atomic operations bug for Min and Max [\#6435](https://github.com/kokkos/kokkos/pull/6435)
+ - Fix implementation for `cyl_bessel_i0` [\#6484](https://github.com/kokkos/kokkos/pull/6484)
+ - Fix various NVCC warnings in `BinSort`, `Array`, and bit manipulation function templates [\#6483](https://github.com/kokkos/kokkos/pull/6483)
+
+## [4.1.00](https://github.com/kokkos/kokkos/tree/4.1.00) (2023-06-16)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.0.01...4.1.00)
+
+### Features:
+* Add `<Kokkos_BitManipulation.hpp>` header (see the sketch after this list) [\#4577](https://github.com/kokkos/kokkos/pull/4577) [\#5907](https://github.com/kokkos/kokkos/pull/5907) [\#5967](https://github.com/kokkos/kokkos/pull/5967) [\#6101](https://github.com/kokkos/kokkos/pull/6101)
+* Add `UnorderedMapInsertOpTypes` [\#5877](https://github.com/kokkos/kokkos/pull/5877) and documentation [\#350](https://github.com/kokkos/kokkos-core-wiki/pull/350)
+* Add multiple reducers support for team-level parallel reduce [\#5727](https://github.com/kokkos/kokkos/pull/5727)
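+
+A minimal sketch of the new header, which provides device-callable counterparts of the C++20 `<bit>` functions:
+
+```c++
+#include <Kokkos_Core.hpp>
+#include <Kokkos_BitManipulation.hpp>
+
+KOKKOS_FUNCTION int count_bits(unsigned x) {
+  return Kokkos::popcount(x) + Kokkos::countl_zero(x);
+}
+```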
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+
+* Allow NVCC 12 to compile using C++20 flag [\#5977](https://github.com/kokkos/kokkos/pull/5977)
+* Remove ability to disable CMake option `Kokkos_ENABLE_CUDA_LAMBDA` and unconditionally enable CUDA extended lambda support. [\#5964](https://github.com/kokkos/kokkos/pull/5964)
+* Drop unnecessary fences around the memory allocation when using `CudaUVMSpace` in views [\#6008](https://github.com/kokkos/kokkos/pull/6008)
+
+#### HIP:
+* Improve performance for `parallel_reduce`. Use different parameters for `LightWeight` kernels [\#6029](https://github.com/kokkos/kokkos/pull/6029) and [\#6160](https://github.com/kokkos/kokkos/pull/6160)
+
+#### SYCL:
+* Only pass one wrapper object in SYCL reductions [\#6047](https://github.com/kokkos/kokkos/pull/6047)
+* Improve and simplify parallel_scan implementation [\#6064](https://github.com/kokkos/kokkos/pull/6064)
+* Remove workaround for submit_barrier not being enqueued properly [\#5504](https://github.com/kokkos/kokkos/pull/5504)
+* Fix guards for using scratch space with SYCL [\#6003](https://github.com/kokkos/kokkos/pull/6003)
+* Fix compiling SYCL with KOKKOS_IMPL_DO_NOT_USE_PRINTF_USAGE [\#6219](https://github.com/kokkos/kokkos/pull/6219)
+
+#### OpenMPTarget:
+* Improve hierarchical parallelism for Intel architectures [\#6043](https://github.com/kokkos/kokkos/pull/6043)
+* Enable Cray compiler for the OpenMPTarget backend. [\#5889](https://github.com/kokkos/kokkos/pull/5889)
+
+#### HPX:
+* Update HPX backend to use HPX's sender/receiver functionality [\#5628](https://github.com/kokkos/kokkos/pull/5628)
+* Increase minimum required HPX version to 1.8.0 [\#6132](https://github.com/kokkos/kokkos/pull/6132)
+* Implement HPX::in_parallel [\#6143](https://github.com/kokkos/kokkos/pull/6143)
+
+### General Enhancements
+* Export CMake `Kokkos_{CUDA,HIP}_ARCHITECTURES` variables [\#5919](https://github.com/kokkos/kokkos/pull/5919) [\#5925](https://github.com/kokkos/kokkos/pull/5925)
+* Add `Kokkos::Profiling::ScopedRegion` (see the sketch after this list) [\#5959](https://github.com/kokkos/kokkos/pull/5959) [\#5972](https://github.com/kokkos/kokkos/pull/5972)
+* Add support for `View::rank[_dynamic]()` [\#5870](https://github.com/kokkos/kokkos/pull/5870)
+* Detect incompatible relocatable device code mode to prevent ODR violations [\#5991](https://github.com/kokkos/kokkos/pull/5991)
+* Add (experimental) support for 32-bit Darwin and PPC [\#5916](https://github.com/kokkos/kokkos/pull/5916)
+* Add missing half and bhalf specialization of the infinity numeric trait [\#6055](https://github.com/kokkos/kokkos/pull/6055)
+* Add `is_dual_view` trait and align further with regular view [\#6120](https://github.com/kokkos/kokkos/pull/6120)
+* Allow templated functors in parallel_for, parallel_reduce and parallel_scan [\#5976](https://github.com/kokkos/kokkos/pull/5976)
+* Define KOKKOS_COMPILER_INTEL_LLVM and only define at most one KOKKOS_COMPILER* macro [\#5906](https://github.com/kokkos/kokkos/pull/5906)
+* Allow linking against build tree [\#6078](https://github.com/kokkos/kokkos/pull/6078)
+* Allow passing a temporary std::vector to partition_space [\#6167](https://github.com/kokkos/kokkos/pull/6167)
+* `Kokkos` can be used as an external dependency in `Trilinos` [\#6142](https://github.com/kokkos/kokkos/pull/6142), [\#6157](https://github.com/kokkos/kokkos/pull/6157) [\#6163](https://github.com/kokkos/kokkos/pull/6163)
+* Left align demangled stacktrace output [\#6191](https://github.com/kokkos/kokkos/pull/6191)
+* Improve OpenMP affinity warning to include MPI concerns [\#6185](https://github.com/kokkos/kokkos/pull/6185)
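+
+A minimal sketch of `Profiling::ScopedRegion` (the function name is illustrative):
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void solve_step() {
+  // RAII: pushes a profiling region here and pops it at scope exit.
+  Kokkos::Profiling::ScopedRegion region("solve step");
+  // ... kernels launched here are attributed to the region by tools ...
+}
+```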
+
+### Build System Changes
+* Drop `Kokkos_ENABLE_LAUNCH_COMPILER` option which had no effect [\#6148](https://github.com/kokkos/kokkos/pull/6148)
+* Export variables for relevant Kokkos options with CMake [\#6142](https://github.com/kokkos/kokkos/pull/6142)
+
+### Incompatibilities (i.e. breaking changes)
+* Desul atomics always enabled [\#5801](https://github.com/kokkos/kokkos/pull/5801)
+* Drop `KOKKOS_ENABLE_CUDA_ASM*` and `KOKKOS_ENABLE_*_ATOMICS` macros [\#5940](https://github.com/kokkos/kokkos/pull/5940)
+* Drop `KOKKOS_ENABLE_RFO_PREFETCH` macro [\#5944](https://github.com/kokkos/kokkos/pull/5944)
+* Deprecate `Kokkos_ENABLE_CUDA_LAMBDA` configuration option and force it to `ON` [\#5964](https://github.com/kokkos/kokkos/pull/5964)
+* Remove TriBITS Kokkos subpackages [\#6104](https://github.com/kokkos/kokkos/pull/6104)
+* Cuda: Remove unused attach_texture_object [\#6129](https://github.com/kokkos/kokkos/pull/6129)
+* Drop Kokkos_ENABLE_PROFILING_LOAD_PRINT configuration option [\#6150](https://github.com/kokkos/kokkos/pull/6150)
+* Drop pointless Kokkos{Algorithms,Containers}_config.h files [\#6108](https://github.com/kokkos/kokkos/pull/6108)
+
+### Deprecations
+* Deprecate `BinSort`, `BinOp1D`, and `BinOp3D` default constructors [\#6131](https://github.com/kokkos/kokkos/pull/6131)
+
+### Bug Fixes
+* Fix `SYCLTeamMember` to take arguments for scratch sizes as `std::size_t` [\#5981](https://github.com/kokkos/kokkos/pull/5981)
+* Fix Kokkos_SIMD with AVX2 on 64-bit architectures [\#6075](https://github.com/kokkos/kokkos/pull/6075)
+* Fix an incorrectly returning size for SIMD uint64_t in AVX2 [\#6004](https://github.com/kokkos/kokkos/pull/6004)
+* Fix missing avx512 header file with gcc versions before 10 [\#6183](https://github.com/kokkos/kokkos/pull/6183)
+* Fix incorrect results of `parallel_reduce` of types smaller than `int` on CUDA and HIP: [\#5745](https://github.com/kokkos/kokkos/pull/5745)
+* CMake: update package compatibility mode when building within Trilinos [\#6012](https://github.com/kokkos/kokkos/pull/6012)
+* Fix warnings generated from internal uses of `ALL_t` rather than `Kokkos::ALL_t` [\#6028](https://github.com/kokkos/kokkos/pull/6028)
+* Fix bug in `hpcbind` script: check for correct Slurm variable [\#6116](https://github.com/kokkos/kokkos/pull/6116)
+* KokkosTools: Don't call callbacks before backends are initialized [\#6114](https://github.com/kokkos/kokkos/pull/6114)
+* Fix global fence in Kokkos::resize(DynRankView) [\#6184](https://github.com/kokkos/kokkos/pull/6184)
+* Fix `BinSort` support for strided views [\#6081](https://github.com/kokkos/kokkos/pull/6081)
+* Fix missing `is_*_view` traits in containers [\#6195](https://github.com/kokkos/kokkos/pull/6195)
+* Fix broken OpenMP target on NVHPC [\#6171](https://github.com/kokkos/kokkos/pull/6171)
+* Sorting an empty view should exit early and not fail [\#6130](https://github.com/kokkos/kokkos/pull/6130)
+
+## [4.0.01](https://github.com/kokkos/kokkos/tree/4.0.01) (2023-04-14)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/4.0.00...4.0.01)
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+
+- Allow NVCC 12 to compile using C++20 flag [\#6020](https://github.com/kokkos/kokkos/pull/6020)
+- Add CUDA Ada architecture support [\#6022](https://github.com/kokkos/kokkos/pull/6022)
+
+#### HIP:
+
+- Add support for AMDGPU target NAVI31 / RX 7900 XT(X): gfx1100 [\#6021](https://github.com/kokkos/kokkos/pull/6021)
+- HIP: Fix warning from `std::memcpy` [\#6019](https://github.com/kokkos/kokkos/pull/6019)
+
+#### SYCL:
+- Fix `SYCLTeamMember` to take arguments for scratch sizes as `std::size_t` [\#5986](https://github.com/kokkos/kokkos/pull/5986)
+
+### General Enhancements
+- Fixup 4.0 change log [\#6023](https://github.com/kokkos/kokkos/pull/6023)
+
+### Build System Changes
+- Cherry-pick TriBITS update from Trilinos [\#6037](https://github.com/kokkos/kokkos/pull/6037)
+- CMake: update package compatibility mode when building within Trilinos [\#6013](https://github.com/kokkos/kokkos/pull/6013)
+
+### Bug Fixes
+- Fix an incorrectly returning size for SIMD uint64_t in AVX2 [\#6011](https://github.com/kokkos/kokkos/pull/6011)
+- Desul atomics: wrong value for `desul::Impl::numeric_limits_max<uint64_t>` [\#6018](https://github.com/kokkos/kokkos/pull/6018)
+- Fix warning in some user code when using std::memcpy [\#6000](https://github.com/kokkos/kokkos/pull/6000)
+- Fix excessive build times using Makefile.kokkos [\#6068](https://github.com/kokkos/kokkos/pull/6068)
+
+## [4.0.00](https://github.com/kokkos/kokkos/tree/4.0.00) (2023-02-21)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.7.01...4.0.00)
+
+### Features:
+- Allow value types without default constructor in `Kokkos::View` with `Kokkos::WithoutInitializing` [\#5307](https://github.com/kokkos/kokkos/pull/5307)
+- `parallel_scan` with `View` as result type. [\#5146](https://github.com/kokkos/kokkos/pull/5146)
+- Introduced `SharedSpace`, an alias for a `MemorySpace` that is accessible by every `ExecutionSpace`. The memory is moved and then accessed locally. [\#5289](https://github.com/kokkos/kokkos/pull/5289)
+- Introduced `SharedHostPinnedSpace`, an alias for a `MemorySpace` that is accessible by every `ExecutionSpace`. The memory is pinned to the host and accessed via zero-copy access. [\#5405](https://github.com/kokkos/kokkos/pull/5405)
+- Add team- and thread-level `sort`, `sort_by_key` algorithms. [\#5317](https://github.com/kokkos/kokkos/pull/5317)
+- Groundwork for `MDSpan` integration. [\#4973](https://github.com/kokkos/kokkos/pull/4973) and [\#5304](https://github.com/kokkos/kokkos/pull/5304)
+- Introduced MD version of hierarchical parallelism: `TeamThreadMDRange`, `ThreadVectorMDRange` and `TeamVectorMDRange`. [\#5238](https://github.com/kokkos/kokkos/pull/5238)
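+
+A sketch of the nested MD ranges (sizes and names are illustrative):
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void team_md(int nteams, int m, int n) {
+  using member_type = Kokkos::TeamPolicy<>::member_type;
+  Kokkos::parallel_for(
+      Kokkos::TeamPolicy<>(nteams, Kokkos::AUTO),
+      KOKKOS_LAMBDA(const member_type& team) {
+        // Distribute an m x n index block over the threads of this team.
+        Kokkos::parallel_for(
+            Kokkos::TeamThreadMDRange<Kokkos::Rank<2>, member_type>(team, m, n),
+            [&](int i, int j) { /* per-element work */ });
+      });
+}
+```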
+
+### Backend and Architecture Enhancements:
+
+#### CUDA:
+- Allow CUDA PTX forward compatibility [\#3612](https://github.com/kokkos/kokkos/pull/3612) [\#5536](https://github.com/kokkos/kokkos/pull/5536) [\#5527](https://github.com/kokkos/kokkos/pull/5527)
+- Add support for NVIDIA Hopper GPU architecture [\#5538](https://github.com/kokkos/kokkos/pull/5538)
+- Don't rely on synchronization behavior of default stream in CUDA and HIP [\#5391](https://github.com/kokkos/kokkos/pull/5391)
+- Improve CUDA cache config settings [\#5706](https://github.com/kokkos/kokkos/pull/5706)
+
+#### HIP:
+ - Move `HIP`, `HIPSpace`, `HIPHostPinnedSpace`, and `HIPManagedSpace` out of the `Experimental` namespace [\#5383](https://github.com/kokkos/kokkos/pull/5383)
+ - Don't rely on synchronization behavior of default stream in CUDA and HIP [\#5391](https://github.com/kokkos/kokkos/pull/5391)
+ - Export AMD architecture flag when using Trilinos [\#5528](https://github.com/kokkos/kokkos/pull/5528)
+ - Fix linking error (see [OLCF issue](https://docs.olcf.ornl.gov/systems/crusher_quick_start_guide.html#olcfdev-1167-kokkos-build-failures-with-prgenv-amd)) when using `amdclang`: [\#5539](https://github.com/kokkos/kokkos/pull/5539)
+ - Remove support for MI25 and add support for Navi 1030 [\#5522](https://github.com/kokkos/kokkos/pull/5522)
+ - Fix race condition when using `HSA_XNACK=1` [\#5755](https://github.com/kokkos/kokkos/pull/5755)
+ - Add parameter to force using GlobalMemory launch mechanism. This can be used when encountering compiler bugs with ROCm 5.3 and 5.4 [\#5796](https://github.com/kokkos/kokkos/pull/5796)
+
+#### SYCL:
+- Delegate choice of workgroup size for `parallel_reduce` with `RangePolicy` to the compiler. [\#5227](https://github.com/kokkos/kokkos/pull/5227)
+- SYCL `RangePolicy`: manually specify workgroup size through chunk size [\#4875](https://github.com/kokkos/kokkos/pull/4875)
+
+#### OpenMPTarget:
+- Select the right device [\#5492](https://github.com/kokkos/kokkos/pull/5492)
+
+#### OpenMP:
+ - Add `partition_space` [\#5105](https://github.com/kokkos/kokkos/pull/5105)
+
+### General Enhancements
+- Implement `OffsetView` constructor taking `pair`s and `ViewCtorProp` [\#5303](https://github.com/kokkos/kokkos/pull/5303)
+- Promote math constants to `Kokkos::numbers` namespace (see the combined sketch after this list) [\#5434](https://github.com/kokkos/kokkos/pull/5434)
+- Add overloads of `hypot` math function that take 3 arguments [\#5341](https://github.com/kokkos/kokkos/pull/5341)
+- Add `fma` fused multiply-add math function [\#5428](https://github.com/kokkos/kokkos/pull/5428)
+- Views using `MemoryTraits::Atomic` don't need `volatile` overloads for the value type anymore. [\#5455](https://github.com/kokkos/kokkos/pull/5455)
+- Added `is_team_handle` trait [\#5375](https://github.com/kokkos/kokkos/pull/5375)
+- Refactor desul atomics to support compiling CUDA with NVC++ [\#5431](https://github.com/kokkos/kokkos/pull/5431) [\#5497](https://github.com/kokkos/kokkos/pull/5497) [\#5498](https://github.com/kokkos/kokkos/pull/5498)
+- Support finding `libquadmath` with native compiler support [\#5286](https://github.com/kokkos/kokkos/pull/5286)
+- Add architecture flags for MSVC [\#5673](https://github.com/kokkos/kokkos/pull/5673)
+- SIMD backend for ARM NEON [\#5829](https://github.com/kokkos/kokkos/pull/5829)
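+
+A combined sketch of the promoted math constants and the new `hypot`/`fma` overloads:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+KOKKOS_FUNCTION double distance3(double x, double y, double z) {
+  return Kokkos::hypot(x, y, z);  // three-argument overload
+}
+
+KOKKOS_FUNCTION double fused(double a, double b, double c) {
+  return Kokkos::fma(a, b, c);    // a * b + c with a single rounding
+}
+
+KOKKOS_FUNCTION double deg2rad(double d) {
+  return d * Kokkos::numbers::pi / 180.0;  // constants in Kokkos::numbers
+}
+```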
+
+### Build System Changes
+- Let CMake determine OpenMP flags. [\#4105](https://github.com/kokkos/kokkos/pull/4105)
+- Update minimum compiler versions. [\#5323](https://github.com/kokkos/kokkos/pull/5323)
+- Makefile and CMake support for C++23 [\#5283](https://github.com/kokkos/kokkos/pull/5283)
+- Do not add `-cuda` to the link line with NVHPC compiler when the CUDA backend is not actually enabled [\#5485](https://github.com/kokkos/kokkos/pull/5485)
+- Only add `-latomic` in generated GNU makefiles when OpenMPTarget backend is enabled [\#5501](https://github.com/kokkos/kokkos/pull/5501) [\#5537](https://github.com/kokkos/kokkos/pull/5537) (3.7 patch release candidate)
+- `Kokkos_ENABLE_CUDA_LAMBDA` now `ON` by default with NVCC [\#5580](https://github.com/kokkos/kokkos/pull/5580)
+- Fix enabling of relocatable device code when using CUDA as CMake language [\#5564](https://github.com/kokkos/kokkos/pull/5564)
+- Fix cmake configuration with CUDA 12 [\#5691](https://github.com/kokkos/kokkos/pull/5691)
+
+### Incompatibilities (i.e. breaking changes)
+- ***Require C++17*** [\#5277](https://github.com/kokkos/kokkos/pull/5277)
+- Turn setting `Kokkos_CXX_STANDARD` into an error [\#5293](https://github.com/kokkos/kokkos/pull/5293)
+- Remove all deprecations in Kokkos 3 [\#5297](https://github.com/kokkos/kokkos/pull/5297)
+- Remove `KOKKOS_COMPILER_CUDA_VERSION` [\#5430](https://github.com/kokkos/kokkos/pull/5430)
+- Drop `reciprocal_overflow_threshold` numeric trait [\#5326](https://github.com/kokkos/kokkos/pull/5326)
+- Move `reduction_identity` out of `<Kokkos_NumericTraits.hpp>` into a new `<Kokkos_ReductionIdentity.hpp>` header [\#5450](https://github.com/kokkos/kokkos/pull/5450)
+- Reduction and scan routines will report an error if the `join()` operator they would use takes `volatile`-qualified parameters [\#5409](https://github.com/kokkos/kokkos/pull/5409)
+- `ENABLE_CUDA_UVM` is dropped in favor of using `SharedSpace` as `MemorySpace` explicitly [\#5608](https://github.com/kokkos/kokkos/pull/5608)
+- Remove Kokkos_ENABLE_CUDA_LDG_INTRINSIC option [\#5623](https://github.com/kokkos/kokkos/pull/5623)
+- Don't rely on synchronization behavior of default stream in CUDA and HIP - this potentially will break unintended implicit synchronization with other libraries such as MPI [\#5391](https://github.com/kokkos/kokkos/pull/5391)
+- Make ExecutionSpace::concurrency() a non-static member function [\#5655](https://github.com/kokkos/kokkos/pull/5655) and related PRs
+- Remove code guarded by `KOKKOS_ENABLE_DEPRECATED_CODE_3`
+
+### Deprecations
+- Deprecate `CudaUVMSpace::available()` which always returned `true` [\#5614](https://github.com/kokkos/kokkos/pull/5614)
+- Deprecate `volatile`-qualified members from `Kokkos::pair` and `Kokkos::complex` [\#5412](https://github.com/kokkos/kokkos/pull/5412)
+- Deprecate `KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_*` macros [\#5824](https://github.com/kokkos/kokkos/pull/5824) (oversight in 3.6)
+
+### Bug Fixes
+- Avoid allocating memory for `UniqueToken` [\#5300](https://github.com/kokkos/kokkos/pull/5300)
+- Fix `pragma ivdep` in `Kokkos_OpenMP_Parallel.hpp` [\#5356](https://github.com/kokkos/kokkos/pull/5356)
+- Fix configuring with Threads support when rerunning CMake [\#5486](https://github.com/kokkos/kokkos/pull/5486)
+- Fix View assignment between `LayoutLeft` and `LayoutRight` with static extents [\#5535](https://github.com/kokkos/kokkos/pull/5535) (3.7 patch release candidate)
+- Add `fence()` calls to sorting routine overloads that don't take an execution space parameter [\#5389](https://github.com/kokkos/kokkos/pull/5389)
+- `ClockTic` changed to 64 bit to fix overflow on Power [\#5577](https://github.com/kokkos/kokkos/pull/5577) (incl. in 3.7.01 patch release)
+- Fix incorrect offset in CUDA and HIP `parallel_scan` for < 4 byte types [\#5555](https://github.com/kokkos/kokkos/pull/5555) (3.7 patch release candidate)
+- Fix incorrect alignment behavior of scratch allocations in some corner cases (e.g. very small allocations) [\#5687](https://github.com/kokkos/kokkos/pull/5687) (3.7 patch release candidate)
+- Add missing `ReductionIdentity<char>` specialization [\#5798](https://github.com/kokkos/kokkos/pull/5798)
+- Don't install standard algorithms headers multiple times [\#5670](https://github.com/kokkos/kokkos/pull/5670)
+- Fix max scratch size calculation for level 0 scratch in CUDA and HIP [\#5718](https://github.com/kokkos/kokkos/pull/5718)
+
+## [3.7.02](https://github.com/kokkos/kokkos/tree/3.7.02) (2023-05-17)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.7.01...3.7.02)
+
+### Backends and Archs Enhancements:
+#### CUDA
+- Add Hopper support and update nvcc_wrapper to work with CUDA-12 [\#5693](https://github.com/kokkos/kokkos/pull/5693)
+### General Enhancements:
+- sprintf -> snprintf [\#5787](https://github.com/kokkos/kokkos/pull/5787)
+### Build System:
+- Add error message when not using `hipcc` and when `CMAKE_CXX_STANDARD` is not set [\#5945](https://github.com/kokkos/kokkos/pull/5945)
+### Bug Fixes:
+- Fix Scratch allocation alignment issues [\#5692](https://github.com/kokkos/kokkos/pull/5692)
+- Fix Intel Classic Compiler ICE [\#5710](https://github.com/kokkos/kokkos/pull/5710)
+- Don't install std algorithm headers multiple times [\#5711](https://github.com/kokkos/kokkos/pull/5711)
+- Fix static init order issue in `InitializationSettings` [\#5721](https://github.com/kokkos/kokkos/pull/5721)
+- Fix src/dst Properties in deep_copy(DynamicView,View) [\#5732](https://github.com/kokkos/kokkos/pull/5732)
+- Fix build on Fedora Rawhide [\#5782](https://github.com/kokkos/kokkos/pull/5782)
+- Finalize HIP lock arrays [\#5694](https://github.com/kokkos/kokkos/pull/5694)
+- Fix CUDA lock arrays for current Desul [\#5812](https://github.com/kokkos/kokkos/pull/5812)
+- Set the correct device/context in InterOp tests [\#5701](https://github.com/kokkos/kokkos/pull/5701)
+
+## [3.7.01](https://github.com/kokkos/kokkos/tree/3.7.01) (2022-12-01)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.7.00...3.7.01)
+
+### Bug Fixes:
+- Add fences to all sorting routines not taking an execution space instance argument [\#5547](https://github.com/kokkos/kokkos/pull/5547)
+- Fix repeated `team_reduce` without barrier [\#5552](https://github.com/kokkos/kokkos/pull/5552)
+- Fix memory spaces in `create_mirror_view` overloads using `view_alloc` [\#5521](https://github.com/kokkos/kokkos/pull/5521)
+- Allow `as_view_of_rank_n()` to be overloaded for "special" scalar types [\#5553](https://github.com/kokkos/kokkos/pull/5553)
+- Fix warning calling a `__host__` function from a `__host__ __device__` function from `View::as_view_of_rank_n` [\#5591](https://github.com/kokkos/kokkos/pull/5591)
+- OpenMPTarget: adding implementation to set device id. [\#5557](https://github.com/kokkos/kokkos/pull/5557)
+- Use `Kokkos::atomic_load` to Correct Race Condition Giving Rise to Seg Faulting Error in OpenMP tests [\#5559](https://github.com/kokkos/kokkos/pull/5559)
+- cmake: define `KOKKOS_ARCH_A64FX` [\#5561](https://github.com/kokkos/kokkos/pull/5561)
+- Only link against libatomic in gnu-make OpenMPTarget build [\#5565](https://github.com/kokkos/kokkos/pull/5565)
+- Fix static extents assignment for LayoutLeft/LayoutRight assignment [\#5566](https://github.com/kokkos/kokkos/pull/5566)
+- Do not add -cuda to the link line with NVHPC compiler when the CUDA backend is not actually enabled [\#5569](https://github.com/kokkos/kokkos/pull/5569)
+- Export the flags in `KOKKOS_AMDGPU_OPTIONS` when using Trilinos [\#5571](https://github.com/kokkos/kokkos/pull/5571)
+- Add support for detecting MPI local rank with MPICH and PMI [\#5570](https://github.com/kokkos/kokkos/pull/5570) [\#5582](https://github.com/kokkos/kokkos/pull/5582)
+- Remove listing of undefined TPL dependencies [\#5573](https://github.com/kokkos/kokkos/pull/5573)
+- ClockTic changed to 64 bit to fix overflow on Power [\#5592](https://github.com/kokkos/kokkos/pull/5592)
+- Fix incorrect offset in CUDA and HIP parallel scan for < 4 byte types [\#5607](https://github.com/kokkos/kokkos/pull/5607)
+- Fix initialization of Cuda lock arrays [\#5622](https://github.com/kokkos/kokkos/pull/5622)
+
+## [3.7.00](https://github.com/kokkos/kokkos/tree/3.7.00) (2022-08-22)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.6.01...3.7.00)
+
+### Features:
+- Use non-volatile `join()` member functions and `operator+=` in `parallel_reduce/scan` [\#4931](https://github.com/kokkos/kokkos/pull/4931) [\#4954](https://github.com/kokkos/kokkos/pull/4954) [\#4951](https://github.com/kokkos/kokkos/pull/4951)
+- Add `SIMD` sub package (requires C++17) [\#5016](https://github.com/kokkos/kokkos/pull/5016)
+- Add `is_finalized()` [\#5247](https://github.com/kokkos/kokkos/pull/5247)
+- Promote mathematical functions from `namespace Kokkos::Experimental` to `namespace Kokkos` [\#4791](https://github.com/kokkos/kokkos/pull/4791)
+- Promote `min`, `max`, `clamp`, `minmax` functions from `namespace Kokkos::Experimental` to `namespace Kokkos` (see the sketch after this list) [\#5170](https://github.com/kokkos/kokkos/pull/5170)
+- Add `round`, `logb`, `nextafter`, `copysign`, and `signbit` math functions [\#4768](https://github.com/kokkos/kokkos/pull/4768)
+- Add `HIPManagedSpace`, similar to `CudaUVMSpace` [\#5112](https://github.com/kokkos/kokkos/pull/5112)
+- Accept view construction allocation properties in `create_mirror[_view,_view_and_copy]` and `resize/realloc` [\#5125](https://github.com/kokkos/kokkos/pull/5125) [\#5095](https://github.com/kokkos/kokkos/pull/5095) [\#5035](https://github.com/kokkos/kokkos/pull/5035) [\#4805](https://github.com/kokkos/kokkos/pull/4805) [\#4844](https://github.com/kokkos/kokkos/pull/4844)
+- Allow `MemorySpace::allocate()` to be called with execution space [\#4826](https://github.com/kokkos/kokkos/pull/4826)
+- Experimental: Compile time view subscriber [\#4197](https://github.com/kokkos/kokkos/pull/4197)
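+
+A minimal sketch using the promoted functions:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+KOKKOS_FUNCTION double saturate(double x) {
+  // min/max/clamp/minmax now live directly in namespace Kokkos
+  return Kokkos::clamp(x, 0.0, 1.0);
+}
+```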
+
+### Backends and Archs Enhancements:
+- Add support for Sapphire Rapids Intel architecture [\#5015](https://github.com/kokkos/kokkos/pull/5015)
+- Add support for ICX, SKL and ICL Intel architectures [\#5013](https://github.com/kokkos/kokkos/pull/5013) [\#4929](https://github.com/kokkos/kokkos/pull/4929)
+- Add arch flags for Intel GPU Ponte Vecchio [\#4932](https://github.com/kokkos/kokkos/pull/4932)
+- SYCL: require GPU if GPU architecture was set at configuration time (i.e. do not allow fallback to CPU device) [\#5264](https://github.com/kokkos/kokkos/pull/5264) [\#5222](https://github.com/kokkos/kokkos/pull/5222)
+- SYCL: Add `SYCL::sycl_queue()` for interoperability [\#5241](https://github.com/kokkos/kokkos/pull/5241)
+- SYCL: Loosen restriction for using built-in `sycl::group_broadcast` [\#4552](https://github.com/kokkos/kokkos/pull/4552)
+- SYCL: preserve address space [\#4396](https://github.com/kokkos/kokkos/pull/4396)
+- OpenMPTarget: Adding a workaround for team scan [\#5219](https://github.com/kokkos/kokkos/pull/5219)
+- OpenMPTarget: Adding logic to skip the kernel launch if `league_size=0` [\#5067](https://github.com/kokkos/kokkos/pull/5067)
+- OpenMPTarget: Make sure `Kokkos::abort()` causes abnormal program termination when called on the host-side [\#4808](https://github.com/kokkos/kokkos/pull/4808)
+- HIP: Make HIPHostPinnedSpace coarse-grained [\#5152](https://github.com/kokkos/kokkos/pull/5152)
+- Refactor OpenMP `parallel_for` implementation to use more native OpenMP constructs [\#4664](https://github.com/kokkos/kokkos/pull/4664)
+- Add option to optimize for local CPU architecture `Kokkos_ARCH_NATIVE` [\#4930](https://github.com/kokkos/kokkos/pull/4930)
+
+
+### Implemented enhancements
+- Add command line argument/environment variable to print the configuration [\#5233](https://github.com/kokkos/kokkos/pull/5233)
+- Improve error message in view memory access violations [\#4950](https://github.com/kokkos/kokkos/pull/4950)
+- Remove unnecessary fences in View initialization [\#4823](https://github.com/kokkos/kokkos/pull/4823)
+- Make `View::shmem_size()` device-callable [\#4936](https://github.com/kokkos/kokkos/pull/4936)
+- Update numerics support for `__float128` [\#5081](https://github.com/kokkos/kokkos/pull/5081)
+- Add `log10` overload for `Kokkos::complex` [\#5009](https://github.com/kokkos/kokkos/pull/5009)
+- Add `[[nodiscard]]` to `ScopeGuard` [\#5224](https://github.com/kokkos/kokkos/pull/5224)
+- Add structured binding support for `Kokkos::Array` [\#4962](https://github.com/kokkos/kokkos/pull/4962)
+- Enable accessing `Kokkos::Array` elements in constant expressions [\#4916](https://github.com/kokkos/kokkos/pull/4916)
+- Mark `as_view_of_rank_n` as KOKKOS_FUNCTION [\#5248](https://github.com/kokkos/kokkos/pull/5248)
+- Cleanup/rework fence overloads [\#5148](https://github.com/kokkos/kokkos/pull/5148)
+- Assert that `Layout` construction from extents is valid in functions taking integer extents [\#5209](https://github.com/kokkos/kokkos/pull/5209)
+- Add `fill_random` overload that takes an execution space as first argument [\#5181](https://github.com/kokkos/kokkos/pull/5181)
+- Avoid some unnecessary fences in `parallel_reduce/scan` [\#5154](https://github.com/kokkos/kokkos/pull/5154)
+- Include `KOKKOS_ENABLE_LIBDL` in options when printing configuration [\#5086](https://github.com/kokkos/kokkos/pull/5086)
+- DynRankView: make `layout()` return the same as a corresponding static View [\#5026](https://github.com/kokkos/kokkos/pull/5026)
+- Use `_mm_malloc` for icpx [\#5012](https://github.com/kokkos/kokkos/pull/5012)
+- Avoid forcing matching execution spaces in `BinSort` constructor and `sort()` [\#4919](https://github.com/kokkos/kokkos/pull/4919)
+- Check number of bins in `BinSort` [\#4890](https://github.com/kokkos/kokkos/pull/4890)
+- Improve performance in parallel STL-like algorithms [\#4887](https://github.com/kokkos/kokkos/pull/4887) [\#4886](https://github.com/kokkos/kokkos/pull/4886)
+- Disable `memset` on A64FX and launch `parallel_for` instead (performance) [\#4884](https://github.com/kokkos/kokkos/pull/4884)
+- Allow non-power-of-two team sizes for team reductions and scans [\#4809](https://github.com/kokkos/kokkos/pull/4809)
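+
+A minimal sketch of the two `Kokkos::Array` enhancements above; the array contents and the `demo` function are illustrative only:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+// Elements can now be read in constant expressions (#4916) ...
+constexpr Kokkos::Array<int, 3> a{{1, 2, 3}};
+static_assert(a[1] == 2, "constexpr element access");
+
+void demo() {
+  // ... and structured bindings are supported (#4962).
+  auto [x, y, z] = a;
+  (void)x; (void)y; (void)z;
+}
+```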
+
+#### Harmonization of Kokkos execution environment initialization:
+- Warn when unable to detect local MPI rank and user explicitly asked for it [\#5263](https://github.com/kokkos/kokkos/pull/5263)
+- Refactor parsing of command line arguments and environment variables [\#5221](https://github.com/kokkos/kokkos/pull/5221)
+- Refactor device selection at initialization [\#5211](https://github.com/kokkos/kokkos/pull/5211)
+- Rename tools settings for consistency [\#5201](https://github.com/kokkos/kokkos/pull/5201)
+- Print help only once [\#5128](https://github.com/kokkos/kokkos/pull/5128)
+- Update precedence rule in initialization [\#5130](https://github.com/kokkos/kokkos/pull/5130)
+- Warn instead of just ignoring user settings when kokkos-tools is disabled [\#5088](https://github.com/kokkos/kokkos/pull/5088)
+- Drop numa args in threads backend initialization [\#5127](https://github.com/kokkos/kokkos/pull/5127)
+- Warn users when a flag prefixed with -[-]kokkos is not recognized and do not remove it [\#5256](https://github.com/kokkos/kokkos/pull/5256)
+- Give back to Core what belongs to Core (i.e., move the `tune_internals` option from Tools back to Core) [\#5202](https://github.com/kokkos/kokkos/pull/5202)
+
+#### Build system updates:
+- `nvcc_wrapper`: filter out -pedantic-errors from nvcc options [\#5235](https://github.com/kokkos/kokkos/pull/5235)
+- `nvcc_wrapper`: add known nvcc option --source-in-ptx [\#5052](https://github.com/kokkos/kokkos/pull/5052)
+- Link libdl as interface library [\#5179](https://github.com/kokkos/kokkos/pull/5179)
+- Only show GPU architectures whose corresponding backend is enabled [\#5119](https://github.com/kokkos/kokkos/pull/5119)
+- Enable optional external desul build [\#5021](https://github.com/kokkos/kokkos/pull/5021) [\#5132](https://github.com/kokkos/kokkos/pull/5132)
+- Export `Kokkos_CXX_STANDARD` variable with CMake [\#5068](https://github.com/kokkos/kokkos/pull/5068)
+- Suppress warnings with nvc++ [\#5031](https://github.com/kokkos/kokkos/pull/5031)
+- Disallow multiple host architectures in CMake [\#4996](https://github.com/kokkos/kokkos/pull/4996)
+- Do not include compiler warning flags in the compile option of the cmake target [\#4989](https://github.com/kokkos/kokkos/pull/4989)
+- AOT flags for OpenMPTarget targeting Intel GPUs [\#4915](https://github.com/kokkos/kokkos/pull/4915)
+- Repurpose `Kokkos_ARCH_INTEL_GEN` for SYCL to mean JIT compilation, conforming with OpenMPTarget [\#4894](https://github.com/kokkos/kokkos/pull/4894)
+- Replace amdgpu-target with offload-arch [\#4874](https://github.com/kokkos/kokkos/pull/4874)
+- Do not enable `kokkos_launch_compiler` when `CMAKE_CXX_COMPILER_LAUNCHER` is set [\#4870](https://github.com/kokkos/kokkos/pull/4870)
+- Move CMake version check up [\#4797](https://github.com/kokkos/kokkos/pull/4797)
+
+### Incompatibilities:
+- Remove `KOKKOS_THREAD_LOCAL` [\#5064](https://github.com/kokkos/kokkos/pull/5064)
+- Remove `KOKKOS_ENABLE_POSIX_MEMALIGN` [\#5011](https://github.com/kokkos/kokkos/pull/5011)
+- Remove unused `KOKKOS_ENABLE_TM` [\#4995](https://github.com/kokkos/kokkos/pull/4995)
+- Remove unused cmakedefine `KOKKOS_ENABLE_COMPILER_WARNINGS` [\#4883](https://github.com/kokkos/kokkos/pull/4883)
+- Remove unused `KOKKOS_ENABLE_DUALVIEW_MODIFY_CHECK` [\#4882](https://github.com/kokkos/kokkos/pull/4882)
+- Drop Instruction Set Architecture (ISA) macros [\#4981](https://github.com/kokkos/kokkos/pull/4981)
+- Warn in `ScopeGuard` about illegal usage [\#5250](https://github.com/kokkos/kokkos/pull/5250)
+
+### Deprecations:
+- Guard against non-public header inclusion [\#5178](https://github.com/kokkos/kokkos/pull/5178)
+- Raise deprecation warnings if a non-empty WorkTag class is used [\#5230](https://github.com/kokkos/kokkos/pull/5230)
+- Deprecate `parallel_*` overloads taking the label as trailing argument [\#5141](https://github.com/kokkos/kokkos/pull/5141)
+- Deprecate nested types in functional [\#5185](https://github.com/kokkos/kokkos/pull/5185)
+- Deprecate `InitArguments` struct and replace it with `InitializationSettings` (see the sketch after this list) [\#5135](https://github.com/kokkos/kokkos/pull/5135)
+- Deprecate `finalize_all()` [\#5134](https://github.com/kokkos/kokkos/pull/5134)
+- Deprecate command line arguments (other than `--help`) that are not prefixed with `kokkos-*` [\#5120](https://github.com/kokkos/kokkos/pull/5120)
+- Deprecate `--[kokkos-]numa` cmdline arg and `KOKKOS_NUMA` env var [\#5117](https://github.com/kokkos/kokkos/pull/5117)
+- Deprecate `--[kokkos-]threads` command line argument in favor of `--[kokkos-]num-threads` [\#5111](https://github.com/kokkos/kokkos/pull/5111)
+- Deprecate `Kokkos::is_reducer_type` [\#4957](https://github.com/kokkos/kokkos/pull/4957)
+- Deprecate `OffsetView` constructors taking `index_list_type` [\#4810](https://github.com/kokkos/kokkos/pull/4810)
+- Deprecate overloads of `Kokkos::sort` taking a parameter `bool always_use_kokkos_sort` [\#5382](https://github.com/kokkos/kokkos/issues/5382)
+- Warn about `parallel_reduce` cases that call `join()` with volatile-qualified arguments [\#5215](https://github.com/kokkos/kokkos/pull/5215)
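+
+A minimal sketch of the replacement API; the thread count and device id below are arbitrary values for illustration:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+int main() {
+  // Setters are chainable; only options that are explicitly set take effect.
+  Kokkos::initialize(Kokkos::InitializationSettings()
+                         .set_num_threads(8)
+                         .set_device_id(0));
+  // ... user code ...
+  Kokkos::finalize();
+  return 0;
+}
+```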
+
+### Bug Fixes:
+- CUDA Reductions: Fix data races reported by Nvidia `compute-sanitizer` [\#4855](https://github.com/kokkos/kokkos/pull/4855)
+- Work around Intel compiler bug [\#5301](https://github.com/kokkos/kokkos/pull/5301)
+- Avoid allocating memory for UniqueToken [\#5300](https://github.com/kokkos/kokkos/pull/5300)
+- DynamicView: Properly resize mirror instances after construction [\#5276](https://github.com/kokkos/kokkos/pull/5276)
+- Remove Kokkos::Rank limit of 6 ranks [\#5271](https://github.com/kokkos/kokkos/pull/5271)
+- Do not forget to set last element to nullptr when removing a flag in `Kokkos::initialize` [\#5272](https://github.com/kokkos/kokkos/pull/5272)
+- Fix CUDA+MSVC build issue [\#5261](https://github.com/kokkos/kokkos/pull/5261)
+- Fix `DynamicView::resize_serial` [\#5220](https://github.com/kokkos/kokkos/pull/5220)
+- Fix cmake default compiler flags for unknown compiler [\#5217](https://github.com/kokkos/kokkos/pull/5217)
+- Fix `move_backward` [\#5191](https://github.com/kokkos/kokkos/pull/5191)
+- Fix missing symbol with the Intel compiler (issue 5196) [\#5207](https://github.com/kokkos/kokkos/pull/5207)
+- Preserve `KOKKOS_INVALID_INDEX` in ViewDimension and ArrayLayout construction [\#5188](https://github.com/kokkos/kokkos/pull/5188)
+- Finalize `deep_copy_space` early avoiding printing to `std::cerr` for Cuda [\#5151](https://github.com/kokkos/kokkos/pull/5151)
+- Use correct policy in Threads MDRange `parallel_reduce` [\#5123](https://github.com/kokkos/kokkos/pull/5123)
+- Fix building with NVCC as the CXX compiler while the CUDA backend is not enabled [\#5115](https://github.com/kokkos/kokkos/pull/5115)
+- OpenMPTarget: Fix index range for MDRange [\#5089](https://github.com/kokkos/kokkos/pull/5089)
+- Fix bug with CUDA's team reduction for empty ranges [\#5079](https://github.com/kokkos/kokkos/pull/5079)
+- Fix using `ZeroMemset` for Serial [\#5077](https://github.com/kokkos/kokkos/pull/5077)
+- Fix `Kokkos::Vector::push_back` for default execution space [\#5047](https://github.com/kokkos/kokkos/pull/5047)
+- ScatterView: Fix ScatterMin/ScatterMax to use proper atomics [\#5045](https://github.com/kokkos/kokkos/pull/5045)
+- Fix calling `ZeroMemset` in `deep_copy` [\#5040](https://github.com/kokkos/kokkos/pull/5040)
+- Make View self-assignment not produce double-free [\#5024](https://github.com/kokkos/kokkos/pull/5024)
+- Guard against unrecognized pragma with intel compilers [\#5019](https://github.com/kokkos/kokkos/pull/5019)
+- Fix race condition in `HIPParallelLaunch` [\#5008](https://github.com/kokkos/kokkos/pull/5008)
+- KokkosP: Fix `device_id` in profiling [\#4997](https://github.com/kokkos/kokkos/pull/4997)
+- Fix for `Kokkos::vector::insert` into empty vector with begin and end iterators [\#4988](https://github.com/kokkos/kokkos/pull/4988)
+- Fix Core header files installation [\#4984](https://github.com/kokkos/kokkos/pull/4984)
+- Fix bounds errors with `Kokkos::sort` [\#4980](https://github.com/kokkos/kokkos/pull/4980)
+- Fixup: let `RangePolicy::set_chunk_size` return a reference to self [\#4918](https://github.com/kokkos/kokkos/pull/4918)
+- Fix allocating large Views [\#4907](https://github.com/kokkos/kokkos/pull/4907)
+- Fix combined reductions with `Kokkos::View` [\#4896](https://github.com/kokkos/kokkos/pull/4896)
+- Fixed `_CUDA_ARCH__` to `__CUDA_ARCH__` for CUDA LDG [\#4893](https://github.com/kokkos/kokkos/pull/4893)
+- Fixup `View::access()` truncate parameter pack [\#4876](https://github.com/kokkos/kokkos/pull/4876)
+- Fix `abort` with HIP backend for ROCm 5.0.2 and beyond [\#4873](https://github.com/kokkos/kokkos/pull/4873)
+- Fix HIP version when printing the configuration [\#4872](https://github.com/kokkos/kokkos/pull/4872)
+- Fix scratch lock array when using scratch level 1 [\#4871](https://github.com/kokkos/kokkos/pull/4871)
+- Fix Makefile.kokkos to work with fujitsu compiler [\#4867](https://github.com/kokkos/kokkos/pull/4867)
+- cmake: Correct THREADS link option [\#4854](https://github.com/kokkos/kokkos/pull/4854)
+- UniqueToken `impl_acquire` function should be device only [\#4819](https://github.com/kokkos/kokkos/pull/4819)
+- Fix example calls to non-existent static `print_configuration` [\#4806](https://github.com/kokkos/kokkos/pull/4806)
+- Fix requests for large team scratch sizes [\#4728](https://github.com/kokkos/kokkos/pull/4728)
+
+
+## [3.6.01](https://github.com/kokkos/kokkos/tree/3.6.01) (2022-05-23)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.6.00...3.6.01)
+
+### Bug Fixes:
+- Threads: Fix serial resizing of scratch space (3.6.01 cherry-pick) [\#5109](https://github.com/kokkos/kokkos/pull/5109)
+- Fix ScatterMin/ScatterMax to use proper atomics (3.6.01 cherry-pick) [\#5046](https://github.com/kokkos/kokkos/pull/5046)
+- Fix allocating large Views [\#4907](https://github.com/kokkos/kokkos/pull/4907)
+- Fix bounds errors with Kokkos::sort [\#4980](https://github.com/kokkos/kokkos/pull/4980)
+- Fix HIP version when printing the configuration [\#4872](https://github.com/kokkos/kokkos/pull/4872)
+- Fixed `_CUDA_ARCH__` to `__CUDA_ARCH__` for CUDA LDG [\#4893](https://github.com/kokkos/kokkos/pull/4893)
+- Fixed an incorrect struct initialization [\#5028](https://github.com/kokkos/kokkos/pull/5028)
+- Fix race condition in `HIPParallelLaunch` [\#5008](https://github.com/kokkos/kokkos/pull/5008)
+- Avoid deprecation warnings with `OpenMPExec::validate_partition` [\#4982](https://github.com/kokkos/kokkos/pull/4982)
+- Make View self-assignment not produce double-free [\#5024](https://github.com/kokkos/kokkos/pull/5024)
+
+
+## [3.6.00](https://github.com/kokkos/kokkos/tree/3.6.00) (2022-02-18)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.5.00...3.6.00)
+
+### Features:
+- Add C++ standard algorithms [\#4315](https://github.com/kokkos/kokkos/pull/4315)
+- Implement `fill_random` for `DynRankView` [\#4763](https://github.com/kokkos/kokkos/pull/4763)
+- Add `bhalf_t` [\#4543](https://github.com/kokkos/kokkos/pull/4543) [\#4653](https://github.com/kokkos/kokkos/pull/4653)
+- Add mathematical constants [\#4519](https://github.com/kokkos/kokkos/pull/4519)
+- Allow `Kokkos::{create_mirror*,resize,realloc}` to be used with `WithoutInitializing` [\#4486](https://github.com/kokkos/kokkos/pull/4486) [\#4337](https://github.com/kokkos/kokkos/pull/4337)
+- Implement `KOKKOS_IF_ON_{HOST,DEVICE}` macros (see the sketch after this list) [\#4660](https://github.com/kokkos/kokkos/pull/4660)
+- Allow setting the CMake language for Kokkos [\#4323](https://github.com/kokkos/kokkos/pull/4323)
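+
+A minimal sketch of the new macros; note the double parentheses around each code fragment, and that `report_side` is a hypothetical function name:
+
+```c++
+#include <Kokkos_Core.hpp>
+#include <cstdio>
+
+// Selects a code fragment depending on whether the enclosing function is
+// currently executing on the host or on the device.
+KOKKOS_FUNCTION void report_side() {
+  KOKKOS_IF_ON_HOST((printf("running on the host\n");))
+  KOKKOS_IF_ON_DEVICE((printf("running on the device\n");))
+}
+```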
+
+#### Performance improvements:
+- Desul: Add ScopeCaller [\#4690](https://github.com/kokkos/kokkos/pull/4690)
+- Enable Desul atomics by default when using Makefiles [\#4606](https://github.com/kokkos/kokkos/pull/4606)
+- Unique token improvement [\#4741](https://github.com/kokkos/kokkos/pull/4741) [\#4748](https://github.com/kokkos/kokkos/pull/4748)
+
+#### Other improvements:
+- Add math function long double overload on the host side [\#4712](https://github.com/kokkos/kokkos/pull/4712)
+
+### Deprecations:
+- Deprecate array reductions with pointer return types [\#4756](https://github.com/kokkos/kokkos/pull/4756)
+- Deprecate `partition_master`, `validate_partition` [\#4737](https://github.com/kokkos/kokkos/pull/4737)
+- Deprecate `Kokkos_ENABLE_PTHREAD` in favor of `Kokkos_ENABLE_THREADS` (pairs with the switch to `std::thread`) [\#4619](https://github.com/kokkos/kokkos/pull/4619)
+- Deprecate `log2(unsigned) -> int` (removing in next release) [\#4595](https://github.com/kokkos/kokkos/pull/4595)
+- Deprecate `Kokkos::Impl::is_view` [\#4592](https://github.com/kokkos/kokkos/pull/4592)
+- Deprecate `KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_*` macros and the `ActiveExecutionMemorySpace` alias [\#4668](https://github.com/kokkos/kokkos/issues/4668)
+
+### Backends and Archs Enhancements:
+
+#### SYCL:
+- Update required SYCL compiler version [\#4749](https://github.com/kokkos/kokkos/pull/4749)
+- Cap vector size to kernel maximum for SYCL [\#4704](https://github.com/kokkos/kokkos/pull/4704)
+- Improve check for compatibility of vector size and subgroup size in SYCL [\#4579](https://github.com/kokkos/kokkos/pull/4579)
+- Provide `chunk_size` for SYCL [\#4635](https://github.com/kokkos/kokkos/pull/4635)
+- Use host-pinned memory for SYCL kernel memory [\#4627](https://github.com/kokkos/kokkos/pull/4627)
+- Use shuffle-based algorithm for scalar reduction [\#4608](https://github.com/kokkos/kokkos/pull/4608)
+- Implement pool of USM IndirectKernelMemory [\#4596](https://github.com/kokkos/kokkos/pull/4596)
+- Provide valid default team size for SYCL [\#4481](https://github.com/kokkos/kokkos/pull/4481)
+
+#### CUDA:
+- Add checks for shmem usage in `parallel_reduce` [\#4548](https://github.com/kokkos/kokkos/pull/4548)
+
+#### HIP:
+- Add support for fp16 in the HIP backend [\#4688](https://github.com/kokkos/kokkos/pull/4688)
+- Disable multiple kernel instantiations when using HIP (configure with `-DKokkos_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS=ON` to use) [\#4644](https://github.com/kokkos/kokkos/pull/4644)
+- Fix HIP scratch use per instance [\#4439](https://github.com/kokkos/kokkos/pull/4439)
+- Change allocation header to 256B alignment for AMD VEGA architecture [\#4753](https://github.com/kokkos/kokkos/pull/4753)
+- Add generic `KOKKOS_ARCH_VEGA` macro [\#4782](https://github.com/kokkos/kokkos/pull/4782)
+- Require ROCm 4.5 [\#4689](https://github.com/kokkos/kokkos/pull/4689)
+
+#### HPX:
+- Adapt to HPX 1.7.0 which is now required [\#4241](https://github.com/kokkos/kokkos/pull/4241)
+
+#### OpenMP:
+- Fix thread deduction for OpenMP with `thread_count==0` [\#4541](https://github.com/kokkos/kokkos/pull/4541)
+
+#### OpenMPTarget:
+- Update memory space `size_type` to improve performance (`size_t -> unsigned`) [\#4779](https://github.com/kokkos/kokkos/pull/4779)
+
+#### Other Improvements:
+- Improve NVHPC support [\#4599](https://github.com/kokkos/kokkos/pull/4599)
+- Add `Kokkos::Experimental::{min,max,minmax,clamp}` (see the sketch after this list) [\#4629](https://github.com/kokkos/kokkos/pull/4629) [\#4506](https://github.com/kokkos/kokkos/pull/4506)
+- Use device type as template argument in Containers and Algorithms [\#4724](https://github.com/kokkos/kokkos/pull/4724) [\#4675](https://github.com/kokkos/kokkos/pull/4675)
+- Implement `Kokkos::sort` with execution space [\#4490](https://github.com/kokkos/kokkos/pull/4490)
+- `Kokkos::resize` now always errors out on a runtime rank mismatch [\#4681](https://github.com/kokkos/kokkos/pull/4681)
+- Print current call stack when calling `Kokkos::abort()` from the host [\#4672](https://github.com/kokkos/kokkos/pull/4672) [\#4671](https://github.com/kokkos/kokkos/pull/4671)
+- Detect mismatch of execution spaces in functors [\#4655](https://github.com/kokkos/kokkos/pull/4655)
+- Improve view label access on host [\#4647](https://github.com/kokkos/kokkos/pull/4647)
+- Error out for `const` scalar return type in reduction [\#4645](https://github.com/kokkos/kokkos/pull/4645)
+- Don't allow calling `UnorderedMap::value_at` for a set [\#4639](https://github.com/kokkos/kokkos/pull/4639)
+- Add `KOKKOS_COMPILER_NVHPC` macro, disable `quiet_NaN` and `signaling_NaN` [\#4586](https://github.com/kokkos/kokkos/pull/4586)
+- Improve performance of `local_deep_copy` [\#4511](https://github.com/kokkos/kokkos/pull/4511)
+- Improve performance when sorting integers [\#4464](https://github.com/kokkos/kokkos/pull/4464)
+- Add missing numeric traits (`denorm_min`, `reciprocal_overflow_threshold`, `{quiet,signaling}_NaN`) and make them work on cv-qualified types [\#4466](https://github.com/kokkos/kokkos/pull/4466) [\#4415](https://github.com/kokkos/kokkos/pull/4415) [\#4473](https://github.com/kokkos/kokkos/pull/4473) [\#4443](https://github.com/kokkos/kokkos/pull/4443)
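+
+A minimal sketch of the new helpers, which mirror the `std::` algorithms but are device-callable; `clamp_between` is a hypothetical wrapper:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+// Device-callable counterparts of std::{min,max,clamp}.
+KOKKOS_FUNCTION double clamp_between(double x, double a, double b) {
+  const double lo = Kokkos::Experimental::min(a, b);
+  const double hi = Kokkos::Experimental::max(a, b);
+  return Kokkos::Experimental::clamp(x, lo, hi);
+}
+```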
+
+### Implemented enhancements BuildSystem
+- Manually compute IntelLLVM compiler version for older CMake versions [\#4760](https://github.com/kokkos/kokkos/pull/4760)
+- Add Xptxas without = to `nvcc_wrapper` [\#4646](https://github.com/kokkos/kokkos/pull/4646)
+- Use external GoogleTest optionally [\#4563](https://github.com/kokkos/kokkos/pull/4563)
+- Silence warnings about multiple optimization flags with `nvcc_wrapper` [\#4502](https://github.com/kokkos/kokkos/pull/4502)
+- Use the same flags in Makefile.kokkos for POWER7/8/9 as for CMake [\#4483](https://github.com/kokkos/kokkos/pull/4483)
+- Fix support for A64FX architecture [\#4745](https://github.com/kokkos/kokkos/pull/4745)
+
+### Incompatibilities:
+- Drop `KOKKOS_ARCH_HIP` macro when using generated GNU makefiles [\#4786](https://github.com/kokkos/kokkos/pull/4786)
+- Remove gcc-toolchain auto add for clang in Makefile.kokkos [\#4762](https://github.com/kokkos/kokkos/pull/4762)
+
+### Bug Fixes:
+- Lock constant memory in Cuda/HIP kernel launch with a mutex (thread safety) [\#4525](https://github.com/kokkos/kokkos/pull/4525)
+- Fix overflow for large requested scratch allocation [\#4551](https://github.com/kokkos/kokkos/pull/4551)
+- Fix Windows build with MinGW [\#4564](https://github.com/kokkos/kokkos/pull/4564)
+- Fix `kokkos_launch_compiler`: escape `$` character [\#4769](https://github.com/kokkos/kokkos/pull/4769) [\#4703](https://github.com/kokkos/kokkos/pull/4703)
+- Fix math functions with NVCC and GCC 5 as host compiler [\#4733](https://github.com/kokkos/kokkos/pull/4733)
+- Fix shared build with Intel19 [\#4725](https://github.com/kokkos/kokkos/pull/4725)
+- Do not install empty `desul/src/` directory [\#4714](https://github.com/kokkos/kokkos/pull/4714)
+- Fix wrong `device_id` computation in `identifier_from_devid` (Profiling Interface) [\#4694](https://github.com/kokkos/kokkos/pull/4694)
+- Fix a bug in CUDA scratch memory pool (abnormally high memory consumption) [\#4673](https://github.com/kokkos/kokkos/pull/4673)
+- Remove eval of command args in `hpcbind` [\#4630](https://github.com/kokkos/kokkos/pull/4630)
+- SYCL fix to run when no GPU is detected [\#4623](https://github.com/kokkos/kokkos/pull/4623)
+- Fix `layout_strides::span` for rank-0 views [\#4605](https://github.com/kokkos/kokkos/pull/4605)
+- Fix SYCL atomics for local memory [\#4585](https://github.com/kokkos/kokkos/pull/4585)
+- Hotfix `mdrange_large_deep_copy` for SYCL [\#4581](https://github.com/kokkos/kokkos/pull/4581)
+- Fix bug when sorting integer using the HIP backend [\#4570](https://github.com/kokkos/kokkos/pull/4570)
+- Fix compilation error when using HIP with RDC [\#4553](https://github.com/kokkos/kokkos/pull/4553)
+- `DynamicView`: Fix deallocation extent [\#4533](https://github.com/kokkos/kokkos/pull/4533)
+- SYCL fix running parallel_reduce with TeamPolicy for large ranges [\#4532](https://github.com/kokkos/kokkos/pull/4532)
+- Fix bash syntax error in `nvcc_wrapper` [\#4524](https://github.com/kokkos/kokkos/pull/4524)
+- OpenMPTarget `team_policy` reduce fixes for `init/join` reductions [\#4521](https://github.com/kokkos/kokkos/pull/4521)
+- Avoid hangs in the Threads backend [\#4499](https://github.com/kokkos/kokkos/pull/4499)
+- OpenMPTarget fix reduction bug in `parallel_reduce` for `TeamPolicy` [\#4491](https://github.com/kokkos/kokkos/pull/4491)
+- HIP fix scratch space per instance [\#4439](https://github.com/kokkos/kokkos/pull/4439)
+- OpenMPTarget fix team scratch allocation [\#4431](https://github.com/kokkos/kokkos/pull/4431)
+
+
+## [3.5.00](https://github.com/kokkos/kokkos/tree/3.5.00) (2021-10-19)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.4.01...3.5.00)
+
+### Features:
+
+- Add support for quad-precision math functions/traits [\#4098](https://github.com/kokkos/kokkos/pull/4098)
+- Adding ExecutionSpace partitioning function [\#4096](https://github.com/kokkos/kokkos/pull/4096)
+- Improve Python Interop Capabilities [\#4065](https://github.com/kokkos/kokkos/pull/4065)
+- Add half_t Kokkos::rand specialization [\#3922](https://github.com/kokkos/kokkos/pull/3922)
+- Add math special functions: erf, erfcx, expint1, Bessel functions, Hankel functions (see the sketch after this list) [\#3920](https://github.com/kokkos/kokkos/pull/3920)
+- Add missing common mathematical functions [\#4043](https://github.com/kokkos/kokkos/pull/4043) [\#4036](https://github.com/kokkos/kokkos/pull/4036) [\#4034](https://github.com/kokkos/kokkos/pull/4034)
+- Let the numeric traits be SFINAE-friendly [\#4038](https://github.com/kokkos/kokkos/pull/4038)
+- Add Desul atomics - enabling memory-order and memory-scope parameters [\#3247](https://github.com/kokkos/kokkos/pull/3247)
+- Add detection idiom from the C++ standard library extension version 2 [\#3980](https://github.com/kokkos/kokkos/pull/3980)
+- Fence Profiling Support in all backends [\#3966](https://github.com/kokkos/kokkos/pull/3966) [\#4304](https://github.com/kokkos/kokkos/pull/4304) [\#4258](https://github.com/kokkos/kokkos/pull/4258) [\#4232](https://github.com/kokkos/kokkos/pull/4232)
+- Significant SYCL enhancements (see below)
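+
+A minimal sketch of calling one of the new special functions inside a kernel, assuming the special-functions header is reachable through `<Kokkos_Core.hpp>`; the Views `x` and `y` are hypothetical:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void apply_expint1(Kokkos::View<double*> x, Kokkos::View<double*> y) {
+  Kokkos::parallel_for("expint1", x.extent(0), KOKKOS_LAMBDA(const int i) {
+    // Exponential integral E1, one of the special functions added here.
+    y(i) = Kokkos::Experimental::expint1(x(i));
+  });
+}
+```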
+
+### Deprecations:
+
+- Deprecate CUDA_SAFE_CALL and HIP_SAFE_CALL [\#4249](https://github.com/kokkos/kokkos/pull/4249)
+- Deprecate Kokkos::Impl::Timer (Kokkos::Timer has been available for a long time) [\#4201](https://github.com/kokkos/kokkos/pull/4201)
+- Deprecate Experimental::MasterLock [\#4094](https://github.com/kokkos/kokkos/pull/4094)
+- Deprecate Kokkos_TaskPolicy.hpp (headers got reorganized, doesn't remove functionality) [\#4011](https://github.com/kokkos/kokkos/pull/4011)
+- Deprecate backward compatibility features [\#3978](https://github.com/kokkos/kokkos/pull/3978)
+- Update and deprecate is_space::host_memory/execution/mirror_space [\#3973](https://github.com/kokkos/kokkos/pull/3973)
+
+
+### Backends and Archs Enhancements:
+
+- Enable `ConstBitset` constructors in kernels [\#4296](https://github.com/kokkos/kokkos/pull/4296)
+- Use ZeroMemset in View constructor to improve performance [\#4226](https://github.com/kokkos/kokkos/pull/4226)
+- Use memset in deep_copy [\#3944](https://github.com/kokkos/kokkos/pull/3944)
+- Add missing fence() calls in resize(View) that effectively do deep_copy(resized, orig) [\#4212](https://github.com/kokkos/kokkos/pull/4212)
+- Avoid allocations in resize and realloc [\#4207](https://github.com/kokkos/kokkos/pull/4207)
+- StaticCsrGraph: use device type instead of execution space to construct views [\#3991](https://github.com/kokkos/kokkos/pull/3991)
+- Consider std::sort when view is accessible from host [\#3929](https://github.com/kokkos/kokkos/pull/3929)
+- Fix C++20 warnings except for volatile [\#4312](https://github.com/kokkos/kokkos/pull/4312)
+
+#### SYCL:
+- Introduce SYCLHostUSMSpace [\#4268](https://github.com/kokkos/kokkos/pull/4268)
+- Implement SYCL TeamPolicy for vector_size > 1 [\#4183](https://github.com/kokkos/kokkos/pull/4183)
+- Enable 64bit ranges for SYCL [\#4211](https://github.com/kokkos/kokkos/pull/4211)
+- Don't print SYCL device info in execution space initialization [\#4168](https://github.com/kokkos/kokkos/pull/4168)
+- Improve SYCL MDRangePolicy performance [\#4161](https://github.com/kokkos/kokkos/pull/4161)
+- Use sub_groups in SYCL parallel_scan [\#4147](https://github.com/kokkos/kokkos/pull/4147)
+- Implement subgroup reduction for SYCL RangePolicy parallel_reduce [\#3940](https://github.com/kokkos/kokkos/pull/3940)
+- Use DPC++ broadcast extension in SYCL team_broadcast [\#4103](https://github.com/kokkos/kokkos/pull/4103)
+- Only fence in SYCL parallel_reduce for non-device-accessible result_ptr [\#4089](https://github.com/kokkos/kokkos/pull/4089)
+- Improve fencing behavior in SYCL backend [\#4088](https://github.com/kokkos/kokkos/pull/4088)
+- Fence all registered SYCL queues before deallocating memory [\#4086](https://github.com/kokkos/kokkos/pull/4086)
+- Implement SYCL::print_configuration [\#3992](https://github.com/kokkos/kokkos/pull/3992)
+- Reuse scratch memory in parallel_scan and TeamPolicy (decreases memory footprint) [\#3899](https://github.com/kokkos/kokkos/pull/3899) [\#3889](https://github.com/kokkos/kokkos/pull/3889)
+
+#### CUDA:
+- Cuda improve heuristic for blocksize [\#4271](https://github.com/kokkos/kokkos/pull/4271)
+- Don't use [[deprecated]] for nvcc [\#4229](https://github.com/kokkos/kokkos/pull/4229)
+- Improve error message for NVHPC as host compiler [\#4227](https://github.com/kokkos/kokkos/pull/4227)
+- Update support for cuda reductions to work with types smaller than 4 bytes [\#4156](https://github.com/kokkos/kokkos/pull/4156)
+- Fix incompatible team size deduction in rare `parallel_reduce` cases [\#4142](https://github.com/kokkos/kokkos/pull/4142)
+- Remove UVM usage in DynamicView [\#4129](https://github.com/kokkos/kokkos/pull/4129)
+- Remove dependency between core and containers [\#4114](https://github.com/kokkos/kokkos/pull/4114)
+- Adding opt-in CudaMallocSync support when using CUDA version >= 11.2 [\#4026](https://github.com/kokkos/kokkos/pull/4026) [\#4233](https://github.com/kokkos/kokkos/pull/4233)
+- Fix a potential race condition in the CUDA backend [\#3999](https://github.com/kokkos/kokkos/pull/3999)
+
+#### HIP:
+- Implement new blocksize deduction method for HIP Backend [\#3953](https://github.com/kokkos/kokkos/pull/3953)
+- Add multiple LaunchMechanism [\#3820](https://github.com/kokkos/kokkos/pull/3820)
+- Make HIP backend thread-safe [\#4170](https://github.com/kokkos/kokkos/pull/4170)
+
+#### Serial:
+- Refactor Serial backend and fix thread-safety issue [\#4053](https://github.com/kokkos/kokkos/pull/4053)
+
+#### OpenMPTarget:
+- OpenMPTarget: support array reductions in RangePolicy [\#4040](https://github.com/kokkos/kokkos/pull/4040)
+- OpenMPTarget: add MDRange parallel_reduce [\#4032](https://github.com/kokkos/kokkos/pull/4032)
+- OpenMPTarget: Fix bug for the case of a reducer [\#4044](https://github.com/kokkos/kokkos/pull/4044)
+- OpenMPTarget: verify process fix [\#4041](https://github.com/kokkos/kokkos/pull/4041)
+
+### Implemented enhancements BuildSystem
+
+#### Important BuildSystem Updates:
+- Use hipcc architecture autodetection when Kokkos_ARCH is not set [\#3941](https://github.com/kokkos/kokkos/pull/3941)
+- Introduce Kokkos_ENABLE_DEPRECATION_WARNINGS and remove deprecated code with Kokkos_ENABLE_DEPRECATED_CODE_3 [\#4106](https://github.com/kokkos/kokkos/pull/4106) [\#3855](https://github.com/kokkos/kokkos/pull/3855)
+
+#### Other Improvements:
+- Add allow-unsupported-compiler flag to `nvcc_wrapper` [\#4298](https://github.com/kokkos/kokkos/pull/4298)
+- nvcc_wrapper: fix errors in argument handling [\#3993](https://github.com/kokkos/kokkos/pull/3993)
+- Add support for `-time=<file>` and `-time <file>` in `nvcc_wrapper` [\#4015](https://github.com/kokkos/kokkos/pull/4015)
+- nvcc_wrapper: suppress duplicates of GPU architecture and RDC flags [\#3968](https://github.com/kokkos/kokkos/pull/3968)
+- Fix TMPDIR support in nvcc_wrapper [\#3792](https://github.com/kokkos/kokkos/pull/3792)
+- NVHPC: update PGI compiler arch flags [\#4133](https://github.com/kokkos/kokkos/pull/4133)
+- Replace PGI with NVHPC (works for both) [\#4196](https://github.com/kokkos/kokkos/pull/4196)
+- Make sure that KOKKOS_CXX_HOST_COMPILER_ID is defined [\#4235](https://github.com/kokkos/kokkos/pull/4235)
+- Add options to Makefile builds for deprecated code and warnings [\#4215](https://github.com/kokkos/kokkos/pull/4215)
+- Use KOKKOS_CXX_HOST_COMPILER_ID for identifying CPU arch flags [\#4199](https://github.com/kokkos/kokkos/pull/4199)
+- Added support for Cray Clang to Makefile.kokkos [\#4176](https://github.com/kokkos/kokkos/pull/4176)
+- Add XLClang as compiler [\#4120](https://github.com/kokkos/kokkos/pull/4120)
+- Keep quoted compiler flags when passing to Trilinos [\#3987](https://github.com/kokkos/kokkos/pull/3987)
+- Add support for AMD Zen3 CPU architecture [\#3972](https://github.com/kokkos/kokkos/pull/3972)
+- Rename IntelClang to IntelLLVM [\#3945](https://github.com/kokkos/kokkos/pull/3945)
+- Add cppcoreguidelines-pro-type-cstyle-cast to clang-tidy [\#3522](https://github.com/kokkos/kokkos/pull/3522)
+- Add sve bit size definition for A64FX [\#3947](https://github.com/kokkos/kokkos/pull/3947) [\#3946](https://github.com/kokkos/kokkos/pull/3946)
+- Remove KOKKOS_ENABLE_DEBUG_PRINT_KERNEL_NAMES [\#4150](https://github.com/kokkos/kokkos/pull/4150)
+
+### Other Changes:
+
+#### Tool Enhancements:
+
+- Retrieve original value from a point in a MultidimensionalSparseTuningProblem [\#3977](https://github.com/kokkos/kokkos/pull/3977)
+- Allow extension of built-in tuners with additional tuning axes [\#3961](https://github.com/kokkos/kokkos/pull/3961)
+- Added a categorical tuner [\#3955](https://github.com/kokkos/kokkos/pull/3955)
+
+
+#### Miscellaneous:
+
+- hpcbind: Use double quotes around $@ when invoking user command [\#4284](https://github.com/kokkos/kokkos/pull/4284)
+- Add file and line to error message [\#3985](https://github.com/kokkos/kokkos/pull/3985)
+- Fix compiler warnings when compiling with nvc++ [\#4198](https://github.com/kokkos/kokkos/pull/4198)
+- Add OpenMPTarget CI build on AMD GPUs [\#4055](https://github.com/kokkos/kokkos/pull/4055)
+- CI: icpx is now part of intel container [\#4002](https://github.com/kokkos/kokkos/pull/4002)
+
+### Incompatibilities:
+
+- Remove pre-CUDA-9 `KOKKOS_IMPL_CUDA_*` macros [\#4138](https://github.com/kokkos/kokkos/pull/4138)
+
+### Bug Fixes:
+- UnorderedMap::clear() should zero the size() [\#4130](https://github.com/kokkos/kokkos/pull/4130)
+- Add memory fence for HostSharedPtr::cleanup() [\#4144](https://github.com/kokkos/kokkos/pull/4144)
+- SYCL: Fix race conditions in TeamPolicy::parallel_reduce [\#4418](https://github.com/kokkos/kokkos/pull/4418)
+- Adding missing memory fence to serial exec space fence. [\#4292](https://github.com/kokkos/kokkos/pull/4292)
+- Fix using external SYCL queues in tests [\#4291](https://github.com/kokkos/kokkos/pull/4291)
+- Fix digits10 bug [\#4281](https://github.com/kokkos/kokkos/pull/4281)
+- Fixes constexpr errors with frounding-math on gcc < 10. [\#4278](https://github.com/kokkos/kokkos/pull/4278)
+- Fix compiler flags for PGI/NVHPC [\#4264](https://github.com/kokkos/kokkos/pull/4264)
+- Fix Zen2/3 also implying Zen Arch with Makefiles [\#4260](https://github.com/kokkos/kokkos/pull/4260)
+- Kokkos_Cuda.hpp: Fix shadow warning with cuda/11.0 [\#4252](https://github.com/kokkos/kokkos/pull/4252)
+- Fix issue with static initialization of function attributes [\#4242](https://github.com/kokkos/kokkos/pull/4242)
+- Disable long double hypot test on Power systems [\#4221](https://github.com/kokkos/kokkos/pull/4221)
+- Fix false sharing in random pool [\#4218](https://github.com/kokkos/kokkos/pull/4218)
+- Fix a missing memory_fence for debug shared alloc code [\#4216](https://github.com/kokkos/kokkos/pull/4216)
+- Fix two xl issues [\#4179](https://github.com/kokkos/kokkos/pull/4179)
+- Makefile.kokkos: fix (standard_in) 1: syntax error [\#4173](https://github.com/kokkos/kokkos/pull/4173)
+- Fixes for query_device example [\#4172](https://github.com/kokkos/kokkos/pull/4172)
+- Fix a bug when using HIP atomic with Kokkos::Complex [\#4159](https://github.com/kokkos/kokkos/pull/4159)
+- Fix mistaken logic in pthread creation [\#4157](https://github.com/kokkos/kokkos/pull/4157)
+- Define KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION when requesting Kokkos_ENABLE_AGGRESSIVE_VECTORIZATION=ON [\#4107](https://github.com/kokkos/kokkos/pull/4107)
+- Fix compilation with latest MSVC version [\#4102](https://github.com/kokkos/kokkos/pull/4102)
+- Fix incorrect macro definitions when compiling with Intel compiler on Windows [\#4087](https://github.com/kokkos/kokkos/pull/4087)
+- Fixup global buffer overflow in hand-rolled string manipulation [\#4070](https://github.com/kokkos/kokkos/pull/4070)
+- Fixup heap buffer overflow in command-line argument parsing unit tests [\#4069](https://github.com/kokkos/kokkos/pull/4069)
+- Only add quotes in compiler flags for Trilinos if necessary [\#4067](https://github.com/kokkos/kokkos/pull/4067)
+- Fixed invocation of tools init callbacks [\#4061](https://github.com/kokkos/kokkos/pull/4061)
+- Work around SYCL JIT compiler issues with static variables [\#4013](https://github.com/kokkos/kokkos/pull/4013)
+- Fix TestDetectionIdiom.cpp test inclusion for Trilinos/TriBITS [\#4010](https://github.com/kokkos/kokkos/pull/4010)
+- Fixup allocation headers with OpenMPTarget backend [\#4003](https://github.com/kokkos/kokkos/pull/4003)
+- Add missing specialization for OMPT to Kokkos Random [\#3967](https://github.com/kokkos/kokkos/pull/3967)
+- Disable hypot long double test on power arches [\#3962](https://github.com/kokkos/kokkos/pull/3962)
+- Use different EBO workaround for MSVC (rebased) [\#3924](https://github.com/kokkos/kokkos/pull/3924)
+- Fix SYCL Kokkos::Profiling::(de)allocateData calls [\#3928](https://github.com/kokkos/kokkos/pull/3928)
+
+## [3.4.01](https://github.com/kokkos/kokkos/tree/3.4.01) (2021-05-19)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.4.00...3.4.01)
+
+**Bug Fixes:**
+- Windows: Remove atomic_compare_exchange_strong overload conflicts with Windows [\#4024](https://github.com/kokkos/kokkos/pull/4024)
+- OpenMPTarget: Fixup allocation headers with OpenMPTarget backend [\#4020](https://github.com/kokkos/kokkos/pull/4020)
+- OpenMPTarget: Add missing specialization for OMPT to Kokkos Random [\#4022](https://github.com/kokkos/kokkos/pull/4022)
+- AMD: Add support for AMD Zen3 CPU architecture [\#4021](https://github.com/kokkos/kokkos/pull/4021)
+- SYCL: Implement SYCL::print_configuration [\#4012](https://github.com/kokkos/kokkos/pull/4012)
+- Containers: StaticCsrGraph: use device type instead of execution space to construct views [\#3998](https://github.com/kokkos/kokkos/pull/3998)
+- nvcc_wrapper: fix errors in argument handling, suppress duplicates of GPU architecture and RDC flags [\#4006](https://github.com/kokkos/kokkos/pull/4006)
+- CI: Add icpx testing to intel container [\#4004](https://github.com/kokkos/kokkos/pull/4004)
+- CMake/TRIBITS: Keep quoted compiler flags when passing to Trilinos [\#4007](https://github.com/kokkos/kokkos/pull/4007)
+- CMake: Rename IntelClang to IntelLLVM [\#3945](https://github.com/kokkos/kokkos/pull/3945)
+
+## [3.4.00](https://github.com/kokkos/kokkos/tree/3.4.00) (2021-04-25)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.3.01...3.4.00)
+
+**Highlights:**
+- SYCL Backend Almost Feature Complete
+- OpenMPTarget Backend Almost Feature Complete
+- Performance Improvements for HIP backend
+- Require CMake 3.16 or newer
+- Tool Callback Interface Enhancements
+- cmath wrapper functions available now in Kokkos::Experimental
+
+**Features:**
+- Implement parallel_scan with ThreadVectorRange and Reducer [\#3861](https://github.com/kokkos/kokkos/pull/3861)
+- Implement SYCL Random [\#3849](https://github.com/kokkos/kokkos/pull/3849)
+- OpenMPTarget: Adding Implementation for nested reducers [\#3845](https://github.com/kokkos/kokkos/pull/3845)
+- Implement UniqueToken for SYCL (see the sketch after this list) [\#3833](https://github.com/kokkos/kokkos/pull/3833)
+- OpenMPTarget: UniqueToken::Global implementation [\#3823](https://github.com/kokkos/kokkos/pull/3823)
+- DualView syncs on ExecutionSpaces [\#3822](https://github.com/kokkos/kokkos/pull/3822)
+- SYCL outer TeamPolicy parallel_reduce [\#3818](https://github.com/kokkos/kokkos/pull/3818)
+- SYCL TeamPolicy::team_scan [\#3815](https://github.com/kokkos/kokkos/pull/3815)
+- SYCL MDRangePolicy parallel_reduce [\#3801](https://github.com/kokkos/kokkos/pull/3801)
+- Enable use of execution space instances in ScatterView [\#3786](https://github.com/kokkos/kokkos/pull/3786)
+- SYCL TeamPolicy nested parallel_reduce [\#3783](https://github.com/kokkos/kokkos/pull/3783)
+- OpenMPTarget: MDRange with TagType for parallel_for [\#3781](https://github.com/kokkos/kokkos/pull/3781)
+- Adding OpenMPTarget parallel_scan [\#3655](https://github.com/kokkos/kokkos/pull/3655)
+- SYCL basic TeamPolicy [\#3654](https://github.com/kokkos/kokkos/pull/3654)
+- OpenMPTarget: scratch memory implementation [\#3611](https://github.com/kokkos/kokkos/pull/3611)
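+
+A minimal sketch of the `UniqueToken` pattern, which this release also implements for SYCL; the `scratch` View and its use are hypothetical:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void with_token(Kokkos::View<double**> scratch, int n) {
+  // Hands each concurrently executing iteration a unique index into a
+  // shared pool of resources.
+  Kokkos::Experimental::UniqueToken<Kokkos::DefaultExecutionSpace> token;
+  Kokkos::parallel_for("tokened", n, KOKKOS_LAMBDA(int) {
+    const int id = token.acquire();
+    scratch(id, 0) += 1.0;  // exclusive use of row id while the token is held
+    token.release(id);
+  });
+}
+```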
+
+**Implemented enhancements Backends and Archs:**
+- SYCL choose a specific GPU [\#3918](https://github.com/kokkos/kokkos/pull/3918)
+- [HIP] Lock access to scratch memory when using Teams [\#3916](https://github.com/kokkos/kokkos/pull/3916)
+- [HIP] fix multithreaded access to get_next_driver [\#3908](https://github.com/kokkos/kokkos/pull/3908)
+- Forward declare HIPHostPinnedSpace and SYCLSharedUSMSpace [\#3902](https://github.com/kokkos/kokkos/pull/3902)
+- Let SYCL USMObjectMem use SharedAllocationRecord [\#3898](https://github.com/kokkos/kokkos/pull/3898)
+- Implement clock_tic for SYCL [\#3893](https://github.com/kokkos/kokkos/pull/3893)
+- Don't use a static variable in HIPInternal::scratch_space [\#3866](https://github.com/kokkos/kokkos/pull/3866)
+- Reuse memory for SYCL parallel_reduce [\#3873](https://github.com/kokkos/kokkos/pull/3873)
+- Update SYCL compiler in CI [\#3826](https://github.com/kokkos/kokkos/pull/3826)
+- Introduce HostSharedPtr to manage m_space_instance for Cuda/HIP/SYCL [\#3824](https://github.com/kokkos/kokkos/pull/3824)
+- [HIP] Use shuffle for range reduction [\#3811](https://github.com/kokkos/kokkos/pull/3811)
+- OpenMPTarget: Changes to the hierarchical parallelism [\#3808](https://github.com/kokkos/kokkos/pull/3808)
+- Remove ExtendedReferenceWrapper for SYCL parallel_reduce [\#3802](https://github.com/kokkos/kokkos/pull/3802)
+- Eliminate sycl_indirect_launch [\#3777](https://github.com/kokkos/kokkos/pull/3777)
+- OpenMPTarget: scratch implementation for parallel_reduce [\#3776](https://github.com/kokkos/kokkos/pull/3776)
+- Allow initializing SYCL execution space from sycl::queue and SYCL::impl_static_fence [\#3767](https://github.com/kokkos/kokkos/pull/3767)
+- SYCL TeamPolicy scratch memory alternative [\#3763](https://github.com/kokkos/kokkos/pull/3763)
+- Alternative implementation for SYCL TeamPolicy [\#3759](https://github.com/kokkos/kokkos/pull/3759)
+- Unify handling of synchronous errors in SYCL [\#3754](https://github.com/kokkos/kokkos/pull/3754)
+- core/Cuda: Half_t updates for cgsolve [\#3746](https://github.com/kokkos/kokkos/pull/3746)
+- Unify HIPParallelLaunch structures [\#3733](https://github.com/kokkos/kokkos/pull/3733)
+- Improve performance for SYCL parallel_reduce [\#3732](https://github.com/kokkos/kokkos/pull/3732)
+- Use consistent types in Kokkos_OpenMPTarget_Parallel.hpp [\#3703](https://github.com/kokkos/kokkos/pull/3703)
+- Implement non-blocking kernel launches for HIP backend [\#3697](https://github.com/kokkos/kokkos/pull/3697)
+- Change SYCLInternal::m_queue std::unique_ptr -> std::optional [\#3677](https://github.com/kokkos/kokkos/pull/3677)
+- Use alternative SYCL parallel_reduce implementation [\#3671](https://github.com/kokkos/kokkos/pull/3671)
+- Use runtime values in KokkosExp_MDRangePolicy.hpp [\#3626](https://github.com/kokkos/kokkos/pull/3626)
+- Clean up AnalyzePolicy [\#3564](https://github.com/kokkos/kokkos/pull/3564)
+- Changes for indirect launch of SYCL parallel reduce [\#3511](https://github.com/kokkos/kokkos/pull/3511)
+
+**Implemented enhancements BuildSystem:**
+- Also require C++14 when building gtest [\#3912](https://github.com/kokkos/kokkos/pull/3912)
+- Fix compiling SYCL with OpenMP [\#3874](https://github.com/kokkos/kokkos/pull/3874)
+- Require C++17 for SYCL (at configuration time) [\#3869](https://github.com/kokkos/kokkos/pull/3869)
+- Add COMPILE_DEFINITIONS argument to kokkos_create_imported_tpl [\#3862](https://github.com/kokkos/kokkos/pull/3862)
+- Do not pass arch flags to the linker with no rdc [\#3846](https://github.com/kokkos/kokkos/pull/3846)
+- Try compiling C++14 check with C++14 support and print error message [\#3843](https://github.com/kokkos/kokkos/pull/3843)
+- Enable HIP with Cray Clang [\#3842](https://github.com/kokkos/kokkos/pull/3842)
+- Add an option to disable header self containment tests [\#3834](https://github.com/kokkos/kokkos/pull/3834)
+- CMake check for C++14 [\#3809](https://github.com/kokkos/kokkos/pull/3809)
+- Prefer -std=* over --std=* [\#3779](https://github.com/kokkos/kokkos/pull/3779)
+- Kokkos launch compiler updates [\#3778](https://github.com/kokkos/kokkos/pull/3778)
+- Updated comments and enabled no-op for kokkos_launch_compiler [\#3774](https://github.com/kokkos/kokkos/pull/3774)
+- Apple's Clang not correctly recognised [\#3772](https://github.com/kokkos/kokkos/pull/3772)
+- kokkos_launch_compiler + CUDA auto-detect arch [\#3770](https://github.com/kokkos/kokkos/pull/3770)
+- Add Spack test support for Kokkos [\#3753](https://github.com/kokkos/kokkos/pull/3753)
+- Split SYCL tests for aot compilation [\#3741](https://github.com/kokkos/kokkos/pull/3741)
+- Use consistent OpenMP flag for IntelClang [\#3735](https://github.com/kokkos/kokkos/pull/3735)
+- Add support for -Wno-deprecated-gpu-targets [\#3722](https://github.com/kokkos/kokkos/pull/3722)
+- Add configuration to target CUDA compute capability 8.6 [\#3713](https://github.com/kokkos/kokkos/pull/3713)
+- Added VERSION and SOVERSION to KOKKOS_INTERNAL_ADD_LIBRARY [\#3706](https://github.com/kokkos/kokkos/pull/3706)
+- Add fast-math to known NVCC flags [\#3699](https://github.com/kokkos/kokkos/pull/3699)
+- Add MI-100 arch string [\#3698](https://github.com/kokkos/kokkos/pull/3698)
+- Require CMake >=3.16 [\#3679](https://github.com/kokkos/kokkos/pull/3679)
+- KokkosCI.cmake, KokkosCTest.cmake.in, CTestConfig.cmake.in + CI updates [\#2844](https://github.com/kokkos/kokkos/pull/2844)
+
+**Implemented enhancements Tools:**
+- Improve readability of the callback invocation in profiling [\#3860](https://github.com/kokkos/kokkos/pull/3860)
+- V1.1 Tools Interface: incremental, action-based [\#3812](https://github.com/kokkos/kokkos/pull/3812)
+- Enable launch latency simulations [\#3721](https://github.com/kokkos/kokkos/pull/3721)
+- Added metadata callback to tools interface [\#3711](https://github.com/kokkos/kokkos/pull/3711)
+- MDRange Tile Size Tuning [\#3688](https://github.com/kokkos/kokkos/pull/3688)
+- Added support for command-line args for kokkos-tools [\#3627](https://github.com/kokkos/kokkos/pull/3627)
+- Query max tile sizes for an MDRangePolicy, and set tile sizes on an existing policy [\#3481](https://github.com/kokkos/kokkos/pull/3481)
+
+**Implemented enhancements Other:**
+- Try detecting ndevices in get_gpu [\#3921](https://github.com/kokkos/kokkos/pull/3921)
+- Use strcmp to compare names() [\#3909](https://github.com/kokkos/kokkos/pull/3909)
+- Add execution space arguments for constructor overloads that might allocate a new underlying View [\#3904](https://github.com/kokkos/kokkos/pull/3904)
+- Prefix labels in internal use of kokkos_malloc [\#3891](https://github.com/kokkos/kokkos/pull/3891)
+- Prefix labels for internal uses of SharedAllocationRecord [\#3890](https://github.com/kokkos/kokkos/pull/3890)
+- Add missing hypot math function [\#3880](https://github.com/kokkos/kokkos/pull/3880)
+- Unify algorithm unit tests to avoid code duplication [\#3851](https://github.com/kokkos/kokkos/pull/3851)
+- DualView.template view() better matches for Devices in UVMSpace cases [\#3857](https://github.com/kokkos/kokkos/pull/3857)
+- More extensive disentangling of Policy Traits [\#3829](https://github.com/kokkos/kokkos/pull/3829)
+- Replaced nanosleep and sched_yield with STL routines [\#3825](https://github.com/kokkos/kokkos/pull/3825)
+- Constructing Atomic Subviews [\#3810](https://github.com/kokkos/kokkos/pull/3810)
+- Metadata Declaration in Core [\#3729](https://github.com/kokkos/kokkos/pull/3729)
+- Allow using tagged final functor in parallel_reduce [\#3714](https://github.com/kokkos/kokkos/pull/3714)
+- Major duplicate code removal in SharedAllocationRecord specializations [\#3658](https://github.com/kokkos/kokkos/pull/3658)
+
+**Fixed bugs:**
+- Provide forward declarations in Kokkos_ViewLayoutTiled.hpp for XL [\#3911](https://github.com/kokkos/kokkos/pull/3911)
+- Fixup absolute value of floating-point values in Kokkos complex [\#3882](https://github.com/kokkos/kokkos/pull/3882)
+- Address intel 17 ICE [\#3881](https://github.com/kokkos/kokkos/pull/3881)
+- Add missing pow(Kokkos::complex) overloads [\#3868](https://github.com/kokkos/kokkos/pull/3868)
+- Fix bug in {pow, log}(Kokkos::complex) [\#3866](https://github.com/kokkos/kokkos/pull/3866)
+- Cleanup writing to output streams in Cuda [\#3859](https://github.com/kokkos/kokkos/pull/3859)
+- Fixup cache CUDA fallback execution space instance used by DualView::sync [\#3856](https://github.com/kokkos/kokkos/pull/3856)
+- Fix cmake warning with pthread [\#3854](https://github.com/kokkos/kokkos/pull/3854)
+- Fix typo FOUND_CUDA_{DRIVVER -> DRIVER} [\#3852](https://github.com/kokkos/kokkos/pull/3852)
+- Fix bug in SYCL team_reduce [\#3848](https://github.com/kokkos/kokkos/pull/3848)
+- Fix atrocious bug in MDRange tuning [\#3803](https://github.com/kokkos/kokkos/pull/3803)
+- Fix compiling SYCL with Kokkos_ENABLE_TUNING=ON [\#3800](https://github.com/kokkos/kokkos/pull/3800)
+- Fixed command line parsing bug [\#3797](https://github.com/kokkos/kokkos/pull/3797)
+- Workaround race condition in SYCL parallel_reduce [\#3782](https://github.com/kokkos/kokkos/pull/3782)
+- Fix Atomic{Min,Max} for Kepler30 [\#3780](https://github.com/kokkos/kokkos/pull/3780)
+- Fix SYCL typo [\#3755](https://github.com/kokkos/kokkos/pull/3755)
+- Fixed Kokkos_install_additional_files macro [\#3752](https://github.com/kokkos/kokkos/pull/3752)
+- Fix a typo for Kokkos_ARCH_A64FX [\#3751](https://github.com/kokkos/kokkos/pull/3751)
+- OpenMPTarget: fixes and workarounds to work with "Release" build type [\#3748](https://github.com/kokkos/kokkos/pull/3748)
+- Fix parsing bug for number of devices command line argument [\#3724](https://github.com/kokkos/kokkos/pull/3724)
+- Avoid more warnings with clang and C++20 [\#3719](https://github.com/kokkos/kokkos/pull/3719)
+- Fix gcc-10.1 C++20 warnings [\#3718](https://github.com/kokkos/kokkos/pull/3718)
+- Fix cuda cache config not being set correctly [\#3712](https://github.com/kokkos/kokkos/pull/3712)
+- Fix dualview deepcopy perftools [\#3701](https://github.com/kokkos/kokkos/pull/3701)
+- Use `drand` instead of `frand` in `drand` [\#3696](https://github.com/kokkos/kokkos/pull/3696)
+
+**Incompatibilities:**
+- Remove unimplemented member functions of SYCLDevice [\#3919](https://github.com/kokkos/kokkos/pull/3919)
+- Replace cl::sycl [\#3896](https://github.com/kokkos/kokkos/pull/3896)
+- Get rid of SYCL workaround in Kokkos_Complex.hpp [\#3884](https://github.com/kokkos/kokkos/pull/3884)
+- Replace most uses of if_c [\#3883](https://github.com/kokkos/kokkos/pull/3883)
+- Remove Impl::enable_if_type [\#3863](https://github.com/kokkos/kokkos/pull/3863)
+- Remove HostBarrier test [\#3847](https://github.com/kokkos/kokkos/pull/3847)
+- Avoid (void) interface [\#3836](https://github.com/kokkos/kokkos/pull/3836)
+- Remove VerifyExecutionCanAccessMemorySpace [\#3813](https://github.com/kokkos/kokkos/pull/3813)
+- Avoid duplicated code in ScratchMemorySpace [\#3793](https://github.com/kokkos/kokkos/pull/3793)
+- Remove superfluous FunctorFinal specialization [\#3788](https://github.com/kokkos/kokkos/pull/3788)
+- Rename cl::sycl -> sycl in Kokkos_MathematicalFunctions.hpp [\#3678](https://github.com/kokkos/kokkos/pull/3678)
+- Remove integer_sequence backward compatibility implementation [\#3533](https://github.com/kokkos/kokkos/pull/3533)
+
+**Enabled tests:**
+- Fixup re-enable core performance tests [\#3903](https://github.com/kokkos/kokkos/pull/3903)
+- Enable more SYCL tests [\#3900](https://github.com/kokkos/kokkos/pull/3900)
+- Restrict MDRange Policy tests for Intel GPUs [\#3853](https://github.com/kokkos/kokkos/pull/3853)
+- Disable death tests for rawhide [\#3844](https://github.com/kokkos/kokkos/pull/3844)
+- OpenMPTarget: Block unit tests that do not pass with the nvidia compiler [\#3839](https://github.com/kokkos/kokkos/pull/3839)
+- Enable Bitset container test for SYCL [\#3830](https://github.com/kokkos/kokkos/pull/3830)
+- Enable some more SYCL tests [\#3744](https://github.com/kokkos/kokkos/pull/3744)
+- Enable SYCL atomic tests [\#3742](https://github.com/kokkos/kokkos/pull/3742)
+- Enable more SYCL perf_tests [\#3692](https://github.com/kokkos/kokkos/pull/3692)
+- Enable examples for SYCL [\#3691](https://github.com/kokkos/kokkos/pull/3691)
+
+## [3.3.01](https://github.com/kokkos/kokkos/tree/3.3.01) (2021-01-06)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.3.00...3.3.01)
+
+**Bug Fixes:**
+- Fix severe performance bug in DualView which added memcpys for sync and modify [\#3693](https://github.com/kokkos/kokkos/issues/3693)
+- Fix performance bug in CUDA backend, where the CUDA cache config was not set correctly.
+
+## [3.3.00](https://github.com/kokkos/kokkos/tree/3.3.00) (2020-12-16)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.2.01...3.3.00)
+
+**Features:**
+- Require C++14 as minimum C++ standard. C++17 and C++20 are supported too.
+- HIP backend is nearly feature complete. Kokkos Dynamic Task Graphs are missing.
+- Major update for OpenMPTarget: many capabilities now work. For details contact us.
+- Added DPC++/SYCL backend: primary capabilities are working.
+- Added Kokkos Graph API analogous to CUDA Graphs.
+- Added parallel_scan support with TeamThreadRange (see the sketch after this list) [\#3536](https://github.com/kokkos/kokkos/pull/3536)
+- Added Logical Memory Spaces [\#3546](https://github.com/kokkos/kokkos/pull/3546)
+- Added initial half precision support [\#3439](https://github.com/kokkos/kokkos/pull/3439)
+- Experimental feature: control cuda occupancy [\#3379](https://github.com/kokkos/kokkos/pull/3379)
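+
+A minimal sketch of a team-level `parallel_scan` over a `TeamThreadRange`; the rank-2 Views `counts` and `offsets` are hypothetical and assumed to have matching extents:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+void team_prefix_sums(Kokkos::View<int**> counts, Kokkos::View<int**> offsets) {
+  using policy_type = Kokkos::TeamPolicy<>;
+  const int nteams = static_cast<int>(counts.extent(0));
+  const int n      = static_cast<int>(counts.extent(1));
+  Kokkos::parallel_for(
+      "team_scan", policy_type(nteams, Kokkos::AUTO),
+      KOKKOS_LAMBDA(const policy_type::member_type& team) {
+        const int r = team.league_rank();
+        Kokkos::parallel_scan(
+            Kokkos::TeamThreadRange(team, n),
+            [=](const int i, int& partial, const bool final_pass) {
+              partial += counts(r, i);
+              if (final_pass) offsets(r, i) = partial;  // inclusive scan
+            });
+      });
+}
+```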
+
+**Implemented enhancements Backends and Archs:**
+- Add A64FX and Fujitsu compiler support [\#3614](https://github.com/kokkos/kokkos/pull/3614)
+- Add support for the AMD gfx908 architecture [\#3375](https://github.com/kokkos/kokkos/pull/3375)
+- SYCL parallel\_for MDRangePolicy [\#3583](https://github.com/kokkos/kokkos/pull/3583)
+- SYCL add parallel\_scan [\#3577](https://github.com/kokkos/kokkos/pull/3577)
+- SYCL custom reductions [\#3544](https://github.com/kokkos/kokkos/pull/3544)
+- SYCL Enable container unit tests [\#3550](https://github.com/kokkos/kokkos/pull/3550)
+- SYCL feature level 5 [\#3480](https://github.com/kokkos/kokkos/pull/3480)
+- SYCL Feature level 4 (parallel\_for) [\#3474](https://github.com/kokkos/kokkos/pull/3474)
+- SYCL feature level 3 [\#3451](https://github.com/kokkos/kokkos/pull/3451)
+- SYCL feature level 2 [\#3447](https://github.com/kokkos/kokkos/pull/3447)
+- OpenMPTarget: Hierarchical reduction for + operator on scalars [\#3504](https://github.com/kokkos/kokkos/pull/3504)
+- OpenMPTarget hierarchical [\#3411](https://github.com/kokkos/kokkos/pull/3411)
+- HIP Add Impl::atomic\_[store,load] [\#3440](https://github.com/kokkos/kokkos/pull/3440)
+- HIP enable global lock arrays [\#3418](https://github.com/kokkos/kokkos/pull/3418)
+- HIP Implement multiple occupancy paths for various HIP kernel launchers [\#3366](https://github.com/kokkos/kokkos/pull/3366)
+
+**Implemented enhancements Policies:**
+- MDRangePolicy: Let it be semiregular [\#3494](https://github.com/kokkos/kokkos/pull/3494)
+- MDRangePolicy: Check narrowing conversion in construction [\#3527](https://github.com/kokkos/kokkos/pull/3527)
+- MDRangePolicy: CombinedReducers support [\#3395](https://github.com/kokkos/kokkos/pull/3395)
+- Kokkos Graph: Interface and Default Implementation [\#3362](https://github.com/kokkos/kokkos/pull/3362)
+- Kokkos Graph: add Cuda Graph implementation [\#3369](https://github.com/kokkos/kokkos/pull/3369)
+- TeamPolicy: implemented autotuning of team sizes and vector lengths [\#3206](https://github.com/kokkos/kokkos/pull/3206)
+- RangePolicy: Initialize all data members in default constructor [\#3509](https://github.com/kokkos/kokkos/pull/3509)
+
+**Implemented enhancements BuildSystem:**
+- Auto-generate core test files for all backends [\#3488](https://github.com/kokkos/kokkos/pull/3488)
+- Avoid rewriting test files when calling cmake [\#3548](https://github.com/kokkos/kokkos/pull/3548)
+- RULE\_LAUNCH\_COMPILE and RULE\_LAUNCH\_LINK system for nvcc\_wrapper [\#3136](https://github.com/kokkos/kokkos/pull/3136)
+- Adding -include as a known argument to nvcc\_wrapper [\#3434](https://github.com/kokkos/kokkos/pull/3434)
+- Install hpcbind script [\#3402](https://github.com/kokkos/kokkos/pull/3402)
+- cmake/kokkos\_tribits.cmake: add parsing for args [\#3457](https://github.com/kokkos/kokkos/pull/3457)
+
+**Implemented enhancements Tools:**
+- Changed namespacing of Kokkos::Tools::Impl::Impl::tune\_policy [\#3455](https://github.com/kokkos/kokkos/pull/3455)
+- Delegate to an impl allocate/deallocate method to allow specifying a SpaceHandle for MemorySpaces [\#3530](https://github.com/kokkos/kokkos/pull/3530)
+- Use the Kokkos Profiling interface rather than the Impl interface [\#3518](https://github.com/kokkos/kokkos/pull/3518)
+- Runtime option for tuning [\#3459](https://github.com/kokkos/kokkos/pull/3459)
+- Dual View Tool Events [\#3326](https://github.com/kokkos/kokkos/pull/3326)
+
+**Implemented enhancements Other:**
+- Abort on errors instead of just printing [\#3528](https://github.com/kokkos/kokkos/pull/3528)
+- Enable C++14 macros unconditionally [\#3449](https://github.com/kokkos/kokkos/pull/3449)
+- Make ViewMapping trivially copyable [\#3436](https://github.com/kokkos/kokkos/pull/3436)
+- Rename struct ViewMapping to class [\#3435](https://github.com/kokkos/kokkos/pull/3435)
+- Replace enums in Kokkos\_ViewMapping.hpp (removes -Wextra) [\#3422](https://github.com/kokkos/kokkos/pull/3422)
+- Use bool for enums representing bools [\#3416](https://github.com/kokkos/kokkos/pull/3416)
+- Fence active instead of default execution space instances [\#3388](https://github.com/kokkos/kokkos/pull/3388)
+- Refactor parallel\_reduce fence usage [\#3359](https://github.com/kokkos/kokkos/pull/3359)
+- Moved Space EBO helpers to Kokkos\_EBO [\#3357](https://github.com/kokkos/kokkos/pull/3357)
+- Add remove\_cvref type trait [\#3340](https://github.com/kokkos/kokkos/pull/3340)
+- Adding identity type traits and update definition of identity\_t alias [\#3339](https://github.com/kokkos/kokkos/pull/3339)
+- Add is\_specialization\_of type trait [\#3338](https://github.com/kokkos/kokkos/pull/3338)
+- Make ScratchMemorySpace semi-regular [\#3309](https://github.com/kokkos/kokkos/pull/3309)
+- Optimize min/max atomics with early exit on no-op case [\#3265](https://github.com/kokkos/kokkos/pull/3265)
+- Refactor Backend Development [\#2941](https://github.com/kokkos/kokkos/pull/2941)
+
+**Fixed bugs:**
+- Fixup MDRangePolicy construction from Kokkos arrays [\#3591](https://github.com/kokkos/kokkos/pull/3591)
+- Add atomic functions for unsigned long long using gcc built-in [\#3588](https://github.com/kokkos/kokkos/pull/3588)
+- Fixup silent pointless comparison with zero in checked\_narrow\_cast (compiler workaround) [\#3566](https://github.com/kokkos/kokkos/pull/3566)
+- Fixes for ROCm 3.9 [\#3565](https://github.com/kokkos/kokkos/pull/3565)
+- Fix windows build issues which crept in for the CUDA build [\#3532](https://github.com/kokkos/kokkos/pull/3532)
+- HIP Fix atomics of large data types and clean up lock arrays [\#3529](https://github.com/kokkos/kokkos/pull/3529)
+- Pthreads fix exception resulting from 0 grain size [\#3510](https://github.com/kokkos/kokkos/pull/3510)
+- Fixup do not require atomic operation to be default constructible [\#3503](https://github.com/kokkos/kokkos/pull/3503)
+- Fix race condition in HIP backend [\#3467](https://github.com/kokkos/kokkos/pull/3467)
+- Replace KOKKOS\_DEBUG with KOKKOS\_ENABLE\_DEBUG [\#3458](https://github.com/kokkos/kokkos/pull/3458)
+- Fix multi-stream team scratch space definition for HIP [\#3398](https://github.com/kokkos/kokkos/pull/3398)
+- HIP fix template deduction [\#3393](https://github.com/kokkos/kokkos/pull/3393)
+- Fix compiling with HIP and C++17 [\#3390](https://github.com/kokkos/kokkos/pull/3390)
+- Fix SIGFPE in HIP blocksize deduction [\#3378](https://github.com/kokkos/kokkos/pull/3378)
+- Type alias change: replace CS with CTS to avoid conflicts with NVSHMEM [\#3348](https://github.com/kokkos/kokkos/pull/3348)
+- Clang compilation of CUDA backend on Windows [\#3345](https://github.com/kokkos/kokkos/pull/3345)
+- Fix HBW support [\#3343](https://github.com/kokkos/kokkos/pull/3343)
+- Added missing fences to unique token [\#3260](https://github.com/kokkos/kokkos/pull/3260)
+
+**Incompatibilities:**
+- Remove unused utilities (forward, move, and expand\_variadic) from Kokkos::Impl [\#3535](https://github.com/kokkos/kokkos/pull/3535)
+- Remove unused traits [\#3534](https://github.com/kokkos/kokkos/pull/3534)
+- HIP: Remove old HCC code [\#3301](https://github.com/kokkos/kokkos/pull/3301)
+- Prepare for deprecation of ViewAllocateWithoutInitializing [\#3264](https://github.com/kokkos/kokkos/pull/3264)
+- Remove ROCm backend [\#3148](https://github.com/kokkos/kokkos/pull/3148)
+
+## [3.2.01](https://github.com/kokkos/kokkos/tree/3.2.01) (2020-11-17)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.2.00...3.2.01)
+
+**Fixed bugs:**
+- Disallow KOKKOS\_ENABLE\_CUDA\_RELOCATABLE\_DEVICE\_CODE in shared library builds [\#3332](https://github.com/kokkos/kokkos/pull/3332)
+- Do not install libprinter-tool when testing is enabled [\#3313](https://github.com/kokkos/kokkos/pull/3313)
+- Fix restrict/alignment following refactor [\#3373](https://github.com/kokkos/kokkos/pull/3373)
+ - Intel fix: workaround compiler issue with using statement [\#3383](https://github.com/kokkos/kokkos/pull/3383)
+- Fix zero-length reductions [\#3364](https://github.com/kokkos/kokkos/pull/3364)
+ - Pthread zero-length reduction fix [\#3452](https://github.com/kokkos/kokkos/pull/3452)
+ - HPX zero-length reduction fix [\#3470](https://github.com/kokkos/kokkos/pull/3470)
+ - cuda/9.2 zero-length reduction fix [\#3580](https://github.com/kokkos/kokkos/pull/3580)
+- Fix multi-stream scratch [\#3269](https://github.com/kokkos/kokkos/pull/3269)
+- Guard KOKKOS_ALL_COMPILE_OPTIONS if Cuda is not enabled [\#3387](https://github.com/kokkos/kokkos/pull/3387)
+- Do not include link flags for Fortran linkage [\#3384](https://github.com/kokkos/kokkos/pull/3384)
+- Fix NVIDIA GPU arch macro with autodetection [\#3473](https://github.com/kokkos/kokkos/pull/3473)
+- Fix libdl/test issues with Trilinos [\#3543](https://github.com/kokkos/kokkos/pull/3543)
+ - Register Pthread as Tribits option to be enabled with Trilinos [\#3558](https://github.com/kokkos/kokkos/pull/3558)
+
+**Implemented enhancements:**
+- Separate Cuda timing-based tests into their own executable [\#3407](https://github.com/kokkos/kokkos/pull/3407)
+
+## [3.2.00](https://github.com/kokkos/kokkos/tree/3.2.00) (2020-08-19)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.1.01...3.2.00)
+
+**Implemented enhancements:**
+
+- HIP: Enable stream in HIP [\#3163](https://github.com/kokkos/kokkos/issues/3163)
+- HIP: Add support for shuffle reduction for the HIP backend [\#3154](https://github.com/kokkos/kokkos/issues/3154)
+- HIP: Add implementations of missing HIPHostPinnedSpace methods for LAMMPS [\#3137](https://github.com/kokkos/kokkos/issues/3137)
+- HIP: Require HIP 3.5.0 or higher [\#3099](https://github.com/kokkos/kokkos/issues/3099)
+- HIP: WorkGraphPolicy for HIP [\#3096](https://github.com/kokkos/kokkos/issues/3096)
+- OpenMPTarget: Significant update to the new experimental backend: requires C++17, works on Intel GPUs, and fixes reference counting [\#3169](https://github.com/kokkos/kokkos/issues/3169)
+- Windows Cuda support [\#3018](https://github.com/kokkos/kokkos/issues/3018)
+- Pass `-Wext-lambda-captures-this` to NVCC when support for `__host__ __device__` lambda is enabled from CUDA 11 [\#3241](https://github.com/kokkos/kokkos/issues/3241)
+- Use explicit staging buffer for constant memory kernel launches and cleanup host/device synchronization [\#3234](https://github.com/kokkos/kokkos/issues/3234)
+- Various fixups to policies, including making TeamPolicy default constructible and making RangePolicy and TeamPolicy assignable: [\#3202](https://github.com/kokkos/kokkos/issues/3202), [\#3203](https://github.com/kokkos/kokkos/issues/3203), [\#3196](https://github.com/kokkos/kokkos/issues/3196)
+- Annotations for `DefaultExecutionSpace` and `DefaultHostExecutionSpace` to use in static analysis [\#3189](https://github.com/kokkos/kokkos/issues/3189)
+- Add documentation on using Spack to install Kokkos and developing packages that depend on Kokkos [\#3187](https://github.com/kokkos/kokkos/issues/3187)
+- Add OpenMPTarget backend flags for NVC++ compiler [\#3185](https://github.com/kokkos/kokkos/issues/3185)
+- Move deep\_copy/create\_mirror\_view on Experimental::OffsetView into Kokkos:: namespace [\#3166](https://github.com/kokkos/kokkos/issues/3166)
+- Allow for larger block size in HIP [\#3165](https://github.com/kokkos/kokkos/issues/3165)
+- View: Added names of Views to the different View initialize/free kernels [\#3159](https://github.com/kokkos/kokkos/issues/3159)
+- Cuda: Caching cudaFunctorAttributes and whether L1/Shmem prefer was set [\#3151](https://github.com/kokkos/kokkos/issues/3151)
+- BuildSystem: Improved performance in default configuration by defaulting to Release build [\#3131](https://github.com/kokkos/kokkos/issues/3131)
+- Cuda: Update CUDA occupancy calculation [\#3124](https://github.com/kokkos/kokkos/issues/3124)
+- Vector: Adding data() to Vector [\#3123](https://github.com/kokkos/kokkos/issues/3123)
+- BuildSystem: Add CUDA Ampere configuration support [\#3122](https://github.com/kokkos/kokkos/issues/3122)
+- General: Apply [[noreturn]] to Kokkos::abort when applicable [\#3106](https://github.com/kokkos/kokkos/issues/3106)
+- TeamPolicy: Validate storage level argument passed to TeamPolicy::set\_scratch\_size() [\#3098](https://github.com/kokkos/kokkos/issues/3098)
+- BuildSystem: Make kokkos\_has\_string() function in Makefile.kokkos case insensitive [\#3091](https://github.com/kokkos/kokkos/issues/3091)
+- Modify KOKKOS\_FUNCTION macro for clang-tidy analysis [\#3087](https://github.com/kokkos/kokkos/issues/3087)
+- Move allocation profiling to allocate/deallocate calls [\#3084](https://github.com/kokkos/kokkos/issues/3084)
+- BuildSystem: FATAL\_ERROR when attempting in-source build [\#3082](https://github.com/kokkos/kokkos/issues/3082)
+- Change enums in ScatterView to types [\#3076](https://github.com/kokkos/kokkos/issues/3076)
+- HIP: Changes for new compiler/runtime [\#3067](https://github.com/kokkos/kokkos/issues/3067)
+- Extract and use get\_gpu [\#3061](https://github.com/kokkos/kokkos/issues/3061), [\#3048](https://github.com/kokkos/kokkos/issues/3048)
+- Add is\_allocated to View-like containers [\#3059](https://github.com/kokkos/kokkos/issues/3059)
+- Combined reducers for scalar references [\#3052](https://github.com/kokkos/kokkos/issues/3052)
+- Add configurable capacity for UniqueToken [\#3051](https://github.com/kokkos/kokkos/issues/3051)
+- Add installation testing [\#3034](https://github.com/kokkos/kokkos/issues/3034)
+- HIP: Add UniqueToken [\#3020](https://github.com/kokkos/kokkos/issues/3020)
+- Autodetect number of devices [\#3013](https://github.com/kokkos/kokkos/issues/3013)
+
+**Fixed bugs:**
+
+- Check error code from `cudaStreamSynchronize` in CUDA fences [\#3255](https://github.com/kokkos/kokkos/issues/3255)
+- Fix issue with C++ standard flags when using `nvcc_wrapper` with PGI [\#3254](https://github.com/kokkos/kokkos/issues/3254)
+- Add missing threadfence in lock-based atomics [\#3208](https://github.com/kokkos/kokkos/issues/3208)
+- Fix dedup of linker flags for shared lib on CMake <=3.12 [\#3176](https://github.com/kokkos/kokkos/issues/3176)
+- Fix memory leak with CUDA streams [\#3170](https://github.com/kokkos/kokkos/issues/3170)
+- BuildSystem: Fix OpenMP Target flags for Cray [\#3161](https://github.com/kokkos/kokkos/issues/3161)
+- ScatterView: fix for OpenMPTarget; remove inheritance from reducers [\#3162](https://github.com/kokkos/kokkos/issues/3162)
+- BuildSystem: Set OpenMP flags according to host compiler [\#3127](https://github.com/kokkos/kokkos/issues/3127)
+- OpenMP: Fix logic for nested omp in partition\_master bug [\#3101](https://github.com/kokkos/kokkos/issues/3101)
+- nvcc\_wrapper: send --cudart to nvcc instead of host compiler [\#3092](https://github.com/kokkos/kokkos/issues/3092)
+- BuildSystem: Fixes for Cuda/11 and c++17 [\#3085](https://github.com/kokkos/kokkos/issues/3085)
+- HIP: Fix print\_configuration [\#3080](https://github.com/kokkos/kokkos/issues/3080)
+- Conditionally define get\_gpu [\#3072](https://github.com/kokkos/kokkos/issues/3072)
+- Fix bounds for ranges in random number generator [\#3069](https://github.com/kokkos/kokkos/issues/3069)
+- Fix Cuda minor arch check [\#3035](https://github.com/kokkos/kokkos/issues/3035)
+- BuildSystem: Add -expt-relaxed-constexpr flag to nvcc\_wrapper [\#3021](https://github.com/kokkos/kokkos/issues/3021)
+
+**Incompatibilities:**
+
+- Remove ETI support [\#3157](https://github.com/kokkos/kokkos/issues/3157)
+- Remove KOKKOS\_INTERNAL\_ENABLE\_NON\_CUDA\_BACKEND [\#3147](https://github.com/kokkos/kokkos/issues/3147)
+- Remove core/unit\_test/config [\#3146](https://github.com/kokkos/kokkos/issues/3146)
+- Removed the preprocessor branch for KOKKOS\_ENABLE\_PROFILING [\#3115](https://github.com/kokkos/kokkos/issues/3115)
+- Disable profiling with MSVC [\#3066](https://github.com/kokkos/kokkos/issues/3066)
+
+**Closed issues:**
+
+- Silent error \(Validate storage level arg to set\_scratch\_size\) [\#3097](https://github.com/kokkos/kokkos/issues/3097)
+- Remove KOKKOS\_ENABLE\_PROFILING Option [\#3095](https://github.com/kokkos/kokkos/issues/3095)
+- Cuda 11 -\> allow C++17 [\#3083](https://github.com/kokkos/kokkos/issues/3083)
+- In source build failure not explained [\#3081](https://github.com/kokkos/kokkos/issues/3081)
+- Allow naming of Views for initialization kernel [\#3070](https://github.com/kokkos/kokkos/issues/3070)
+- DefaultInit tests failing when using CTest resource allocation feature [\#3040](https://github.com/kokkos/kokkos/issues/3040)
+- Add installation testing. [\#3037](https://github.com/kokkos/kokkos/issues/3037)
+- nvcc\_wrapper needs to handle `-expt-relaxed-constexpr` flag [\#3017](https://github.com/kokkos/kokkos/issues/3017)
+- CPU core oversubscription warning on macOS with OpenMP backend [\#2996](https://github.com/kokkos/kokkos/issues/2996)
+- Default behavior of KOKKOS\_NUM\_DEVICES to use all devices available [\#2975](https://github.com/kokkos/kokkos/issues/2975)
+- Assert blocksize \> 0 [\#2974](https://github.com/kokkos/kokkos/issues/2974)
+- Add ability to assign kokkos profile function from executable [\#2973](https://github.com/kokkos/kokkos/issues/2973)
+- ScatterView Support for the pre/post increment operator [\#2967](https://github.com/kokkos/kokkos/issues/2967)
+- Compiler issue: Cuda build with clang 10 has errors with the atomic unit tests [\#3237](https://github.com/kokkos/kokkos/issues/3237)
+- Incompatibility of flags for C++ standard with PGI v20.4 on Power9/NVIDIA V100 system [\#3252](https://github.com/kokkos/kokkos/issues/3252)
+- Error configuring as subproject [\#3140](https://github.com/kokkos/kokkos/issues/3140)
+- CMake fails with Nvidia compilers when the GPU architecture option is not supplied (Fix configure with OMPT and Cuda) [\#3207](https://github.com/kokkos/kokkos/issues/3207)
+- PGI compiler being passed the gcc -fopenmp flag [\#3125](https://github.com/kokkos/kokkos/issues/3125)
+- Cuda: Memory leak when using CUDA stream [\#3167](https://github.com/kokkos/kokkos/issues/3167)
+- RangePolicy has an implicitly deleted assignment operator [\#3192](https://github.com/kokkos/kokkos/issues/3192)
+- MemorySpace::allocate needs to have memory pool counting. [\#3064](https://github.com/kokkos/kokkos/issues/3064)
+- Missing write fence for lock based atomics on CUDA [\#3038](https://github.com/kokkos/kokkos/issues/3038)
+- CUDA compute capability version check problem [\#3026](https://github.com/kokkos/kokkos/issues/3026)
+- Make DynRankView fencing consistent [\#3014](https://github.com/kokkos/kokkos/issues/3014)
+- nvcc\_wrapper can't handle -Xcompiler -o out.o [\#2993](https://github.com/kokkos/kokkos/issues/2993)
+- Reductions of non-trivial types of size 4 fail in CUDA shfl operations [\#2990](https://github.com/kokkos/kokkos/issues/2990)
+- complex\_double misalignment in reduce, clang+CUDA [\#2989](https://github.com/kokkos/kokkos/issues/2989)
+- Span of degenerate \(zero-length\) subviews is not zero in some special cases [\#2979](https://github.com/kokkos/kokkos/issues/2979)
+- Rank 1 custom layouts don't work as expected. [\#2840](https://github.com/kokkos/kokkos/issues/2840)
+
+## [3.1.01](https://github.com/kokkos/kokkos/tree/3.1.1) (2020-04-14)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.1.00...3.1.1)
+
+**Fixed bugs:**
+
+- Fix complex\_double misalignment in reduce, clang+CUDA [\#2989](https://github.com/kokkos/kokkos/issues/2989)
+- Fix compilation fails when profiling disabled and CUDA enabled [\#3001](https://github.com/kokkos/kokkos/issues/3001)
+- Fix cuda reduction of non-trivial scalars of size 4 [\#2990](https://github.com/kokkos/kokkos/issues/2990)
+- Configure and install version file when building in Trilinos [\#2957](https://github.com/kokkos/kokkos/pull/2957)
+- Fix OpenMPTarget build missing include and namespace [\#3000](https://github.com/kokkos/kokkos/issues/3000)
+- Fix typo in KOKKOS\_SET\_EXE\_PROPERTY\(\) [\#2959](https://github.com/kokkos/kokkos/issues/2959)
+- Fix non-zero span subviews of zero sized subviews [\#2979](https://github.com/kokkos/kokkos/issues/2979)
+
+## [3.1.00](https://github.com/kokkos/kokkos/tree/3.1.00) (2020-04-14)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/3.0.00...3.1.00)
+
+**Features:**
+
+- HIP Support for AMD
+- OpenMPTarget Support with clang
+- Windows VS19 (Serial) Support [\#1533](https://github.com/kokkos/kokkos/issues/1533)
+
+**Implemented enhancements:**
+
+- generate\_makefile.bash should allow tests to be disabled [\#2886](https://github.com/kokkos/kokkos/issues/2886)
+- clang/7+cuda/9 build -Werror-unused parameter error in nightly test [\#2884](https://github.com/kokkos/kokkos/issues/2884)
+- ScatterView memory space is not user settable [\#2826](https://github.com/kokkos/kokkos/issues/2826)
+- clang/8+cuda/10.0 build error with c++17 [\#2809](https://github.com/kokkos/kokkos/issues/2809)
+- warnings.... [\#2805](https://github.com/kokkos/kokkos/issues/2805)
+- Kokkos version in cpp define [\#2787](https://github.com/kokkos/kokkos/issues/2787)
+- Remove Defunct QThreads Backend [\#2751](https://github.com/kokkos/kokkos/issues/2751)
+- Improve Kokkos::fence behavior with multiple execution spaces [\#2659](https://github.com/kokkos/kokkos/issues/2659)
+- polylithic\(?\) initialization of Kokkos [\#2658](https://github.com/kokkos/kokkos/issues/2658)
+- Unnecessary\(?\) check for host execution space initialization from Cuda initialization [\#2652](https://github.com/kokkos/kokkos/issues/2652)
+- Kokkos error reporting failures with CUDA GPUs in exclusive mode [\#2471](https://github.com/kokkos/kokkos/issues/2471)
+- atomicMax equivalent \(and other atomics\) [\#2401](https://github.com/kokkos/kokkos/issues/2401)
+- Fix alignment for Kokkos::complex [\#2255](https://github.com/kokkos/kokkos/issues/2255)
+- Warnings with Cuda 10.1 [\#2206](https://github.com/kokkos/kokkos/issues/2206)
+- dual view with Kokkos::ViewAllocateWithoutInitializing [\#2188](https://github.com/kokkos/kokkos/issues/2188)
+- Check error code from cudaOccupancyMaxActiveBlocksPerMultiprocessor [\#2172](https://github.com/kokkos/kokkos/issues/2172)
+- Add non-member Kokkos::resize/realloc for DualView [\#2170](https://github.com/kokkos/kokkos/issues/2170)
+- Construct DualView without initialization [\#2046](https://github.com/kokkos/kokkos/issues/2046)
+- Expose is\_assignable to determine if one view can be assigned to another [\#1936](https://github.com/kokkos/kokkos/issues/1936)
+- profiling label [\#1935](https://github.com/kokkos/kokkos/issues/1935)
+- team\_broadcast of bool failed on CUDA backend [\#1908](https://github.com/kokkos/kokkos/issues/1908)
+- View static\_extent [\#660](https://github.com/kokkos/kokkos/issues/660)
+- Misleading Kokkos::Cuda::initialize ERROR message when compiled for wrong GPU architecture [\#1944](https://github.com/kokkos/kokkos/issues/1944)
+- Cryptic Error When Malloc Fails [\#2164](https://github.com/kokkos/kokkos/issues/2164)
+- Drop support for intermediate standards in CMake [\#2336](https://github.com/kokkos/kokkos/issues/2336)
+
+**Fixed bugs:**
+
+- DualView sync\_device with length zero creates cuda errors [\#2946](https://github.com/kokkos/kokkos/issues/2946)
+- building with nvcc and clang \(or clang based XL\) as host compiler: "Kokkos::atomic\_fetch\_min\(volatile int \*, int\)" has already been defined [\#2903](https://github.com/kokkos/kokkos/issues/2903)
+- Cuda 9.1,10.1 debug builds failing due to -Werror=unused-parameter [\#2880](https://github.com/kokkos/kokkos/issues/2880)
+- clang -Werror: Kokkos\_FixedBufferMemoryPool.hpp:140:28: error: unused parameter 'alloc\_size' [\#2869](https://github.com/kokkos/kokkos/issues/2869)
+- intel/16.0.1, intel/17.0.1 nightly build failures with debugging enabled [\#2867](https://github.com/kokkos/kokkos/issues/2867)
+- intel/16.0.1 debug build errors [\#2863](https://github.com/kokkos/kokkos/issues/2863)
+- xl/16.1.1 with cpp14, openmp build, nightly test failures [\#2856](https://github.com/kokkos/kokkos/issues/2856)
+- Intel nightly test failures: team\_vector [\#2852](https://github.com/kokkos/kokkos/issues/2852)
+- Kokkos Views with intmax/2\<N\<intmax can hang during construction [\#2850](https://github.com/kokkos/kokkos/issues/2850)
+- workgraph\_fib test seg-faults with threads backend and hwloc [\#2797](https://github.com/kokkos/kokkos/issues/2797)
+- cuda.view\_64bit test hangs on Power8+Kepler37 system - develop and 2.9.00 branches [\#2771](https://github.com/kokkos/kokkos/issues/2771)
+- device\_type for Kokkos\_Random? [\#2693](https://github.com/kokkos/kokkos/issues/2693)
+- "More than one tag given" error in Experimental::require\(\) [\#2608](https://github.com/kokkos/kokkos/issues/2608)
+- Segfault on Marvell from our finalization stack [\#2542](https://github.com/kokkos/kokkos/issues/2542)
+
+## [3.0.00](https://github.com/kokkos/kokkos/tree/3.0.00) (2020-01-27)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.9.00...3.0.00)
+
+**Implemented enhancements:**
+
+- BuildSystem: Standalone Modern CMake Support [\#2104](https://github.com/kokkos/kokkos/issues/2104)
+- StyleFormat: ClangFormat Style [\#2157](https://github.com/kokkos/kokkos/issues/2157)
+- Documentation: Document build system and CMake philosophy [\#2263](https://github.com/kokkos/kokkos/issues/2263)
+- BuildSystem: Add Alias with Namespace Kokkos:: to Internal Libraries [\#2530](https://github.com/kokkos/kokkos/issues/2530)
+- BuildSystem: Universal Kokkos find\_package [\#2099](https://github.com/kokkos/kokkos/issues/2099)
+- BuildSystem: Dropping support for Kokkos\_{DEVICES,OPTIONS,ARCH} in CMake [\#2329](https://github.com/kokkos/kokkos/issues/2329)
+- BuildSystem: Set Kokkos\_DEVICES and Kokkos\_ARCH variables in exported CMake configuration [\#2193](https://github.com/kokkos/kokkos/issues/2193)
+- BuildSystem: Drop support for CUDA 7 and CUDA 8 [\#2489](https://github.com/kokkos/kokkos/issues/2489)
+- BuildSystem: Drop CMake option SEPARATE\_TESTS [\#2266](https://github.com/kokkos/kokkos/issues/2266)
+- BuildSystem: Support expt-relaxed-constexpr same as expt-extended-lambda [\#2411](https://github.com/kokkos/kokkos/issues/2411)
+- BuildSystem: Add Xnvlink to command line options allowed in nvcc\_wrapper [\#2197](https://github.com/kokkos/kokkos/issues/2197)
+- BuildSystem: Install Kokkos config files and target files to lib/cmake/Kokkos [\#2162](https://github.com/kokkos/kokkos/issues/2162)
+- BuildSystem: nvcc\_wrapper and C++14 [\#2035](https://github.com/kokkos/kokkos/issues/2035)
+- BuildSystem: Kokkos version major/version minor \(Feature request\) [\#1930](https://github.com/kokkos/kokkos/issues/1930)
+- BuildSystem: CMake namespaces \(and other modern cmake cleanup\) [\#1924](https://github.com/kokkos/kokkos/issues/1924)
+- BuildSystem: Remove capability to install Kokkos via GNU Makefiles [\#2332](https://github.com/kokkos/kokkos/issues/2332)
+- Documentation: Remove PDF ProgrammingGuide in Kokkos; replace with link [\#2244](https://github.com/kokkos/kokkos/issues/2244)
+- View: Add Method to Resize View without Initialization [\#2048](https://github.com/kokkos/kokkos/issues/2048)
+- Vector: implement “insert” method for Kokkos\_Vector \(as a serial function on host\) [\#2437](https://github.com/kokkos/kokkos/issues/2437)
+
+**Fixed bugs:**
+
+- ParallelScan: Kokkos::parallel\_scan fix race condition seen in inter-block fence [\#2681](https://github.com/kokkos/kokkos/issues/2681)
+- OffsetView: Kokkos::OffsetView missing constructor which takes pointer [\#2247](https://github.com/kokkos/kokkos/issues/2247)
+- OffsetView: Kokkos::OffsetView: allow offset=0 [\#2246](https://github.com/kokkos/kokkos/issues/2246)
+- DeepCopy: Missing DeepCopy instrumentation in Kokkos [\#2522](https://github.com/kokkos/kokkos/issues/2522)
+- nvcc\_wrapper: --host-only fails with multiple -W\* flags [\#2484](https://github.com/kokkos/kokkos/issues/2484)
+- nvcc\_wrapper: taking first -std option is counterintuitive [\#2553](https://github.com/kokkos/kokkos/issues/2553)
+- Subview: Error taking subviews of views with static_extents of min rank [\#2448](https://github.com/kokkos/kokkos/issues/2448)
+- TeamPolicy: reducers with valuetypes without += broken on CUDA [\#2410](https://github.com/kokkos/kokkos/issues/2410)
+- Libs: Fix inconsistency of Kokkos library names in Kokkos and Trilinos [\#1902](https://github.com/kokkos/kokkos/issues/1902)
+- Complex: operator\>\> for complex\<T\> uses std::ostream, not std::istream [\#2313](https://github.com/kokkos/kokkos/issues/2313)
+- Macros: Restrict not honored for non-intel compilers [\#1922](https://github.com/kokkos/kokkos/issues/1922)
+
+## [2.9.00](https://github.com/kokkos/kokkos/tree/2.9.00) (2019-06-24)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.8.00...2.9.00)
+
+**Implemented enhancements:**
+
+- Capability: CUDA Streams [\#1723](https://github.com/kokkos/kokkos/issues/1723)
+- Capability: CUDA Stream support for parallel\_reduce [\#2061](https://github.com/kokkos/kokkos/issues/2061)
+- Capability: Feature Request: TeamVectorRange [\#713](https://github.com/kokkos/kokkos/issues/713)
+- Capability: Adding HPX backend [\#2080](https://github.com/kokkos/kokkos/issues/2080)
+- Capability: TaskScheduler to have multiple queues [\#565](https://github.com/kokkos/kokkos/issues/565)
+- Capability: Support for additional reductions in ScatterView [\#1674](https://github.com/kokkos/kokkos/issues/1674)
+- Capability: Request: deep\_copy within parallel regions [\#689](https://github.com/kokkos/kokkos/issues/689)
+- Capability: Feature Request: `create_mirror_view_without_initializing` [\#1765](https://github.com/kokkos/kokkos/issues/1765)
+- View: Use SFINAE to restrict possible View type conversions [\#2127](https://github.com/kokkos/kokkos/issues/2127)
+- Deprecation: Deprecate ExecutionSpace::fence\(\) as static function and make it non-static [\#2140](https://github.com/kokkos/kokkos/issues/2140)
+- Deprecation: Deprecate LayoutTileLeft [\#2122](https://github.com/kokkos/kokkos/issues/2122)
+- Macros: KOKKOS\_RESTRICT defined for non-Intel compilers [\#2038](https://github.com/kokkos/kokkos/issues/2038)
+
+**Fixed bugs:**
+
+- Cuda: TeamThreadRange loop count on device is passed by reference to host static constexpr [\#1733](https://github.com/kokkos/kokkos/issues/1733)
+- Cuda: Build error with relocatable device code with CUDA 10.1 GCC 7.3 [\#2134](https://github.com/kokkos/kokkos/issues/2134)
+- Cuda: cudaFuncSetCacheConfig is setting CachePreferShared too often [\#2066](https://github.com/kokkos/kokkos/issues/2066)
+- Cuda: TeamPolicy doesn't throw when created with non-viable vector length and also doesn't backscale to a viable one [\#2020](https://github.com/kokkos/kokkos/issues/2020)
+- Cuda: cudaMemcpy error for large league sizes on V100 [\#1991](https://github.com/kokkos/kokkos/issues/1991)
+- Cuda: illegal warp sync in parallel\_reduce by functor on Turing 75 [\#1958](https://github.com/kokkos/kokkos/issues/1958)
+- TeamThreadRange: Inconsistent results from TeamThreadRange reduction [\#1905](https://github.com/kokkos/kokkos/issues/1905)
+- Atomics: atomic\_fetch\_oper & atomic\_oper\_fetch don't build for complex\<float\> [\#1964](https://github.com/kokkos/kokkos/issues/1964)
+- Views: Kokkos randomread Views leak memory [\#2155](https://github.com/kokkos/kokkos/issues/2155)
+- ScatterView: LayoutLeft overload currently non-functional [\#2165](https://github.com/kokkos/kokkos/issues/2165)
+- KNL: With intel 17.2.174 illegal instruction in random number test [\#2078](https://github.com/kokkos/kokkos/issues/2078)
+- Bitset: Enable copy constructor on device [\#2094](https://github.com/kokkos/kokkos/issues/2094)
+- Examples: do not compile due to template deduction error \(multi\_fem\) [\#1928](https://github.com/kokkos/kokkos/issues/1928)
+
+## [2.8.00](https://github.com/kokkos/kokkos/tree/2.8.00) (2019-02-05)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.7.24...2.8.00)
+
+**Implemented enhancements:**
+
+- Capability, Tests: C++14 support and testing [\#1914](https://github.com/kokkos/kokkos/issues/1914)
+- Capability: Add environment variables for all command line arguments [\#1798](https://github.com/kokkos/kokkos/issues/1798)
+- Capability: --kokkos-ndevices not working for Slurm [\#1920](https://github.com/kokkos/kokkos/issues/1920)
+- View: Undefined behavior when deep copying from and to an empty unmanaged view [\#1967](https://github.com/kokkos/kokkos/issues/1967)
+- BuildSystem: nvcc\_wrapper should stop immediately if nvcc is not in PATH [\#1861](https://github.com/kokkos/kokkos/issues/1861)
+
+**Fixed bugs:**
+
+- Cuda: Fix Volta Issues 1: Non-deterministic behavior on Volta, runs fine on Pascal [\#1949](https://github.com/kokkos/kokkos/issues/1949)
+- Cuda: Fix Volta Issues 2: CUDA Team Scan gives wrong values on Volta with -G compile flag [\#1942](https://github.com/kokkos/kokkos/issues/1942)
+- Cuda: illegal warp sync in parallel\_reduce by functor on Turing 75 [\#1958](https://github.com/kokkos/kokkos/issues/1958)
+- Threads: Pthreads backend does not handle RangePolicy with offset correctly [\#1976](https://github.com/kokkos/kokkos/issues/1976)
+- Atomics: atomic\_fetch\_oper has no case for Kokkos::complex\<double\> or other 16-byte types [\#1951](https://github.com/kokkos/kokkos/issues/1951)
+- MDRangePolicy: Fix zero-length range [\#1948](https://github.com/kokkos/kokkos/issues/1948)
+- TeamThreadRange: TeamThreadRange MaxLoc reduce doesn't compile [\#1909](https://github.com/kokkos/kokkos/issues/1909)
+
+## [2.7.24](https://github.com/kokkos/kokkos/tree/2.7.24) (2018-11-04)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.7.00...2.7.24)
+
+**Implemented enhancements:**
+
+- DualView: Add non-templated functions for sync, need\_sync, view, modify [\#1858](https://github.com/kokkos/kokkos/issues/1858)
+- DualView: Avoid needlessly allocating and initializing modify\_host and modify\_device flag views [\#1831](https://github.com/kokkos/kokkos/issues/1831)
+- DualView: Incorrect deduction of "not device type" [\#1659](https://github.com/kokkos/kokkos/issues/1659)
+- BuildSystem: Add KOKKOS\_ENABLE\_CXX14 and KOKKOS\_ENABLE\_CXX17 [\#1602](https://github.com/kokkos/kokkos/issues/1602)
+- BuildSystem: Installed kokkos\_generated\_settings.cmake contains build directories instead of install directories [\#1838](https://github.com/kokkos/kokkos/issues/1838)
+- BuildSystem: KOKKOS\_ARCH: add ticks to printout of improper arch setting [\#1649](https://github.com/kokkos/kokkos/issues/1649)
+- BuildSystem: Make core/src/Makefile for Cuda use needed nvcc\_wrapper [\#1296](https://github.com/kokkos/kokkos/issues/1296)
+- Build: Support PGI as host compiler for NVCC [\#1828](https://github.com/kokkos/kokkos/issues/1828)
+- Build: Many warnings fixed, e.g. [\#1786](https://github.com/kokkos/kokkos/issues/1786)
+- Capability: OffsetView with non-zero begin index [\#567](https://github.com/kokkos/kokkos/issues/567)
+- Capability: Reductions into device side view [\#1788](https://github.com/kokkos/kokkos/issues/1788)
+- Capability: Add max\_size to Kokkos::Array [\#1760](https://github.com/kokkos/kokkos/issues/1760)
+- Capability: View Assignment: LayoutStride -\> LayoutLeft and LayoutStride -\> LayoutRight [\#1594](https://github.com/kokkos/kokkos/issues/1594)
+- Capability: Atomic function allow implicit conversion of update argument [\#1571](https://github.com/kokkos/kokkos/issues/1571)
+- Capability: Add team\_size\_max with tagged functors [\#663](https://github.com/kokkos/kokkos/issues/663)
+- Capability: Fix alignment of views from Kokkos\_ScratchSpace; should use different alignment [\#1700](https://github.com/kokkos/kokkos/issues/1700)
+- Capability: create\_mirror\_view\_and\_copy for DynRankView [\#1651](https://github.com/kokkos/kokkos/issues/1651)
+- Capability: DeepCopy HBWSpace / HostSpace [\#548](https://github.com/kokkos/kokkos/issues/548)
+- ROCm: support team vector scan [\#1645](https://github.com/kokkos/kokkos/issues/1645)
+- ROCm: Merge from rocm-hackathon2 [\#1636](https://github.com/kokkos/kokkos/issues/1636)
+- ROCm: Add ParallelScanWithTotal [\#1611](https://github.com/kokkos/kokkos/issues/1611)
+- ROCm: Implement MDRange in ROCm [\#1314](https://github.com/kokkos/kokkos/issues/1314)
+- ROCm: Implement Reducers for Nested Parallelism Levels [\#963](https://github.com/kokkos/kokkos/issues/963)
+- ROCm: Add asynchronous deep copy [\#959](https://github.com/kokkos/kokkos/issues/959)
+- Tests: Memory pool test seems to allocate 8GB [\#1830](https://github.com/kokkos/kokkos/issues/1830)
+- Tests: Add unit\_test for team\_broadcast [\#734](https://github.com/kokkos/kokkos/issues/734)
+
+**Fixed bugs:**
+
+- BuildSystem: Makefile.kokkos gets gcc-toolchain wrong if gcc is cached [\#1841](https://github.com/kokkos/kokkos/issues/1841)
+- BuildSystem: kokkos\_generated\_settings.cmake placement is inconsistent [\#1771](https://github.com/kokkos/kokkos/issues/1771)
+- BuildSystem: Invalid escape sequence \. in kokkos\_functions.cmake [\#1661](https://github.com/kokkos/kokkos/issues/1661)
+- BuildSystem: Problem in Kokkos generated cmake file [\#1770](https://github.com/kokkos/kokkos/issues/1770)
+- BuildSystem: invalid file names on windows [\#1671](https://github.com/kokkos/kokkos/issues/1671)
+- Tests: reducers min/max\_loc test fails randomly due to multiple min values and thus multiple valid locations [\#1681](https://github.com/kokkos/kokkos/issues/1681)
+- Tests: cuda.scatterview unit test causes "Bus error" when force\_uvm and enable\_lambda are enabled [\#1852](https://github.com/kokkos/kokkos/issues/1852)
+- Tests: cuda.cxx11 unit test fails when force\_uvm and enable\_lambda are enabled [\#1850](https://github.com/kokkos/kokkos/issues/1850)
+- Tests: threads.reduce\_device\_view\_range\_policy failing with Cuda/8.0.44 and RDC [\#1836](https://github.com/kokkos/kokkos/issues/1836)
+- Build: compile error when compiling Kokkos with hwloc 2.0.1 \(on OSX 10.12.6, with g++ 7.2.0\) [\#1506](https://github.com/kokkos/kokkos/issues/1506)
+- Build: dual\_view.view broken with UVM [\#1834](https://github.com/kokkos/kokkos/issues/1834)
+- Build: White cuda/9.2 + gcc/7.2 warnings triggering errors [\#1833](https://github.com/kokkos/kokkos/issues/1833)
+- Build: warning: enum constant in boolean context [\#1813](https://github.com/kokkos/kokkos/issues/1813)
+- Capability: Fix overly conservative max\_team\_size thingy [\#1808](https://github.com/kokkos/kokkos/issues/1808)
+- DynRankView: Ctors taking ViewAllocateWithoutInitializing broken [\#1783](https://github.com/kokkos/kokkos/issues/1783)
+- Cuda: Apollo cuda.team\_broadcast test fail with clang-6.0 [\#1762](https://github.com/kokkos/kokkos/issues/1762)
+- Cuda: Clang spurious test failure in impl\_view\_accessible [\#1753](https://github.com/kokkos/kokkos/issues/1753)
+- Cuda: Kokkos::complex\<double\> atomic deadlocks with Clang 6 Cuda build with -O0 [\#1752](https://github.com/kokkos/kokkos/issues/1752)
+- Cuda: LayoutStride Test fails for UVM as default memory space [\#1688](https://github.com/kokkos/kokkos/issues/1688)
+- Cuda: Scan wrong values on Volta [\#1676](https://github.com/kokkos/kokkos/issues/1676)
+- Cuda: Kokkos::deep\_copy error with CudaUVM and Kokkos::Serial spaces [\#1652](https://github.com/kokkos/kokkos/issues/1652)
+- Cuda: cudaErrorInvalidConfiguration with debug build [\#1647](https://github.com/kokkos/kokkos/issues/1647)
+- Cuda: parallel\_for with TeamPolicy::team\_size\_recommended with launch bounds not working -- reported by Daniel Holladay [\#1283](https://github.com/kokkos/kokkos/issues/1283)
+- Cuda: Using KOKKOS\_CLASS\_LAMBDA in a class with Kokkos::Random\_XorShift64\_Pool member data [\#1696](https://github.com/kokkos/kokkos/issues/1696)
+- Long Build Times on Darwin [\#1721](https://github.com/kokkos/kokkos/issues/1721)
+- Capability: Typo in Kokkos\_Sort.hpp - BinOp3D - wrong comparison [\#1720](https://github.com/kokkos/kokkos/issues/1720)
+- Buffer overflow in SharedAllocationRecord in Kokkos\_HostSpace.cpp [\#1673](https://github.com/kokkos/kokkos/issues/1673)
+- Serial unit test failure [\#1632](https://github.com/kokkos/kokkos/issues/1632)
+
+## [2.7.00](https://github.com/kokkos/kokkos/tree/2.7.00) (2018-05-24)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.6.00...2.7.00)
+
+**Part of the Kokkos C++ Performance Portability Programming EcoSystem 2.7**
+
+**Implemented enhancements:**
+
+- Deprecate team\_size auto adjusting to maximal value possible [\#1618](https://github.com/kokkos/kokkos/issues/1618)
+- DynamicView: remove restrictions that value\_type be std::is\_trivial and have power-of-two size [\#1586](https://github.com/kokkos/kokkos/issues/1586)
+- Kokkos::StaticCrsGraph does not propagate memory traits \(e.g., Unmanaged\) [\#1581](https://github.com/kokkos/kokkos/issues/1581)
+- Adding ETI for DeepCopy / ViewFill etc. [\#1578](https://github.com/kokkos/kokkos/issues/1578)
+- Deprecate all the left over KOKKOS\_HAVE\_ Macros and Kokkos\_OldMacros.hpp [\#1572](https://github.com/kokkos/kokkos/issues/1572)
+- Error if Kokkos\_ARCH set in CMake [\#1555](https://github.com/kokkos/kokkos/issues/1555)
+- Deprecate ExecSpace::initialize / ExecSpace::finalize [\#1532](https://github.com/kokkos/kokkos/issues/1532)
+- New API for TeamPolicy property setting [\#1531](https://github.com/kokkos/kokkos/issues/1531)
+- clang 6.0 + cuda debug out-of-memory test failure [\#1521](https://github.com/kokkos/kokkos/issues/1521)
+- Cuda UniqueToken interface not consistent with other backends [\#1505](https://github.com/kokkos/kokkos/issues/1505)
+- Move Reducers out of Experimental namespace [\#1494](https://github.com/kokkos/kokkos/issues/1494)
+- Provide scope guard for initialize/finalize [\#1479](https://github.com/kokkos/kokkos/issues/1479)
+- Check Kokkos::is\_initialized in SharedAllocationRecord dtor [\#1465](https://github.com/kokkos/kokkos/issues/1465)
+- Remove static list of allocations [\#1464](https://github.com/kokkos/kokkos/issues/1464)
+- Makefiles: Support single compile/link line use case [\#1402](https://github.com/kokkos/kokkos/issues/1402)
+- ThreadVectorRange with a range [\#1400](https://github.com/kokkos/kokkos/issues/1400)
+- Exclusive scan + last value API [\#1358](https://github.com/kokkos/kokkos/issues/1358)
+- Install kokkos\_generated\_settings.cmake [\#1348](https://github.com/kokkos/kokkos/issues/1348)
+- Kokkos arrays \(not views!\) don't do bounds checking in debug mode [\#1342](https://github.com/kokkos/kokkos/issues/1342)
+- Expose round-robin GPU assignment outside of initialize\(int, char\*\*\) [\#1318](https://github.com/kokkos/kokkos/issues/1318)
+- DynamicView misses use\_count and label function [\#1298](https://github.com/kokkos/kokkos/issues/1298)
+- View constructor should check arguments [\#1286](https://github.com/kokkos/kokkos/issues/1286)
+- False Positive on Oversubscription Warning [\#1207](https://github.com/kokkos/kokkos/issues/1207)
+- Allow \(require\) execution space for 1st arg of VerifyExecutionCanAccessMemorySpace [\#1192](https://github.com/kokkos/kokkos/issues/1192)
+- ROCm: Add ROCmHostPinnedSpace [\#958](https://github.com/kokkos/kokkos/issues/958)
+- power of two functions [\#656](https://github.com/kokkos/kokkos/issues/656)
+- CUDA 8 has 64bit \_\_shfl [\#361](https://github.com/kokkos/kokkos/issues/361)
+- Add TriBITS/CMake configure information about node types [\#243](https://github.com/kokkos/kokkos/issues/243)
+
+**Fixed bugs:**
+
+- CUDA atomic\_fetch\_sub for doubles is hitting CAS instead of intrinsic [\#1624](https://github.com/kokkos/kokkos/issues/1624)
+- Bug: use of ballot on Volta [\#1612](https://github.com/kokkos/kokkos/issues/1612)
+- Kokkos::deep\_copy memory access failures [\#1583](https://github.com/kokkos/kokkos/issues/1583)
+- g++ -std option doubly set for cmake project [\#1548](https://github.com/kokkos/kokkos/issues/1548)
+- ViewFill for 1D Views of larger 32bit entries fails [\#1541](https://github.com/kokkos/kokkos/issues/1541)
+- CUDA Volta another warpsync bug [\#1520](https://github.com/kokkos/kokkos/issues/1520)
+- triple\_nested\_parallelism fails with KOKKOS\_DEBUG and CUDA [\#1513](https://github.com/kokkos/kokkos/issues/1513)
+- Jenkins errors in Kokkos\_SharedAlloc.cpp with debug build [\#1511](https://github.com/kokkos/kokkos/issues/1511)
+- Kokkos::Sort out-of-bounds with empty bins [\#1504](https://github.com/kokkos/kokkos/issues/1504)
+- Get rid of deprecated functions inside Kokkos [\#1484](https://github.com/kokkos/kokkos/issues/1484)
+- get\_work\_partition casts int64\_t to int, causing a seg fault [\#1481](https://github.com/kokkos/kokkos/issues/1481)
+- NVCC bug with \_\_device\_\_ on defaulted function [\#1470](https://github.com/kokkos/kokkos/issues/1470)
+- CMake example broken with CUDA backend [\#1468](https://github.com/kokkos/kokkos/issues/1468)
+
+## [2.6.00](https://github.com/kokkos/kokkos/tree/2.6.00) (2018-03-07)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.5.00...2.6.00)
+
+**Part of the Kokkos C++ Performance Portability Programming EcoSystem 2.6**
+
+**Implemented enhancements:**
+
+- Support NVIDIA Volta microarchitecture [\#1466](https://github.com/kokkos/kokkos/issues/1466)
+- Kokkos - Define empty functions when profiling disabled [\#1424](https://github.com/kokkos/kokkos/issues/1424)
+- Don't use \_\_constant\_\_ cache for lock arrays, enable once per run update instead of once per call [\#1385](https://github.com/kokkos/kokkos/issues/1385)
+- Task DAG enhancement. [\#1354](https://github.com/kokkos/kokkos/issues/1354)
+- Cuda task team collectives and stack size [\#1353](https://github.com/kokkos/kokkos/issues/1353)
+- Replace View operator acceptance of more than rank integers with 'access' function [\#1333](https://github.com/kokkos/kokkos/issues/1333)
+- Interoperability: Do not shut down backend execution space runtimes upon calling finalize. [\#1305](https://github.com/kokkos/kokkos/issues/1305)
+- shmem\_size for LayoutStride [\#1291](https://github.com/kokkos/kokkos/issues/1291)
+- Kokkos::resize performs poorly on 1D Views [\#1270](https://github.com/kokkos/kokkos/issues/1270)
+- stride\(\) is inconsistent with dimension\(\), extent\(\), etc. [\#1214](https://github.com/kokkos/kokkos/issues/1214)
+- Kokkos::sort defaults to std::sort on host [\#1208](https://github.com/kokkos/kokkos/issues/1208)
+- DynamicView with host size grow [\#1206](https://github.com/kokkos/kokkos/issues/1206)
+- Unmanaged View with Anonymous Memory Space [\#1175](https://github.com/kokkos/kokkos/issues/1175)
+- Sort subset of Kokkos::DynamicView [\#1160](https://github.com/kokkos/kokkos/issues/1160)
+- MDRange policy doesn't support lambda reductions [\#1054](https://github.com/kokkos/kokkos/issues/1054)
+- Add ability to set hook on Kokkos::finalize [\#714](https://github.com/kokkos/kokkos/issues/714)
+- Atomics with Serial Backend - Default should be Disable? [\#549](https://github.com/kokkos/kokkos/issues/549)
+- KOKKOS\_ENABLE\_DEPRECATED\_CODE [\#1359](https://github.com/kokkos/kokkos/issues/1359)
+
+**Fixed bugs:**
+
+- cuda\_internal\_maximum\_warp\_count returns 8, but I believe it should return 16 for P100 [\#1269](https://github.com/kokkos/kokkos/issues/1269)
+- Cuda: level 1 scratch memory bug \(reported by Stan Moore\) [\#1434](https://github.com/kokkos/kokkos/issues/1434)
+- MDRangePolicy Reduction requires value\_type typedef in Functor [\#1379](https://github.com/kokkos/kokkos/issues/1379)
+- Kokkos DeepCopy between empty views fails [\#1369](https://github.com/kokkos/kokkos/issues/1369)
+- Several issues with new CMake build infrastructure \(reported by Eric Phipps\) [\#1365](https://github.com/kokkos/kokkos/issues/1365)
+- deep\_copy between rank-1 host/device views of differing layouts without UVM no longer works \(reported by Eric Phipps\) [\#1363](https://github.com/kokkos/kokkos/issues/1363)
+- Profiling can't be disabled in CMake, and a parallel\_for is missing for tasks \(reported by Kyungjoo Kim\) [\#1349](https://github.com/kokkos/kokkos/issues/1349)
+- get\_work\_partition int overflow \(reported by berryj5\) [\#1327](https://github.com/kokkos/kokkos/issues/1327)
+- Kokkos::deep\_copy must fence even if the two views are the same [\#1303](https://github.com/kokkos/kokkos/issues/1303)
+- CudaUVMSpace::allocate/deallocate must fence [\#1302](https://github.com/kokkos/kokkos/issues/1302)
+- ViewResize on CUDA fails in Debug because of too many resources requested [\#1299](https://github.com/kokkos/kokkos/issues/1299)
+- Cuda 9 and intrepid2 calls from Panzer. [\#1183](https://github.com/kokkos/kokkos/issues/1183)
+- Slowdown due to tracking\_enabled\(\) in 2.04.00 \(found by Albany app\) [\#1016](https://github.com/kokkos/kokkos/issues/1016)
+- Bounds checking fails with zero-span Views \(reported by Stan Moore\) [\#1411](https://github.com/kokkos/kokkos/issues/1411)
+
+## [2.5.00](https://github.com/kokkos/kokkos/tree/2.5.00) (2017-12-15)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.04.11...2.5.00)
+
+**Part of the Kokkos C++ Performance Portability Programming EcoSystem 2.5**
+
+**Implemented enhancements:**
+
+- Provide Makefile.kokkos logic for CMake and TriBITS [\#878](https://github.com/kokkos/kokkos/issues/878)
+- Add Scatter View [\#825](https://github.com/kokkos/kokkos/issues/825)
+- Drop gcc 4.7 and intel 14 from supported compiler list [\#603](https://github.com/kokkos/kokkos/issues/603)
+- Enable construction of unmanaged view using common\_view\_alloc\_prop [\#1170](https://github.com/kokkos/kokkos/issues/1170)
+- Unused Function Warning with XL [\#1267](https://github.com/kokkos/kokkos/issues/1267)
+- Add memory pool parameter check [\#1218](https://github.com/kokkos/kokkos/issues/1218)
+- CUDA9: Fix warning for unsupported long double [\#1189](https://github.com/kokkos/kokkos/issues/1189)
+- CUDA9: fix warning on defaulted function marking [\#1188](https://github.com/kokkos/kokkos/issues/1188)
+- CUDA9: fix warnings for deprecated warp level functions [\#1187](https://github.com/kokkos/kokkos/issues/1187)
+- Add CUDA 9.0 nightly testing [\#1174](https://github.com/kokkos/kokkos/issues/1174)
+- {OMPI,MPICH}\_CXX hack breaks nvcc\_wrapper use case [\#1166](https://github.com/kokkos/kokkos/issues/1166)
+- KOKKOS\_HAVE\_CUDA\_LAMBDA became KOKKOS\_CUDA\_USE\_LAMBDA [\#1274](https://github.com/kokkos/kokkos/issues/1274)
+
+**Fixed bugs:**
+
+- MinMax Reducer with tagged operator doesn't compile [\#1251](https://github.com/kokkos/kokkos/issues/1251)
+- Reducers for Tagged operators give wrong answer [\#1250](https://github.com/kokkos/kokkos/issues/1250)
+- Kokkos not Compatible with Big Endian Machines? [\#1235](https://github.com/kokkos/kokkos/issues/1235)
+- Parallel Scan hangs forever on BG/Q [\#1234](https://github.com/kokkos/kokkos/issues/1234)
+- Threads backend doesn't compile with Clang on OS X [\#1232](https://github.com/kokkos/kokkos/issues/1232)
+- $\(shell date\) needs quote [\#1264](https://github.com/kokkos/kokkos/issues/1264)
+- Unqualified parallel\_for call conflicts with user-defined parallel\_for [\#1219](https://github.com/kokkos/kokkos/issues/1219)
+- KokkosAlgorithms: CMake issue in unit tests [\#1212](https://github.com/kokkos/kokkos/issues/1212)
+- Intel 18 Error: "simd pragma has been deprecated" [\#1210](https://github.com/kokkos/kokkos/issues/1210)
+- Memory leak in Kokkos::initialize [\#1194](https://github.com/kokkos/kokkos/issues/1194)
+- CUDA9: compiler error with static assert template arguments [\#1190](https://github.com/kokkos/kokkos/issues/1190)
+- Kokkos::Serial::is\_initialized returns always true [\#1184](https://github.com/kokkos/kokkos/issues/1184)
+- Triple nested parallelism still fails on bowman [\#1093](https://github.com/kokkos/kokkos/issues/1093)
+- OpenMP openmp.range on Develop Runs Forever on POWER7+ with RHEL7 and GCC4.8.5 [\#995](https://github.com/kokkos/kokkos/issues/995)
+- Rendezvous performance at global scope [\#985](https://github.com/kokkos/kokkos/issues/985)
+
+## [2.04.11](https://github.com/kokkos/kokkos/tree/2.04.11) (2017-10-28)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.04.04...2.04.11)
+
+**Implemented enhancements:**
+
+- Add Subview pattern. [\#648](https://github.com/kokkos/kokkos/issues/648)
+- Add Kokkos "global" is\_initialized [\#1060](https://github.com/kokkos/kokkos/issues/1060)
+- Add create\_mirror\_view\_and\_copy [\#1161](https://github.com/kokkos/kokkos/issues/1161)
+- Add KokkosConcepts SpaceAccessibility function [\#1092](https://github.com/kokkos/kokkos/issues/1092)
+- Option to Disable Initialize Warnings [\#1142](https://github.com/kokkos/kokkos/issues/1142)
+- Mature task-DAG capability [\#320](https://github.com/kokkos/kokkos/issues/320)
+- Promote Work DAG from experimental [\#1126](https://github.com/kokkos/kokkos/issues/1126)
+- Implement new WorkGraph push/pop [\#1108](https://github.com/kokkos/kokkos/issues/1108)
+- Kokkos\_ENABLE\_Cuda\_Lambda should default ON [\#1101](https://github.com/kokkos/kokkos/issues/1101)
+- Add multidimensional parallel for example and improve unit test [\#1064](https://github.com/kokkos/kokkos/issues/1064)
+- Fix ROCm: Performance tests not building [\#1038](https://github.com/kokkos/kokkos/issues/1038)
+- Make KOKKOS\_ALIGN\_SIZE a configure-time option [\#1004](https://github.com/kokkos/kokkos/issues/1004)
+- Make alignment consistent [\#809](https://github.com/kokkos/kokkos/issues/809)
+- Improve subview construction on Cuda backend [\#615](https://github.com/kokkos/kokkos/issues/615)
+
+**Fixed bugs:**
+
+- Kokkos::vector fixes for application [\#1134](https://github.com/kokkos/kokkos/issues/1134)
+- DynamicView non-power of two value\_type [\#1177](https://github.com/kokkos/kokkos/issues/1177)
+- Memory pool bug [\#1154](https://github.com/kokkos/kokkos/issues/1154)
+- Cuda launch bounds performance regression bug [\#1140](https://github.com/kokkos/kokkos/issues/1140)
+- Significant performance regression in LAMMPS after updating Kokkos [\#1139](https://github.com/kokkos/kokkos/issues/1139)
+- CUDA compile error [\#1128](https://github.com/kokkos/kokkos/issues/1128)
+- MDRangePolicy neg idx test failure in debug mode [\#1113](https://github.com/kokkos/kokkos/issues/1113)
+- subview construction on Cuda backend [\#615](https://github.com/kokkos/kokkos/issues/615)
+
+## [2.04.04](https://github.com/kokkos/kokkos/tree/2.04.04) (2017-09-11)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.04.00...2.04.04)
+
+**Implemented enhancements:**
+
+- OpenMP partition: set number of threads on nested level [\#1082](https://github.com/kokkos/kokkos/issues/1082)
+- Add StaticCrsGraph row\(\) method [\#1071](https://github.com/kokkos/kokkos/issues/1071)
+- Enhance Kokkos complex operator overloading [\#1052](https://github.com/kokkos/kokkos/issues/1052)
+- Tell Trilinos packages about host+device lambda [\#1019](https://github.com/kokkos/kokkos/issues/1019)
+- Function markup for defaulted class members [\#952](https://github.com/kokkos/kokkos/issues/952)
+- Add deterministic random number generator [\#857](https://github.com/kokkos/kokkos/issues/857)
+
+**Fixed bugs:**
+
+- Fix reduction\_identity\<T\>::max for floating point numbers [\#1048](https://github.com/kokkos/kokkos/issues/1048)
+- Fix MD iteration policy ignores lower bound on GPUs [\#1041](https://github.com/kokkos/kokkos/issues/1041)
+- (Experimental) HBWSpace Linking issues in KokkosKernels [\#1094](https://github.com/kokkos/kokkos/issues/1094)
+- (Experimental) ROCm: algorithms/unit\_tests test\_sort failing with segfault [\#1070](https://github.com/kokkos/kokkos/issues/1070)
+
+## [2.04.00](https://github.com/kokkos/kokkos/tree/2.04.00) (2017-08-16)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.03.13...2.04.00)
+
+**Implemented enhancements:**
+
+- Added ROCm backend to support AMD GPUs
+- Kokkos::complex\<T\> behaves slightly differently from std::complex\<T\> [\#1011](https://github.com/kokkos/kokkos/issues/1011)
+- Kokkos::Experimental::Crs constructor arguments were in the wrong order [\#992](https://github.com/kokkos/kokkos/issues/992)
+- Work graph construction ease-of-use (one lambda for count and fill) [\#991](https://github.com/kokkos/kokkos/issues/991)
+- when\_all returns pointer of futures (improved interface) [\#990](https://github.com/kokkos/kokkos/issues/990)
+- Allow assignment of LayoutLeft to LayoutRight or vice versa for rank-0 Views [\#594](https://github.com/kokkos/kokkos/issues/594)
+- Changed the meaning of Kokkos\_ENABLE\_CXX11\_DISPATCH\_LAMBDA [\#1035](https://github.com/kokkos/kokkos/issues/1035)
+
+**Fixed bugs:**
+
+- memory pool default constructor does not properly set member variables. [\#1007](https://github.com/kokkos/kokkos/issues/1007)
+
+## [2.03.13](https://github.com/kokkos/kokkos/tree/2.03.13) (2017-07-27)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.03.05...2.03.13)
+
+**Implemented enhancements:**
+
+- Disallow enabling both OpenMP and Threads in the same executable [\#406](https://github.com/kokkos/kokkos/issues/406)
+- Make Kokkos::OpenMP respect OMP environment even if hwloc is available [\#630](https://github.com/kokkos/kokkos/issues/630)
+- Improve Atomics Performance on KNL/Broadwell where PREFETCHW/RFO is Available [\#898](https://github.com/kokkos/kokkos/issues/898)
+- Kokkos::resize should test whether dimensions have changed before resizing [\#904](https://github.com/kokkos/kokkos/issues/904)
+- Develop performance-regression/acceptance tests [\#737](https://github.com/kokkos/kokkos/issues/737)
+- Make the deep\_copy Profiling hook a start/end system [\#890](https://github.com/kokkos/kokkos/issues/890)
+- Add deep\_copy Profiling hook [\#843](https://github.com/kokkos/kokkos/issues/843)
+- Append tag name to parallel construct name for Profiling [\#842](https://github.com/kokkos/kokkos/issues/842)
+- Add view label to `View bounds error` message for CUDA backend [\#870](https://github.com/kokkos/kokkos/issues/870)
+- Disable printing the loaded profiling library [\#824](https://github.com/kokkos/kokkos/issues/824)
+- "Declared but never referenced" warnings [\#853](https://github.com/kokkos/kokkos/issues/853)
+- Warnings about lock\_address\_cuda\_space [\#852](https://github.com/kokkos/kokkos/issues/852)
+- WorkGraph execution policy [\#771](https://github.com/kokkos/kokkos/issues/771)
+- Simplify makefiles by guarding compilation with appropriate KOKKOS\_ENABLE\_\#\#\# macros [\#716](https://github.com/kokkos/kokkos/issues/716)
+- Cmake build: wrong include install directory [\#668](https://github.com/kokkos/kokkos/issues/668)
+- Derived View type and allocation [\#566](https://github.com/kokkos/kokkos/issues/566)
+- Fix Compiler warnings when compiling core unit tests for Cuda [\#214](https://github.com/kokkos/kokkos/issues/214)
+
+**Fixed bugs:**
+
+- Out-of-bounds read in Kokkos\_Layout.hpp [\#975](https://github.com/kokkos/kokkos/issues/975)
+- CudaClang: Fix failing test with Clang 4.0 [\#941](https://github.com/kokkos/kokkos/issues/941)
+- Respawn when memory pool allocation fails \(not available memory\) [\#940](https://github.com/kokkos/kokkos/issues/940)
+- Memory pool aborts on zero allocation request, returns NULL for \< minimum [\#939](https://github.com/kokkos/kokkos/issues/939)
+- Error with TaskScheduler query of underlying memory pool [\#917](https://github.com/kokkos/kokkos/issues/917)
+- Profiling::\*Callee static variables declared in header [\#863](https://github.com/kokkos/kokkos/issues/863)
+- calling \*Space::name\(\) causes compile error [\#862](https://github.com/kokkos/kokkos/issues/862)
+- bug in Profiling::deallocateData [\#860](https://github.com/kokkos/kokkos/issues/860)
+- task\_depend test failing, CUDA 8.0 + Pascal + RDC [\#829](https://github.com/kokkos/kokkos/issues/829)
+- \[develop branch\] Standalone cmake issues [\#826](https://github.com/kokkos/kokkos/issues/826)
+- Kokkos CUDA fails to compile with OMPI\_CXX and MPICH\_CXX wrappers [\#776](https://github.com/kokkos/kokkos/issues/776)
+- Task Team reduction on Pascal [\#767](https://github.com/kokkos/kokkos/issues/767)
+- CUDA stack overflow with TaskDAG test [\#758](https://github.com/kokkos/kokkos/issues/758)
+- TeamVector test on Cuda [\#670](https://github.com/kokkos/kokkos/issues/670)
+- Clang 4.0 Cuda Build broken again [\#560](https://github.com/kokkos/kokkos/issues/560)
+
+## [2.03.05](https://github.com/kokkos/kokkos/tree/2.03.05) (2017-05-27)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.03.00...2.03.05)
+
+**Implemented enhancements:**
+
+- Harmonize Custom Reductions over nesting levels [\#802](https://github.com/kokkos/kokkos/issues/802)
+- Prevent users directly including KokkosCore\_config.h [\#815](https://github.com/kokkos/kokkos/issues/815)
+- DualView aborts on concurrent host/device modify \(in debug mode\) [\#814](https://github.com/kokkos/kokkos/issues/814)
+- Abort when running on a NVIDIA CC5.0 or higher architecture with code compiled for CC \< 5.0 [\#813](https://github.com/kokkos/kokkos/issues/813)
+- Add "name" function to ExecSpaces [\#806](https://github.com/kokkos/kokkos/issues/806)
+- Allow null Future in task spawn dependences [\#795](https://github.com/kokkos/kokkos/issues/795)
+- Add Unit Tests for Kokkos::complex [\#785](https://github.com/kokkos/kokkos/issues/785)
+- Add pow function for Kokkos::complex [\#784](https://github.com/kokkos/kokkos/issues/784)
+- Square root of a complex [\#729](https://github.com/kokkos/kokkos/issues/729)
+- Command line processing of --threads argument prevents users from having any commandline arguments starting with --threads [\#760](https://github.com/kokkos/kokkos/issues/760)
+- Protected deprecated API with appropriate macro [\#756](https://github.com/kokkos/kokkos/issues/756)
+- Allow task scheduler memory pool to be used by tasks [\#747](https://github.com/kokkos/kokkos/issues/747)
+- View bounds checking on host-side performance: constructing a std::string [\#723](https://github.com/kokkos/kokkos/issues/723)
+- Add check for AppleClang as compiler distinct from check for Clang. [\#705](https://github.com/kokkos/kokkos/issues/705)
+- Uninclude source files for specific configurations to prevent link warning. [\#701](https://github.com/kokkos/kokkos/issues/701)
+- Add --small option to snapshot script [\#697](https://github.com/kokkos/kokkos/issues/697)
+- CMake Standalone Support [\#674](https://github.com/kokkos/kokkos/issues/674)
+- CMake build unit test and install [\#808](https://github.com/kokkos/kokkos/issues/808)
+- CMake: Fix having kokkos as a subdirectory in a pure cmake project [\#629](https://github.com/kokkos/kokkos/issues/629)
+- Tribits macro assumes build directory is in top level source directory [\#654](https://github.com/kokkos/kokkos/issues/654)
+- Use bin/nvcc\_wrapper, not config/nvcc\_wrapper [\#562](https://github.com/kokkos/kokkos/issues/562)
+- Allow MemoryPool::allocate\(\) to be called from multiple threads per warp. [\#487](https://github.com/kokkos/kokkos/issues/487)
+- Move OpenMP 4.5 OpenMPTarget backend into Develop [\#456](https://github.com/kokkos/kokkos/issues/456)
+- Testing on ARM testbed [\#288](https://github.com/kokkos/kokkos/issues/288)
+
+**Fixed bugs:**
+
+- Fix label in OpenMP parallel\_reduce verify\_initialized [\#834](https://github.com/kokkos/kokkos/issues/834)
+- TeamScratch Level 1 on Cuda hangs [\#820](https://github.com/kokkos/kokkos/issues/820)
+- \[bug\] memory pool. [\#786](https://github.com/kokkos/kokkos/issues/786)
+- Some Reduction Tests fail on Intel 18 with aggressive vectorization on [\#774](https://github.com/kokkos/kokkos/issues/774)
+- Error copying dynamic view on copy of memory pool [\#773](https://github.com/kokkos/kokkos/issues/773)
+- CUDA stack overflow with TaskDAG test [\#758](https://github.com/kokkos/kokkos/issues/758)
+- ThreadVectorRange Customized Reduction Bug [\#739](https://github.com/kokkos/kokkos/issues/739)
+- set\_scratch\_size overflows [\#726](https://github.com/kokkos/kokkos/issues/726)
+- Get wrong results for compiler checks in Makefile on OS X. [\#706](https://github.com/kokkos/kokkos/issues/706)
+- Fix check if multiple host architectures enabled. [\#702](https://github.com/kokkos/kokkos/issues/702)
+- Threads Backend Does not Pass on Cray Compilers [\#609](https://github.com/kokkos/kokkos/issues/609)
+- Rare bug in memory pool where allocation can finish on superblock in empty state [\#452](https://github.com/kokkos/kokkos/issues/452)
+- LDFLAGS in core/unit\_test/Makefile: potential "undefined reference" to pthread lib [\#148](https://github.com/kokkos/kokkos/issues/148)
+
+## [2.03.00](https://github.com/kokkos/kokkos/tree/2.03.00) (2017-04-25)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.02.15...2.03.00)
+
+**Implemented enhancements:**
+
+- UnorderedMap: make it accept Devices or MemorySpaces [\#711](https://github.com/kokkos/kokkos/issues/711)
+- sort to accept DynamicView and \[begin,end\) indices [\#691](https://github.com/kokkos/kokkos/issues/691)
+- ENABLE Macros should only be used via \#ifdef or \#if defined [\#675](https://github.com/kokkos/kokkos/issues/675)
+- Remove impl/Kokkos\_Synchronic\_\* [\#666](https://github.com/kokkos/kokkos/issues/666)
+- Turning off IVDEP for Intel 14. [\#638](https://github.com/kokkos/kokkos/issues/638)
+- Using an installed Kokkos in a target application using CMake [\#633](https://github.com/kokkos/kokkos/issues/633)
+- Create Kokkos Bill of Materials [\#632](https://github.com/kokkos/kokkos/issues/632)
+- MDRangePolicy and tagged evaluators [\#547](https://github.com/kokkos/kokkos/issues/547)
+- Add PGI support [\#289](https://github.com/kokkos/kokkos/issues/289)
+
+**Fixed bugs:**
+
+- Output from PerTeam fails [\#733](https://github.com/kokkos/kokkos/issues/733)
+- Cuda: architecture flag not added to link line [\#688](https://github.com/kokkos/kokkos/issues/688)
+- Getting large chunks of memory for a thread team in a universal way [\#664](https://github.com/kokkos/kokkos/issues/664)
+- Kokkos RNG normal\(\) function hangs for small seed value [\#655](https://github.com/kokkos/kokkos/issues/655)
+- Kokkos Tests Errors on Shepard/HSW Builds [\#644](https://github.com/kokkos/kokkos/issues/644)
+
+## [2.02.15](https://github.com/kokkos/kokkos/tree/2.02.15) (2017-02-10)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.02.07...2.02.15)
+
+**Implemented enhancements:**
+
+- Containers: Adding block partitioning to StaticCrsGraph [\#625](https://github.com/kokkos/kokkos/issues/625)
+- Kokkos Make System can induce Errors on Cray Volta System [\#610](https://github.com/kokkos/kokkos/issues/610)
+- OpenMP: error out if KOKKOS\_HAVE\_OPENMP is defined but not \_OPENMP [\#605](https://github.com/kokkos/kokkos/issues/605)
+- CMake: fix standalone build with tests [\#604](https://github.com/kokkos/kokkos/issues/604)
+- Change README \(that GitHub shows when opening Kokkos project page\) to tell users how to submit PRs [\#597](https://github.com/kokkos/kokkos/issues/597)
+- Add correctness testing for all operators of Atomic View [\#420](https://github.com/kokkos/kokkos/issues/420)
+- Allow assignment of Views with compatible memory spaces [\#290](https://github.com/kokkos/kokkos/issues/290)
+- Build only one version of Kokkos library for tests [\#213](https://github.com/kokkos/kokkos/issues/213)
+- Clean out old KOKKOS\_HAVE\_CXX11 macros clauses [\#156](https://github.com/kokkos/kokkos/issues/156)
+- Harmonize Macro names [\#150](https://github.com/kokkos/kokkos/issues/150)
+
+**Fixed bugs:**
+
+- Cray and PGI: Kokkos\_Parallel\_Reduce [\#634](https://github.com/kokkos/kokkos/issues/634)
+- Kokkos Make System can induce Errors on Cray Volta System [\#610](https://github.com/kokkos/kokkos/issues/610)
+- Normal\(\) function random number generator doesn't give the expected distribution [\#592](https://github.com/kokkos/kokkos/issues/592)
+
+## [2.02.07](https://github.com/kokkos/kokkos/tree/2.02.07) (2016-12-16)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.02.01...2.02.07)
+
+**Implemented enhancements:**
+
+- Add CMake option to enable Cuda Lambda support [\#589](https://github.com/kokkos/kokkos/issues/589)
+- Add CMake option to enable Cuda RDC support [\#588](https://github.com/kokkos/kokkos/issues/588)
+- Add Initial Intel Sky Lake Xeon-HPC Compiler Support to Kokkos Make System [\#584](https://github.com/kokkos/kokkos/issues/584)
+- Building Tutorial Examples [\#582](https://github.com/kokkos/kokkos/issues/582)
+- Internal way for using ThreadVectorRange without TeamHandle [\#574](https://github.com/kokkos/kokkos/issues/574)
+- Testing: Add testing for uvm and rdc [\#571](https://github.com/kokkos/kokkos/issues/571)
+- Profiling: Add Memory Tracing and Region Markers [\#557](https://github.com/kokkos/kokkos/issues/557)
+- nvcc\_wrapper not installed with Kokkos built with CUDA through CMake [\#543](https://github.com/kokkos/kokkos/issues/543)
+- Improve DynRankView debug check [\#541](https://github.com/kokkos/kokkos/issues/541)
+- Benchmarks: Add Gather benchmark [\#536](https://github.com/kokkos/kokkos/issues/536)
+- Testing: add spot\_check option to test\_all\_sandia [\#535](https://github.com/kokkos/kokkos/issues/535)
+- Deprecate Kokkos::Impl::VerifyExecutionCanAccessMemorySpace [\#527](https://github.com/kokkos/kokkos/issues/527)
+- Add AtomicAdd support for 64bit float for Pascal [\#522](https://github.com/kokkos/kokkos/issues/522)
+- Add Restrict and Aligned memory trait [\#517](https://github.com/kokkos/kokkos/issues/517)
+- Kokkos Tests are Not Run using Compiler Optimization [\#501](https://github.com/kokkos/kokkos/issues/501)
+- Add support for clang 3.7 w/ openmp backend [\#393](https://github.com/kokkos/kokkos/issues/393)
+- Provide an error throw class [\#79](https://github.com/kokkos/kokkos/issues/79)
+
+**Fixed bugs:**
+
+- Cuda UVM Allocation test broken with UVM as default space [\#586](https://github.com/kokkos/kokkos/issues/586)
+- Bug \(develop branch only\): multiple tests are now failing when forcing uvm usage. [\#570](https://github.com/kokkos/kokkos/issues/570)
+- Error in generate\_makefile.sh for Kokkos when Compiler is Empty String/Fails [\#568](https://github.com/kokkos/kokkos/issues/568)
+- XL 13.1.4 incorrect C++11 flag [\#553](https://github.com/kokkos/kokkos/issues/553)
+- Improve DynRankView debug check [\#541](https://github.com/kokkos/kokkos/issues/541)
+- Installing Library on MAC broken due to cp -u [\#539](https://github.com/kokkos/kokkos/issues/539)
+- Intel Nightly Testing with Debug enabled fails [\#534](https://github.com/kokkos/kokkos/issues/534)
+
+## [2.02.01](https://github.com/kokkos/kokkos/tree/2.02.01) (2016-11-01)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.02.00...2.02.01)
+
+**Implemented enhancements:**
+
+- Add Changelog generation to our process. [\#506](https://github.com/kokkos/kokkos/issues/506)
+
+**Fixed bugs:**
+
+- Test scratch\_request fails in Serial with Debug enabled [\#520](https://github.com/kokkos/kokkos/issues/520)
+- Bug In BoundsCheck for DynRankView [\#516](https://github.com/kokkos/kokkos/issues/516)
+
+## [2.02.00](https://github.com/kokkos/kokkos/tree/2.02.00) (2016-10-30)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.01.10...2.02.00)
+
+**Implemented enhancements:**
+
+- Add PowerPC assembly for grabbing clock register in memory pool [\#511](https://github.com/kokkos/kokkos/issues/511)
+- Add GCC 6.x support [\#508](https://github.com/kokkos/kokkos/issues/508)
+- Test install and build against installed library [\#498](https://github.com/kokkos/kokkos/issues/498)
+- Makefile.kokkos adds expt-extended-lambda to cuda build with clang [\#490](https://github.com/kokkos/kokkos/issues/490)
+- Add top-level makefile option to just test kokkos-core unit-test [\#485](https://github.com/kokkos/kokkos/issues/485)
+- Split and harmonize Object Files of Core UnitTests to increase build parallelism [\#484](https://github.com/kokkos/kokkos/issues/484)
+- LayoutLeft to LayoutLeft subview for 3D and 4D views [\#473](https://github.com/kokkos/kokkos/issues/473)
+- Add official Cuda 8.0 support [\#468](https://github.com/kokkos/kokkos/issues/468)
+- Allow C++1Z Flag for Class Lambda capture [\#465](https://github.com/kokkos/kokkos/issues/465)
+- Add Clang 4.0+ compilation of Cuda code [\#455](https://github.com/kokkos/kokkos/issues/455)
+- Possible Issue with Intel 17.0.098 and GCC 6.1.0 in Develop Branch [\#445](https://github.com/kokkos/kokkos/issues/445)
+- Add name of view to "View bounds error" [\#432](https://github.com/kokkos/kokkos/issues/432)
+- Move Sort Binning Operators into Kokkos namespace [\#421](https://github.com/kokkos/kokkos/issues/421)
+- TaskPolicy - generate error when attempt to use uninitialized [\#396](https://github.com/kokkos/kokkos/issues/396)
+- Import WithoutInitializing and AllowPadding into Kokkos namespace [\#325](https://github.com/kokkos/kokkos/issues/325)
+- TeamThreadRange requires begin, end to be the same type [\#305](https://github.com/kokkos/kokkos/issues/305)
+- CudaUVMSpace should track \# allocations, due to CUDA limit on \# UVM allocations [\#300](https://github.com/kokkos/kokkos/issues/300)
+- Remove old View and its infrastructure [\#259](https://github.com/kokkos/kokkos/issues/259)
+
+**Fixed bugs:**
+
+- Bug in TestCuda\_Other.cpp: most likely assembly inserted into Device code [\#515](https://github.com/kokkos/kokkos/issues/515)
+- Cuda Compute Capability check of GPU is outdated [\#509](https://github.com/kokkos/kokkos/issues/509)
+- multi\_scratch test with hwloc and pthreads seg-faults. [\#504](https://github.com/kokkos/kokkos/issues/504)
+- generate\_makefile.bash: "make install" is broken [\#503](https://github.com/kokkos/kokkos/issues/503)
+- make clean in Out of Source Build/Tests Does Not Work Correctly [\#502](https://github.com/kokkos/kokkos/issues/502)
+- Makefiles for test and examples have issues in Cuda when CXX is not explicitly specified [\#497](https://github.com/kokkos/kokkos/issues/497)
+- Dispatch lambda test directly inside GTEST macro doesn't work with nvcc [\#491](https://github.com/kokkos/kokkos/issues/491)
+- UnitTests with HWLOC enabled fail if run with mpirun bound to a single core [\#489](https://github.com/kokkos/kokkos/issues/489)
+- Failing Reducer Test on Mac with Pthreads [\#479](https://github.com/kokkos/kokkos/issues/479)
+- make test Dumps Error with Clang Not Found [\#471](https://github.com/kokkos/kokkos/issues/471)
+- OpenMP TeamPolicy member broadcast not using correct volatile shared variable [\#424](https://github.com/kokkos/kokkos/issues/424)
+- TaskPolicy - generate error when attempt to use uninitialized [\#396](https://github.com/kokkos/kokkos/issues/396)
+- New task policy implementation is pulling in old experimental code. [\#372](https://github.com/kokkos/kokkos/issues/372)
+- MemoryPool unit test hangs on Power8 with GCC 6.1.0 [\#298](https://github.com/kokkos/kokkos/issues/298)
+
+## [2.01.10](https://github.com/kokkos/kokkos/tree/2.01.10) (2016-09-27)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.01.06...2.01.10)
+
+**Implemented enhancements:**
+
+- Enable Profiling by default in Tribits build [\#438](https://github.com/kokkos/kokkos/issues/438)
+- parallel\_reduce\(0\), parallel\_scan\(0\) unit tests [\#436](https://github.com/kokkos/kokkos/issues/436)
+- data\(\)==NULL after realloc with LayoutStride [\#351](https://github.com/kokkos/kokkos/issues/351)
+- Fix tutorials to track new Kokkos::View [\#323](https://github.com/kokkos/kokkos/issues/323)
+- Rename team policy set\_scratch\_size. [\#195](https://github.com/kokkos/kokkos/issues/195)
+
+**Fixed bugs:**
+
+- Possible Issue with Intel 17.0.098 and GCC 6.1.0 in Develop Branch [\#445](https://github.com/kokkos/kokkos/issues/445)
+- Makefile spits syntax error [\#435](https://github.com/kokkos/kokkos/issues/435)
+- Kokkos::sort fails for view with all the same values [\#422](https://github.com/kokkos/kokkos/issues/422)
+- Generic Reducers: can't accept inline constructed reducer [\#404](https://github.com/kokkos/kokkos/issues/404)
+- data\(\)==NULL after realloc with LayoutStride [\#351](https://github.com/kokkos/kokkos/issues/351)
+- const subview of const view with compile time dimensions on Cuda backend [\#310](https://github.com/kokkos/kokkos/issues/310)
+- Kokkos \(in Trilinos\) Causes Internal Compiler Error on CUDA 8.0.21-EA on POWER8 [\#307](https://github.com/kokkos/kokkos/issues/307)
+- Core Oversubscription Detection Broken? [\#159](https://github.com/kokkos/kokkos/issues/159)
+
+
+## [2.01.06](https://github.com/kokkos/kokkos/tree/2.01.06) (2016-09-02)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/2.01.00...2.01.06)
+
+**Implemented enhancements:**
+
+- Add "standard" reducers for lambda-supportable customized reduce [\#411](https://github.com/kokkos/kokkos/issues/411)
+- TaskPolicy - single thread back-end execution [\#390](https://github.com/kokkos/kokkos/issues/390)
+- Kokkos master clone tag [\#387](https://github.com/kokkos/kokkos/issues/387)
+- Query memory requirements from task policy [\#378](https://github.com/kokkos/kokkos/issues/378)
+- Output order of test\_atomic.cpp is confusing [\#373](https://github.com/kokkos/kokkos/issues/373)
+- Missing testing for atomics [\#341](https://github.com/kokkos/kokkos/issues/341)
+- Feature request for Kokkos to provide Kokkos::atomic\_fetch\_max and atomic\_fetch\_min [\#336](https://github.com/kokkos/kokkos/issues/336)
+- TaskPolicy\<Cuda\> performance requires teams mapped to warps [\#218](https://github.com/kokkos/kokkos/issues/218)
+
+**Fixed bugs:**
+
+- Reduce with Teams broken for custom initialize [\#407](https://github.com/kokkos/kokkos/issues/407)
+- Failing Kokkos build on Debian [\#402](https://github.com/kokkos/kokkos/issues/402)
+- Failing Tests on NVIDIA Pascal GPUs [\#398](https://github.com/kokkos/kokkos/issues/398)
+- Algorithms: fill\_random assumes dimensions fit in unsigned int [\#389](https://github.com/kokkos/kokkos/issues/389)
+- Kokkos::subview with RandomAccess Memory Trait [\#385](https://github.com/kokkos/kokkos/issues/385)
+- Build warning \(signed / unsigned comparison\) in Cuda implementation [\#365](https://github.com/kokkos/kokkos/issues/365)
+- wrong results for a parallel\_reduce with CUDA8 / Maxwell50 [\#352](https://github.com/kokkos/kokkos/issues/352)
+- Hierarchical parallelism - 3 level unit test [\#344](https://github.com/kokkos/kokkos/issues/344)
+- Can I allocate a View w/ both WithoutInitializing & AllowPadding? [\#324](https://github.com/kokkos/kokkos/issues/324)
+- subview View layout determination [\#309](https://github.com/kokkos/kokkos/issues/309)
+- Unit tests with Cuda - Maxwell [\#196](https://github.com/kokkos/kokkos/issues/196)
+
+## [2.01.00](https://github.com/kokkos/kokkos/tree/2.01.00) (2016-07-21)
+[Full Changelog](https://github.com/kokkos/kokkos/compare/End_C++98...2.01.00)
+
+**Implemented enhancements:**
+
+- Edit ViewMapping so assigning Views with the same custom layout compiles when const casting [\#327](https://github.com/kokkos/kokkos/issues/327)
+- DynRankView: Performance improvement for operator\(\) [\#321](https://github.com/kokkos/kokkos/issues/321)
+- Interoperability between static and dynamic rank views [\#295](https://github.com/kokkos/kokkos/issues/295)
+- subview member function ? [\#280](https://github.com/kokkos/kokkos/issues/280)
+- Inter-operatibility between View and DynRankView. [\#245](https://github.com/kokkos/kokkos/issues/245)
+- \(Trilinos\) build warning in atomic\_assign, with Kokkos::complex [\#177](https://github.com/kokkos/kokkos/issues/177)
+- View\<\>::shmem\_size should runtime check for number of arguments equal to rank [\#176](https://github.com/kokkos/kokkos/issues/176)
+- Custom reduction join via lambda argument [\#99](https://github.com/kokkos/kokkos/issues/99)
+- DynRankView with 0 dimensions passed in at construction [\#293](https://github.com/kokkos/kokkos/issues/293)
+- Inject view\_alloc and friends into Kokkos namespace [\#292](https://github.com/kokkos/kokkos/issues/292)
+- Less restrictive TeamPolicy reduction on Cuda [\#286](https://github.com/kokkos/kokkos/issues/286)
+- deep\_copy using remap with source execution space [\#267](https://github.com/kokkos/kokkos/issues/267)
+- Suggestion: Enable opt-in L1 caching via nvcc-wrapper [\#261](https://github.com/kokkos/kokkos/issues/261)
+- More flexible create\_mirror functions [\#260](https://github.com/kokkos/kokkos/issues/260)
+- Rename View::memory\_span to View::required\_allocation\_size [\#256](https://github.com/kokkos/kokkos/issues/256)
+- Use of subviews and views with compile-time dimensions [\#237](https://github.com/kokkos/kokkos/issues/237)
+- Kokkos::Timer [\#234](https://github.com/kokkos/kokkos/issues/234)
+- Fence CudaUVMSpace allocations [\#230](https://github.com/kokkos/kokkos/issues/230)
+- View::operator\(\) accept std::is\_integral and std::is\_enum [\#227](https://github.com/kokkos/kokkos/issues/227)
+- Allocating zero size View [\#216](https://github.com/kokkos/kokkos/issues/216)
+- Thread scalable memory pool [\#212](https://github.com/kokkos/kokkos/issues/212)
+- Add a way to disable memory leak output [\#194](https://github.com/kokkos/kokkos/issues/194)
+- Kokkos exec space init should init Kokkos profiling [\#192](https://github.com/kokkos/kokkos/issues/192)
+- Runtime rank wrapper for View [\#189](https://github.com/kokkos/kokkos/issues/189)
+- Profiling Interface [\#158](https://github.com/kokkos/kokkos/issues/158)
+- Fix View assignment \(of managed to unmanaged\) [\#153](https://github.com/kokkos/kokkos/issues/153)
+- Add unit test for assignment of managed View to unmanaged View [\#152](https://github.com/kokkos/kokkos/issues/152)
+- Check for oversubscription of threads with MPI in Kokkos::initialize [\#149](https://github.com/kokkos/kokkos/issues/149)
+- Dynamic resizeable 1dimensional view [\#143](https://github.com/kokkos/kokkos/issues/143)
+- Develop TaskPolicy for CUDA [\#142](https://github.com/kokkos/kokkos/issues/142)
+- New View : Test Compilation Downstream [\#138](https://github.com/kokkos/kokkos/issues/138)
+- New View Implementation [\#135](https://github.com/kokkos/kokkos/issues/135)
+- Add variant of subview that lets users add traits [\#134](https://github.com/kokkos/kokkos/issues/134)
+- NVCC-WRAPPER: Add --host-only flag [\#121](https://github.com/kokkos/kokkos/issues/121)
+- Address gtest issue with TriBITS Kokkos build outside of Trilinos [\#117](https://github.com/kokkos/kokkos/issues/117)
+- Make tests pass with -expt-extended-lambda on CUDA [\#108](https://github.com/kokkos/kokkos/issues/108)
+- Dynamic scheduling for parallel\_for and parallel\_reduce [\#106](https://github.com/kokkos/kokkos/issues/106)
+- Runtime or compile time error when reduce functor's join is not properly specified as const member function or with volatile arguments [\#105](https://github.com/kokkos/kokkos/issues/105)
+- Error out when the number of threads is modified after kokkos is initialized [\#104](https://github.com/kokkos/kokkos/issues/104)
+- Porting to POWER and remove assumption of X86 default [\#103](https://github.com/kokkos/kokkos/issues/103)
+- Dynamic scheduling option for RangePolicy [\#100](https://github.com/kokkos/kokkos/issues/100)
+- SharedMemory Support for Lambdas [\#81](https://github.com/kokkos/kokkos/issues/81)
+- Recommended TeamSize for Lambdas [\#80](https://github.com/kokkos/kokkos/issues/80)
+- Add Aggressive Vectorization Compilation mode [\#72](https://github.com/kokkos/kokkos/issues/72)
+- Dynamic scheduling team execution policy [\#53](https://github.com/kokkos/kokkos/issues/53)
+- UVM allocations in multi-GPU systems [\#50](https://github.com/kokkos/kokkos/issues/50)
+- Synchronic in Kokkos::Impl [\#44](https://github.com/kokkos/kokkos/issues/44)
+- index and dimension types in for loops [\#28](https://github.com/kokkos/kokkos/issues/28)
+- Subview assign of 1D Strided with stride 1 to LayoutLeft/Right [\#1](https://github.com/kokkos/kokkos/issues/1)
+
+**Fixed bugs:**
+
+- misspelled variable name in Kokkos\_Atomic\_Fetch + missing unit tests [\#340](https://github.com/kokkos/kokkos/issues/340)
+- seg fault Kokkos::Impl::CudaInternal::print\_configuration [\#338](https://github.com/kokkos/kokkos/issues/338)
+- Clang compiler error with named parallel\_reduce, tags, and TeamPolicy. [\#335](https://github.com/kokkos/kokkos/issues/335)
+- Shared Memory Allocation Error at parallel\_reduce [\#311](https://github.com/kokkos/kokkos/issues/311)
+- DynRankView: Fix resize and realloc [\#303](https://github.com/kokkos/kokkos/issues/303)
+- Scratch memory and dynamic scheduling [\#279](https://github.com/kokkos/kokkos/issues/279)
+- MemoryPool infinite loop when out of memory [\#312](https://github.com/kokkos/kokkos/issues/312)
+- Kokkos DynRankView changes break Sacado and Panzer [\#299](https://github.com/kokkos/kokkos/issues/299)
+- MemoryPool fails to compile on non-cuda non-x86 [\#297](https://github.com/kokkos/kokkos/issues/297)
+- Random Number Generator Fix [\#296](https://github.com/kokkos/kokkos/issues/296)
+- View template parameter ordering Bug [\#282](https://github.com/kokkos/kokkos/issues/282)
+- Serial task policy broken. [\#281](https://github.com/kokkos/kokkos/issues/281)
+- deep\_copy with LayoutStride should not memcpy [\#262](https://github.com/kokkos/kokkos/issues/262)
+- DualView::need\_sync should be a const method [\#248](https://github.com/kokkos/kokkos/issues/248)
+- Arbitrary-sized atomics on GPUs broken; loop forever [\#238](https://github.com/kokkos/kokkos/issues/238)
+- boolean reduction value\_type changes answer [\#225](https://github.com/kokkos/kokkos/issues/225)
+- Custom init\(\) function for parallel\_reduce with array value\_type [\#210](https://github.com/kokkos/kokkos/issues/210)
+- unit\_test Makefile is Broken - Recursively Calls itself until Machine Apocalypse. [\#202](https://github.com/kokkos/kokkos/issues/202)
+- nvcc\_wrapper Does Not Support -Xcompiler \<compiler option\> [\#198](https://github.com/kokkos/kokkos/issues/198)
+- Kokkos exec space init should init Kokkos profiling [\#192](https://github.com/kokkos/kokkos/issues/192)
+- Kokkos Threads Backend impl\_shared\_alloc Broken on Intel 16.1 \(Shepard Haswell\) [\#186](https://github.com/kokkos/kokkos/issues/186)
+- pthread back end hangs if used uninitialized [\#182](https://github.com/kokkos/kokkos/issues/182)
+- parallel\_reduce of size 0, not calling init/join [\#175](https://github.com/kokkos/kokkos/issues/175)
+- Bug in Threads with OpenMP enabled [\#173](https://github.com/kokkos/kokkos/issues/173)
+- KokkosExp\_SharedAlloc, m\_team\_work\_index inaccessible [\#166](https://github.com/kokkos/kokkos/issues/166)
+- 128-bit CAS without Assembly Broken? [\#161](https://github.com/kokkos/kokkos/issues/161)
+- fatal error: Cuda/Kokkos\_Cuda\_abort.hpp: No such file or directory [\#157](https://github.com/kokkos/kokkos/issues/157)
+- Power8: Fix OpenMP backend [\#139](https://github.com/kokkos/kokkos/issues/139)
+- Data race in Kokkos OpenMP initialization [\#131](https://github.com/kokkos/kokkos/issues/131)
+- parallel\_launch\_local\_memory and cuda 7.5 [\#125](https://github.com/kokkos/kokkos/issues/125)
+- Resize can fail with Cuda due to asynchronous dispatch [\#119](https://github.com/kokkos/kokkos/issues/119)
+- Qthread taskpolicy initialization bug. [\#92](https://github.com/kokkos/kokkos/issues/92)
+- Windows: sys/mman.h [\#89](https://github.com/kokkos/kokkos/issues/89)
+- Windows: atomic\_fetch\_sub\(\) [\#88](https://github.com/kokkos/kokkos/issues/88)
+- Windows: snprintf [\#87](https://github.com/kokkos/kokkos/issues/87)
+- Parallel\_Reduce with TeamPolicy and league size of 0 returns garbage [\#85](https://github.com/kokkos/kokkos/issues/85)
+- Throw with Cuda when using \(2D\) team\_policy parallel\_reduce with less than a warp size [\#76](https://github.com/kokkos/kokkos/issues/76)
+- Scalar views don't work with Kokkos::Atomic memory trait [\#69](https://github.com/kokkos/kokkos/issues/69)
+- Reduce the number of threads per team for Cuda [\#63](https://github.com/kokkos/kokkos/issues/63)
+- Named Kernels fail for reductions with CUDA [\#60](https://github.com/kokkos/kokkos/issues/60)
+- Kokkos View dimension\_\(\) for long returning unsigned int [\#20](https://github.com/kokkos/kokkos/issues/20)
+- atomic test hangs with LLVM [\#6](https://github.com/kokkos/kokkos/issues/6)
+- OpenMP Test should set omp\_set\_num\_threads to 1 [\#4](https://github.com/kokkos/kokkos/issues/4)
+
+**Closed issues:**
+
+- develop branch broken with CUDA 8 and --expt-extended-lambda [\#354](https://github.com/kokkos/kokkos/issues/354)
+- --arch=KNL with Intel 2016 build failure [\#349](https://github.com/kokkos/kokkos/issues/349)
+- Error building with Cuda when passing -DKOKKOS\_CUDA\_USE\_LAMBDA to generate\_makefile.bash [\#343](https://github.com/kokkos/kokkos/issues/343)
+- Can I safely use int indices in a 2-D View with capacity \> 2B? [\#318](https://github.com/kokkos/kokkos/issues/318)
+- Kokkos::ViewAllocateWithoutInitializing is not working [\#317](https://github.com/kokkos/kokkos/issues/317)
+- Intel build on Mac OS X [\#277](https://github.com/kokkos/kokkos/issues/277)
+- deleted [\#271](https://github.com/kokkos/kokkos/issues/271)
+- Broken Mira build [\#268](https://github.com/kokkos/kokkos/issues/268)
+- 32-bit build [\#246](https://github.com/kokkos/kokkos/issues/246)
+- parallel\_reduce with RDC crashes linker [\#232](https://github.com/kokkos/kokkos/issues/232)
+- build of Kokkos\_Sparse\_MV\_impl\_spmv\_Serial.cpp.o fails if you use nvcc and have cuda disabled [\#209](https://github.com/kokkos/kokkos/issues/209)
+- Kokkos Serial execution space is not tested with TeamPolicy. [\#207](https://github.com/kokkos/kokkos/issues/207)
+- Unit test failure on Hansen KokkosCore\_UnitTest\_Cuda\_MPI\_1 [\#200](https://github.com/kokkos/kokkos/issues/200)
+- nvcc compiler warning: calling a \_\_host\_\_ function from a \_\_host\_\_ \_\_device\_\_ function is not allowed [\#180](https://github.com/kokkos/kokkos/issues/180)
+- Intel 15 build error with defaulted "move" operators [\#171](https://github.com/kokkos/kokkos/issues/171)
+- missing libkokkos.a during Trilinos 12.4.2 build, yet other libkokkos\*.a libs are there [\#165](https://github.com/kokkos/kokkos/issues/165)
+- Tie atomic updates to execution space or even to thread team? \(speculation\) [\#144](https://github.com/kokkos/kokkos/issues/144)
+- New View: Compiletime/size Test [\#137](https://github.com/kokkos/kokkos/issues/137)
+- New View : Performance Test [\#136](https://github.com/kokkos/kokkos/issues/136)
+- Signed/unsigned comparison warning in CUDA parallel [\#130](https://github.com/kokkos/kokkos/issues/130)
+- Kokkos::complex: Need op\* w/ std::complex & real [\#126](https://github.com/kokkos/kokkos/issues/126)
+- Use uintptr\_t for casting pointers [\#110](https://github.com/kokkos/kokkos/issues/110)
+- Default thread mapping behavior between P and Q threads. [\#91](https://github.com/kokkos/kokkos/issues/91)
+- Windows: Atomic\_Fetch\_Exchange\(\) return type [\#90](https://github.com/kokkos/kokkos/issues/90)
+- Synchronic unit test is way too long [\#84](https://github.com/kokkos/kokkos/issues/84)
+- nvcc\_wrapper -\> $\(NVCC\_WRAPPER\) [\#42](https://github.com/kokkos/kokkos/issues/42)
+- Check compiler version and print helpful message [\#39](https://github.com/kokkos/kokkos/issues/39)
+- Kokkos shared memory on Cuda uses a lot of registers [\#31](https://github.com/kokkos/kokkos/issues/31)
+- Can not pass unit test `cuda.space` without a GT 720 [\#25](https://github.com/kokkos/kokkos/issues/25)
+- Makefile.kokkos lacks bounds checking option that CMake has [\#24](https://github.com/kokkos/kokkos/issues/24)
+- Kokkos can not complete unit tests with CUDA UVM enabled [\#23](https://github.com/kokkos/kokkos/issues/23)
+- Simplify teams + shared memory histogram example to remove vectorization [\#21](https://github.com/kokkos/kokkos/issues/21)
+- Kokkos needs to revert to ${PROJECT\_NAME}\_ENABLE\_CXX11 not Trilinos\_ENABLE\_CXX11 [\#17](https://github.com/kokkos/kokkos/issues/17)
+- Kokkos Base Makefile adds AVX to KNC Build [\#16](https://github.com/kokkos/kokkos/issues/16)
+- MS Visual Studio 2013 Build Errors [\#9](https://github.com/kokkos/kokkos/issues/9)
+- subview\(X, ALL\(\), j\) for 2-D LayoutRight View X: should it view a column? [\#5](https://github.com/kokkos/kokkos/issues/5)
+
+## [End_C++98](https://github.com/kokkos/kokkos/tree/End_C++98) (2015-04-15)
+
+
+\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*
--- /dev/null
+cff-version: 1.2.0
+title: Kokkos
+message: >-
+ If you use this software, please cite the overview paper
+type: software
+authors:
+ - name: The Kokkos authors
+ website: https://kokkos.org/community/team/
+identifiers:
+ - type: url
+    value: 'https://kokkos.org/kokkos-core-wiki/citation.html'
+repository-code: 'https://github.com/kokkos/kokkos'
+url: 'https://kokkos.org/'
+license: Apache-2.0
+preferred-citation:
+ type: article
+ authors:
+ - given-names: Christian R.
+ family-names: Trott
+ - given-names: Damien
+ family-names: Lebrun-Grandié
+ - given-names: Daniel
+ family-names: Arndt
+ - family-names: Ciesko
+ given-names: Jan
+ - given-names: Vinh
+ family-names: Dang
+ - family-names: Ellingwood
+ given-names: Nathan
+ - given-names: Rahulkumar
+ family-names: Gayatri
+ - given-names: Evan
+ family-names: Harvey
+ - given-names: Daisy S.
+ family-names: Hollman
+ - given-names: Dan
+ family-names: Ibanez
+ - given-names: Nevin
+ family-names: Liber
+ - given-names: Jonathan
+ family-names: Madsen
+ - given-names: Jeff
+ family-names: Miles
+ - given-names: David
+ family-names: Poliakoff
+ - given-names: Amy
+ family-names: Powell
+ - given-names: Sivasankaran
+ family-names: Rajamanickam
+ - given-names: Mikael
+ family-names: Simberg
+ - given-names: Dan
+ family-names: Sunderland
+ - given-names: Bruno
+ family-names: Turcksin
+ - given-names: Jeremiah
+ family-names: Wilke
+ doi: 10.1109/TPDS.2021.3097283
+ journal: IEEE Transactions on Parallel and Distributed Systems
+ start: 805
+ end: 817
+ title: "Kokkos 3: Programming Model Extensions for the Exascale Era"
+ volume: 33
+ issue: 4
+ year: 2022
--- /dev/null
+cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
+
+# Disable in-source builds to prevent source tree corruption.
+if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
+ message(
+ FATAL_ERROR
+ "FATAL: In-source builds are not allowed. You should create a separate directory for build files and delete CMakeCache.txt."
+ )
+endif()
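+
+# A typical out-of-source invocation looks like this (illustrative paths):
+#   cmake -S /path/to/kokkos -B /path/to/build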
+
+# We want to detect options that were given with the wrong case.
+# To compare the given arguments against the list of valid arguments,
+# we first need to form a list of all the variables that were given:
+# if a variable name begins with KOKKOS in any casing (e.g. KoKkOS),
+# we add it to the list.
+
+get_cmake_property(_variableNames VARIABLES)
+set(KOKKOS_GIVEN_VARIABLES)
+foreach(var ${_variableNames})
+ string(TOUPPER ${var} UC_VAR)
+ string(FIND ${UC_VAR} KOKKOS IDX)
+ if(${IDX} EQUAL 0)
+ list(APPEND KOKKOS_GIVEN_VARIABLES ${var})
+ endif()
+endforeach()
+
+# Basic initialization (Used in KOKKOS_SETTINGS)
+set(Kokkos_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(KOKKOS_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(KOKKOS_SRC_PATH ${Kokkos_SOURCE_DIR})
+set(KOKKOS_PATH ${Kokkos_SOURCE_DIR})
+set(KOKKOS_TOP_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+set(PACKAGE_NAME Kokkos)
+set(PACKAGE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+
+# Is this build a subdirectory of another project?
+get_directory_property(HAS_PARENT PARENT_DIRECTORY)
+
+include(${KOKKOS_SRC_PATH}/cmake/kokkos_functions.cmake)
+include(${KOKKOS_SRC_PATH}/cmake/kokkos_pick_cxx_std.cmake)
+
+set(KOKKOS_ENABLED_OPTIONS) #exported in config file
+set(KOKKOS_ENABLED_DEVICES) #exported in config file
+set(KOKKOS_ENABLED_TPLS) #exported in config file
+set(KOKKOS_ENABLED_ARCH_LIST) #exported in config file
+
+#These are helper flags used for sanity checks during config
+#Certain features should depend on other features being configured first
+set(KOKKOS_CFG_DAG_NONE On) #sentinel to indicate no dependencies
+set(KOKKOS_CFG_DAG_DEVICES_DONE Off)
+set(KOKKOS_CFG_DAG_OPTIONS_DONE Off)
+set(KOKKOS_CFG_DAG_ARCH_DONE Off)
+set(KOKKOS_CFG_DAG_CXX_STD_DONE Off)
+set(KOKKOS_CFG_DAG_COMPILER_ID_DONE Off)
+function(KOKKOS_CFG_DEPENDS SUCCESSOR PRECURSOR)
+ set(PRE_FLAG KOKKOS_CFG_DAG_${PRECURSOR})
+ set(POST_FLAG KOKKOS_CFG_DAG_${SUCCESSOR})
+ if(NOT ${PRE_FLAG})
+ message(
+ FATAL_ERROR "Bad CMake refactor: feature ${SUCCESSOR} cannot be configured until ${PRECURSOR} is configured"
+ )
+ endif()
+ global_set(${POST_FLAG} On)
+endfunction()
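+
+# Illustrative use of the check above (hypothetical call sites, not actual
+# ones): a configuration step that must run after the device list has been
+# processed would begin with
+#   kokkos_cfg_depends(OPTIONS_DONE DEVICES_DONE)
+# which aborts unless KOKKOS_CFG_DAG_DEVICES_DONE is already On, and then
+# marks KOKKOS_CFG_DAG_OPTIONS_DONE as On for steps that depend on it.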
+
+list(APPEND CMAKE_MODULE_PATH cmake/Modules)
+
+set(CMAKE_DISABLE_SOURCE_CHANGES ON)
+set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
+
+# What language are we compiling Kokkos as
+# downstream dependencies need to match this!
+set(KOKKOS_COMPILE_LANGUAGE CXX)
+# use lower case here since we haven't parsed options yet
+if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_CUDA)
+
+  # Without adding CXX as a language for the project we would not get a C++
+  # compiler enabled. We still need a C++ compiler even if we build all our
+  # .cpp files as CUDA, because otherwise C++ language features do not work.
+  # This is just the rather odd way CMake handles it: CUDA does not imply C++,
+  # even though CUDA is a C++ extension (though arguably it wasn't back in the
+  # CUDA 4 or 5 days).
+ set(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
+
+ set(KOKKOS_COMPILE_LANGUAGE CUDA)
+endif()
+# use lower case here since we haven't parsed options yet
+if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE AND Kokkos_ENABLE_HIP)
+
+  # Without adding CXX as a language for the project we would not get a C++
+  # compiler enabled. We still need a C++ compiler even if we build all our
+  # .cpp files as HIP, because otherwise C++ language features do not work.
+ set(KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE CXX)
+
+ set(KOKKOS_COMPILE_LANGUAGE HIP)
+endif()
+
+if(Spack_WORKAROUND)
+ if(Kokkos_ENABLE_COMPILE_AS_CMAKE_LANGUAGE)
+ message(FATAL_ERROR "Can't currently use Kokkos_ENABLE_COMPILER_AS_CMAKE_LANGUAGE in a spack installation!")
+ endif()
+
+  # If we are explicitly using Spack for development, replace the
+  # Spack compiler wrapper with the underlying C++ compiler.
+ set(SPACK_CXX $ENV{SPACK_CXX})
+ if(SPACK_CXX)
+ set(CMAKE_CXX_COMPILER ${SPACK_CXX} CACHE STRING "the C++ compiler" FORCE)
+ set(ENV{CXX} ${SPACK_CXX})
+ endif()
+endif()
+# Always call the project command to define Kokkos_ variables
+# and to make sure that C++ is an enabled language
+project(Kokkos ${KOKKOS_COMPILE_LANGUAGE} ${KOKKOS_INTERNAL_EXTRA_COMPILE_LANGUAGE})
+if(NOT HAS_PARENT)
+ if(NOT CMAKE_BUILD_TYPE)
+ set(DEFAULT_BUILD_TYPE "RelWithDebInfo")
+ message(STATUS "Setting build type to '${DEFAULT_BUILD_TYPE}' as none was specified.")
+ set(CMAKE_BUILD_TYPE "${DEFAULT_BUILD_TYPE}"
+ CACHE STRING "Choose the type of build, options are: Debug, Release, RelWithDebInfo and MinSizeRel." FORCE
+ )
+ endif()
+endif()
+
+if(NOT CMAKE_SIZEOF_VOID_P)
+ string(FIND ${CMAKE_CXX_COMPILER} nvcc_wrapper FIND_IDX)
+ if(NOT FIND_IDX STREQUAL -1)
+ message(
+ FATAL_ERROR
+ "Kokkos did not configure correctly and failed to validate compiler. The most likely cause is CUDA linkage using nvcc_wrapper. Please ensure your CUDA environment is correctly configured."
+ )
+ else()
+ message(
+ FATAL_ERROR
+ "Kokkos did not configure correctly and failed to validate compiler. The most likely cause is linkage errors during CMake compiler validation. Please consult the CMake error log shown below for the exact error during compiler validation"
+ )
+ endif()
+elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 4)
+ message(WARNING "32-bit builds are experimental and not officially supported.")
+ set(KOKKOS_IMPL_32BIT ON)
+ else()
+ message(
+ FATAL_ERROR
+ "Kokkos assumes a 64-bit build, i.e., 8-byte pointers, but found ${CMAKE_SIZEOF_VOID_P}-byte pointers instead;"
+ )
+ endif()
+endif()
+
+set(Kokkos_VERSION_MAJOR 4)
+set(Kokkos_VERSION_MINOR 5)
+set(Kokkos_VERSION_PATCH 1)
+set(Kokkos_VERSION "${Kokkos_VERSION_MAJOR}.${Kokkos_VERSION_MINOR}.${Kokkos_VERSION_PATCH}")
+message(STATUS "Kokkos version: ${Kokkos_VERSION}")
+math(EXPR KOKKOS_VERSION "${Kokkos_VERSION_MAJOR} * 10000 + ${Kokkos_VERSION_MINOR} * 100 + ${Kokkos_VERSION_PATCH}")
+# The mathematical expressions below are not strictly necessary, but they
+# eliminate the rather aggravating leading 0 in the release's patch version
+# number and, in some way, are a sanity check for our arithmetic
+math(EXPR KOKKOS_VERSION_MAJOR "${KOKKOS_VERSION} / 10000")
+math(EXPR KOKKOS_VERSION_MINOR "${KOKKOS_VERSION} / 100 % 100")
+math(EXPR KOKKOS_VERSION_PATCH "${KOKKOS_VERSION} % 100")
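+# For example (a sanity check of the arithmetic above): version 4.5.1 encodes
+# to KOKKOS_VERSION = 4 * 10000 + 5 * 100 + 1 = 40501, which decodes back to
+# major = 40501 / 10000 = 4, minor = 40501 / 100 % 100 = 5, and
+# patch = 40501 % 100 = 1.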
+
+# Load either the real TriBITS or a TriBITS wrapper
+# for certain utility functions that are universal (like GLOBAL_SET)
+include(${KOKKOS_SRC_PATH}/cmake/fake_tribits.cmake)
+
+if(Kokkos_ENABLE_CUDA)
+ # If we are building CUDA, we have tricked CMake because we declare a CXX project
+ # If the default C++ standard for a given compiler matches the requested
+ # standard, then CMake just omits the -std flag in later versions of CMake
+  # This breaks CUDA compilation (the CUDA compiler can have a different
+  # default -std than the underlying host compiler). Setting this variable
+  # forces CMake to always add the -std flag, even if it thinks it is not needed.
+ global_set(CMAKE_CXX_STANDARD_DEFAULT 98)
+endif()
+
+# These are the variables we will append to as we go
+# I really wish these were regular variables
+# but scoping issues can make it difficult
+global_set(KOKKOS_COMPILE_OPTIONS)
+global_set(KOKKOS_LINK_OPTIONS)
+global_set(KOKKOS_AMDGPU_OPTIONS)
+global_set(KOKKOS_CUDA_OPTIONS)
+global_set(KOKKOS_CUDAFE_OPTIONS)
+global_set(KOKKOS_XCOMPILER_OPTIONS)
+# We need to append text here for making sure TPLs
+# we import are available for an installed Kokkos
+global_set(KOKKOS_TPL_EXPORTS)
+# KOKKOS_DEPENDENCE is used by kokkos_launch_compiler
+global_set(KOKKOS_COMPILE_DEFINITIONS KOKKOS_DEPENDENCE)
+# MSVC never goes through kokkos_launch_compiler
+if(NOT MSVC)
+ global_append(KOKKOS_LINK_OPTIONS -DKOKKOS_DEPENDENCE)
+endif()
+
+include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/kokkos_configure_trilinos.cmake)
+
+if(Kokkos_ENABLE_TESTS)
+ find_package(GTest QUIET)
+endif()
+
+# Include a set of Kokkos-specific wrapper functions that
+# will either call raw CMake or TriBITS
+# These are functions like KOKKOS_INCLUDE_DIRECTORIES
+include(${KOKKOS_SRC_PATH}/cmake/kokkos_tribits.cmake)
+
+# Check the environment and set certain variables
+# to allow platform-specific checks
+include(${KOKKOS_SRC_PATH}/cmake/kokkos_check_env.cmake)
+
+include(${KOKKOS_SRC_PATH}/cmake/build_env_info.cmake)
+check_git_setup()
+
+# The build environment setup goes in the following steps
+# 1) Check all the enable options. This includes checking Kokkos_DEVICES
+# 2) Check the compiler ID (type and version)
+# 3) Check the CXX standard and select important CXX flags
+# 4) Check for any third-party libraries (TPLs) like hwloc
+# 5) Check if optimizing for a particular architecture and add arch-specific flags
+kokkos_setup_build_environment()
+
+# Finish off the build
+# 6) Recurse into subdirectories and configure individual libraries
+# 7) Export and install targets
+
+option(BUILD_SHARED_LIBS "Build shared libraries" OFF)
+
+set(KOKKOS_COMPONENT_LIBRARIES kokkoscore kokkoscontainers kokkosalgorithms kokkossimd)
+set_property(GLOBAL PROPERTY KOKKOS_INT_LIBRARIES kokkos ${KOKKOS_COMPONENT_LIBRARIES})
+
+if(HAS_PARENT)
+ set(KOKKOS_HEADER_DIR "include/kokkos")
+ set(KOKKOS_IS_SUBDIRECTORY TRUE)
+else()
+ set(KOKKOS_HEADER_DIR "${CMAKE_INSTALL_INCLUDEDIR}")
+ set(KOKKOS_IS_SUBDIRECTORY FALSE)
+endif()
+
+#------------------------------------------------------------------------------
+#
+# A) Forward declare the package so that certain options are also defined for
+# subpackages
+
+#------------------------------------------------------------------------------
+#
+# D) Process the subpackages (subdirectories) for Kokkos
+#
+kokkos_process_subpackages()
+
+#------------------------------------------------------------------------------
+#
+# E) If Kokkos itself is enabled, process the Kokkos package
+#
+
+kokkos_configure_core()
+
+if(NOT Kokkos_INSTALL_TESTING)
+ add_library(kokkos INTERFACE)
+ #Make sure in-tree projects can reference this as Kokkos::
+ #to match the installed target names
+ add_library(Kokkos::kokkos ALIAS kokkos)
+ # all_libs target is required for TriBITS-compliance
+ add_library(Kokkos::all_libs ALIAS kokkos)
+ target_link_libraries(kokkos INTERFACE ${KOKKOS_COMPONENT_LIBRARIES})
+ kokkos_internal_add_library_install(kokkos)
+endif()
+include(${KOKKOS_SRC_PATH}/cmake/kokkos_install.cmake)
+
+# nvcc_wrapper is Kokkos' wrapper for NVIDIA's NVCC CUDA compiler.
+# Kokkos needs nvcc_wrapper in order to build. Other libraries and
+# executables also need nvcc_wrapper. Thus, we need to install it.
+# If the argument of DESTINATION is a relative path, CMake computes it
+# as relative to ${CMAKE_INSTALL_PREFIX}.
+# KOKKOS_INSTALL_ADDITIONAL_FILES will install nvcc wrapper and other generated
+# files
+kokkos_install_additional_files()
+
+# Finally - if we are a subproject - make sure the enabled devices are visible
+if(HAS_PARENT)
+  foreach(DEV ${KOKKOS_ENABLED_DEVICES})
+ #I would much rather not make these cache variables or global properties, but I can't
+ #make any guarantees on whether PARENT_SCOPE is good enough to make
+ #these variables visible where I need them
+ set(Kokkos_ENABLE_${DEV} ON PARENT_SCOPE)
+ set_property(GLOBAL PROPERTY Kokkos_ENABLE_${DEV} ON)
+ endforeach()
+endif()
--- /dev/null
+# Contributing to Kokkos
+
+## Pull Requests
+We actively welcome pull requests.
+1. Fork the repo and create your branch from `develop`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+
+Before sending your patch for review, please try to ensure that it is formatted properly. We use clang-format version 16 for this.
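+
+For example, to reformat the files you touched in place (a sketch, assuming a
+clang-format version 16 binary is on your `PATH`; the file names are
+illustrative):
+
+```bash
+clang-format -i core/src/MyChangedFile.cpp core/src/MyChangedFile.hpp
+```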
+
+## Issues
+We use GitHub issues to track public bugs. Please make sure your description is clear and includes sufficient instructions to reproduce the issue.
+
+## License
+By contributing to Kokkos, you agree that your contributions will be licensed under the LICENSE file in the root directory of this source tree.
--- /dev/null
+************************************************************************
+
+ Kokkos v. 4.0
+ Copyright (2022) National Technology & Engineering
+ Solutions of Sandia, LLC (NTESS).
+
+Under the terms of Contract DE-NA0003525 with NTESS,
+the U.S. Government retains certain rights in this software.
--- /dev/null
+ ==============================================================================
+ Kokkos is under the Apache License v2.0 with LLVM Exceptions:
+ ==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ ---- LLVM Exceptions to the Apache 2.0 License ----
+
+ As an exception, if, as a result of your compiling your source code, portions
+ of this Software are embedded into an Object form of such source code, you
+ may redistribute such embedded portions in such Object form without complying
+ with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+ In addition, if you combine or link compiled forms of this Software with
+ software that is licensed under the GPLv2 ("Combined Software") and if a
+ court of competent jurisdiction determines that the patent provision (Section
+ 3), the indemnity provision (Section 9) or other Section of the License
+ conflicts with the conditions of the GPLv2, you may retroactively and
+ prospectively choose to deem waived or otherwise exclude such Section(s) of
+ the License, but only in their entirety and only with respect to the Combined
+ Software.
+
+ ==============================================================================
+ Software from third parties included in Kokkos:
+ ==============================================================================
+
+ Kokkos contains third party software which is under different license
+ terms. All such code will be identified clearly using at least one of two
+ mechanisms:
+ 1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+ 2) It will contain specific license and restriction terms at the top of every
+ file.
+
+
+ THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Questions? Contact:
+ Christian R. Trott (crtrott@sandia.gov) and
+ Damien T. Lebrun-Grandie (lebrungrandt@ornl.gov)
+
+ ************************************************************************
--- /dev/null
+[](https://kokkos.org)
+
+# Kokkos: Core Libraries
+
+Kokkos Core implements a programming model in C++ for writing performance-portable
+applications targeting all major HPC platforms. For that purpose, it provides
+abstractions for both parallel execution of code and data management.
+Kokkos is designed to target complex node architectures with N-level memory
+hierarchies and multiple types of execution resources. It currently can use
+CUDA, HIP, SYCL, HPX, OpenMP and C++ threads as backend programming models with several other
+backends in development.
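+
+For a flavor of the model, here is a minimal, self-contained program (an
+illustrative sketch, not a file from this repository) that allocates a Kokkos
+View and fills it in parallel on the default execution space:
+
+```c++
+#include <Kokkos_Core.hpp>
+
+int main(int argc, char* argv[]) {
+  Kokkos::initialize(argc, argv);
+  {
+    // Data management: a 1D array of doubles in the default memory space
+    Kokkos::View<double*> x("x", 100);
+    // Parallel execution: fill the array on the default execution space
+    Kokkos::parallel_for("fill", x.extent(0), KOKKOS_LAMBDA(const int i) {
+      x(i) = 2.0 * i;
+    });
+  }
+  Kokkos::finalize();
+}
+```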
+
+**Kokkos Core is part of the [Kokkos C++ Performance Portability Programming Ecosystem](https://kokkos.org/about/abstract/).**
+
+Kokkos is a [Linux Foundation](https://linuxfoundation.org) project.
+
+## Learning about Kokkos
+
+To start learning about Kokkos:
+
+- [Kokkos Lectures](https://kokkos.org/kokkos-core-wiki/videolectures.html): they contain a mix of lecture videos and hands-on exercises covering all the important capabilities.
+
+- [Programming guide](https://kokkos.org/kokkos-core-wiki/programmingguide.html): contains in "narrative" form a technical description of the programming model, machine model, and the main building blocks like the Views and parallel dispatch.
+
+- [API reference](https://kokkos.org/kokkos-core-wiki/): organized by category, i.e., [core](https://kokkos.org/kokkos-core-wiki/API/core-index.html), [algorithms](https://kokkos.org/kokkos-core-wiki/API/algorithms-index.html) and [containers](https://kokkos.org/kokkos-core-wiki/API/containers-index.html) or, if you prefer, in [alphabetical order](https://kokkos.org/kokkos-core-wiki/API/alphabetical.html).
+
+- [Use cases and Examples](https://kokkos.org/kokkos-core-wiki/usecases.html): a series of examples ranging from how to use Kokkos with MPI to Fortran interoperability.
+
+## Obtaining Kokkos
+
+The latest release of Kokkos can be obtained from the [GitHub releases page](https://github.com/kokkos/kokkos/releases/latest).
+
+The current release is [4.5.01](https://github.com/kokkos/kokkos/releases/tag/4.5.01).
+
+```bash
+curl -OJ -L https://github.com/kokkos/kokkos/releases/download/4.5.01/kokkos-4.5.01.tar.gz
+# Or with wget
+wget https://github.com/kokkos/kokkos/releases/download/4.5.01/kokkos-4.5.01.tar.gz
+```
+
+To clone the latest development version of Kokkos from GitHub:
+
+```bash
+git clone -b develop https://github.com/kokkos/kokkos.git
+```
+
+### Building Kokkos
+
+To build Kokkos, you will need to have a C++ compiler that supports C++17 or later.
+All requirements including minimum and primary tested compiler versions can be found [here](https://kokkos.org/kokkos-core-wiki/requirements.html).
+
+Building and installation instructions are described [here](https://kokkos.org/kokkos-core-wiki/building.html).
+
+You can also install Kokkos using [Spack](https://spack.io/): `spack install kokkos`. [Available configuration options](https://packages.spack.io/package.html?name=kokkos) can be displayed using `spack info kokkos`.
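+
+As a quick orientation, a minimal out-of-source CMake build might look as follows.
+This is only a sketch: the backend and install path are illustrative, and the
+instructions linked above cover the full set of options.
+
+```bash
+# Configure, build, and install Kokkos with the OpenMP backend enabled
+cmake -B build -S kokkos-4.5.01 \
+  -DKokkos_ENABLE_OPENMP=ON \
+  -DCMAKE_INSTALL_PREFIX="$HOME/kokkos-install"
+cmake --build build --parallel
+cmake --install build
+```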
+
+## For the complete documentation: [kokkos.org/kokkos-core-wiki/](https://kokkos.org/kokkos-core-wiki/)
+
+## Support
+
+For questions, find us on Slack: https://kokkosteam.slack.com or open a GitHub issue.
+
+For non-public questions send an email to: *crtrott(at)sandia.gov*
+
+## Contributing
+
+Please see [this page](https://kokkos.org/kokkos-core-wiki/contributing.html) for details on how to contribute.
+
+## Citing Kokkos
+
+Please see the [following page](https://kokkos.org/kokkos-core-wiki/citation.html).
+
+## License
+
+[](https://spdx.org/licenses/LLVM-exception.html)
+
+Under the terms of Contract DE-NA0003525 with NTESS,
+the U.S. Government retains certain rights in this software.
+
+The full license statement used in all headers is available [here](https://kokkos.org/kokkos-core-wiki/license.html) or
+[here](https://github.com/kokkos/kokkos/blob/develop/LICENSE).
--- /dev/null
+# Reporting Security Issues
+
+To report a security issue, please email
+[lebrungrandt@ornl.gov](mailto:lebrungrandt@ornl.gov)
+and [crtrott@sandia.gov](mailto:crtrott@sandia.gov)
+with a description of the issue, the steps you took to reproduce it,
+affected versions, and, if known, mitigations for the issue.
+
+Our vulnerability management team will respond within 5 working days of your
+email. If the issue is confirmed as a vulnerability, we will open a
+Security Advisory and acknowledge your contributions as part of it. This project
+follows a 90-day disclosure timeline.
--- /dev/null
+
+
+# Kokkos Spack
+
+This document gives instructions for using Spack to install Kokkos and for developing packages that depend on Kokkos.
+
+## Getting Started
+
+Make sure you have downloaded [Spack](https://github.com/spack/spack).
+The easiest way to configure the Spack environment is:
+````bash
+> source spack/share/spack/setup-env.sh
+````
+with other scripts available for other shells.
+You can display information about how to install packages with:
+````bash
+> spack info kokkos
+````
+This will print all the information about how to install Kokkos with Spack.
+For detailed instructions on how to use Spack, see the [User Manual](https://spack.readthedocs.io).
+
+## Setting Up Spack: Avoiding the Package Cascade
+By default, Spack doesn't 'see' anything on your system, including things like CMake and CUDA.
+This cascade of rebuilds can be limited by adding a `packages.yaml` to your `$HOME/.spack` folder that includes CMake (and CUDA, if applicable). For example, your `packages.yaml` file could be:
+````yaml
+packages:
+ cuda:
+ buildable: false
+ externals:
+ - prefix: /opt/local/ppc64le-pwr8-nvidia/cuda/10.1.243
+ spec: cuda@10.1.243
+ - modules:
+ - cuda/10.1.243
+ spec: cuda@10.1.243
+ cmake:
+ buildable: false
+ externals:
+ - prefix: /opt/local/ppc64le/cmake/3.16.8
+ spec: cmake@3.16.8
+ - modules:
+ - cmake/3.16.8
+ spec: cmake@3.16.8
+````
+The `modules` entry is only necessary on systems that require loading Modules (i.e., most DOE systems).
+The `buildable: false` flag makes sure Spack fails loudly if there is a path error,
+rather than rebuilding everything from scratch because a typo prevented `cmake` from being found.
+You can verify your environment is set up correctly by running `spack graph` or `spack spec`.
+For example:
+````bash
+> spack graph kokkos +cuda
+o kokkos
+|\
+o | cuda
+ /
+o cmake
+````
+Without the existing CUDA and CMake identified in `packages.yaml`, a subset (!) of the output would be:
+````bash
+o kokkos
+|\
+| o cmake
+| |\
+| | | |\
+| | | | | |\
+| | | | | | | |\
+| | | | | | | | | |\
+| | | | | | | o | | | libarchive
+| | | | | | | |\ \ \ \
+| | | | | | | | | |\ \ \ \
+| | | | | | | | | | | | |_|/
+| | | | | | | | | | | |/| |
+| | | | | | | | | | | | | o curl
+| | |_|_|_|_|_|_|_|_|_|_|/|
+| |/| | | |_|_|_|_|_|_|_|/
+| | | | |/| | | | | | | |
+| | | | o | | | | | | | | openssl
+| |/| | | | | | | | | | |
+| | | | | | | | | | o | | libxml2
+| | |_|_|_|_|_|_|_|/| | |
+| | | | | | | | | | |\ \ \
+| o | | | | | | | | | | | | zlib
+| / / / / / / / / / / / /
+| o | | | | | | | | | | | xz
+| / / / / / / / / / / /
+| o | | | | | | | | | | rhash
+| / / / / / / / / / /
+| | | | o | | | | | | nettle
+| | | | |\ \ \ \ \ \ \
+| | | o | | | | | | | | libuv
+| | | | o | | | | | | | autoconf
+| | |_|/| | | | | | | |
+| | | | |/ / / / / / /
+| o | | | | | | | | | perl
+| o | | | | | | | | | gdbm
+| o | | | | | | | | | readline
+````
+
+## Configuring Kokkos as a Project Dependency
+Say you have a project "SuperScience" which needs to use Kokkos.
+In your `package.py` file, you would generally include something like:
+````python
+class SuperScience(CMakePackage):
+ ...
+ depends_on("kokkos")
+````
+Often projects want to tweak behavior when using certain features, e.g.
+````python
+ depends_on("kokkos+cuda", when="+cuda")
+````
+if your project needs CUDA-specific logic to configure and build.
+This illustrates the general principle in Spack of "flowing-up".
+A user requests a feature in the final app:
+````bash
+> spack install superscience+cuda
+````
+This flows upstream to the Kokkos dependency, causing the `kokkos+cuda` variant to build.
+The downstream app (SuperScience) tells the upstream app (Kokkos) how to build.
+
+Because Kokkos is a performance portability library, it somewhat inverts this principle.
+Kokkos "flows-down", telling your application how best to configure for performance.
+Rather than a downstream app (SuperScience) telling the upstream (Kokkos) what variants to build,
+a pre-built Kokkos should tell the downstream app (SuperScience) what variants to use.
+Kokkos works best when there is an "expert" configuration installed on your system.
+Your build should simply request `-DKokkos_ROOT=<BEST_KOKKOS_FOR_MY_SYSTEM>` and configure appropriately based on the Kokkos it finds.
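+For instance, a downstream CMake project (project and target names here are
+hypothetical) would typically just find and link whatever Kokkos it is pointed at:
+````cmake
+# Picks up the installation that -DKokkos_ROOT points at
+find_package(Kokkos REQUIRED)
+add_executable(superscience main.cpp)
+target_link_libraries(superscience Kokkos::kokkos)
+````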
+
+Kokkos has many, many build variants.
+Where possible, projects should only depend on a general Kokkos, not on specific variants.
+We instead recommend adding, for each system you build on, a Kokkos configuration to your `packages.yaml` file (usually found in `~/.spack` for individual users).
+For a Xeon + Volta system, this could look like:
+````yaml
+ kokkos:
+ variants: +cuda +openmp +cuda_lambda +wrapper ^cuda@10.1 cuda_arch=70
+ compiler: [gcc@7.2.0]
+````
+which gives the "best" Kokkos configuration as CUDA+OpenMP optimized for a Volta 70 architecture using CUDA 10.1.
+It also enables support for CUDA Lambdas.
+The `+wrapper` option tells Kokkos to build with the special `nvcc_wrapper` (more below).
+Note here that we use the built-in `cuda_arch` variant of Spack to specify the architecture.
+For a Haswell system, we use
+````yaml
+ kokkos:
+ variants: +openmp std=14 target=haswell
+ compiler: [intel@18]
+````
+which uses the built-in microarchitecture variants of Spack.
+Consult the Spack documentation for more details of Spack microarchitectures
+and CUDA architectures.
+Spack does not currently provide an AMD GPU microarchitecture option.
+If building for HIP or an AMD GPU, Kokkos provides an `amd_gpu_arch` variant similar to `cuda_arch`.
+````yaml
+ kokkos:
+ variants: +hip amd_gpu_arch=vega900
+````
+
+Without an optimal default in your `packages.yaml` file, it is highly likely that the default Kokkos configuration you get will not be what you want.
+For example, CUDA is not enabled by default (there is no easy logic to conditionally activate this for CUDA-enabled systems).
+If you don't specify a CUDA build variant in a `packages.yaml` and you build your Kokkos-dependent project:
+````bash
+> spack install superscience
+````
+you may end up just getting the default Kokkos (i.e. Serial).
+Before running `spack install <package>` we recommend running `spack spec <package>` to confirm your dependency tree is correct.
+For example, with Kokkos Kernels:
+````bash
+kokkos-kernels@3.0%gcc@8.3.0~blas build_type=RelWithDebInfo ~cblas~complex_double~complex_float~cublas~cuda cuda_arch=none ~cusparse~diy+double execspace_cuda=auto execspace_openmp=auto execspace_serial=auto execspace_threads=auto ~float~lapack~lapacke+layoutleft~layoutright memspace_cudaspace=auto memspace_cudauvmspace=auto +memspace_hostspace~mkl+offset_int+offset_size_t~openmp+ordinal_int~ordinal_int64_t~serial~superlu arch=linux-rhel7-skylake_avx512
+ ^cmake@3.16.2%gcc@8.3.0~doc+ncurses+openssl+ownlibs~qt arch=linux-rhel7-skylake_avx512
+ ^kokkos@3.0%gcc@8.3.0~aggressive_vectorization~amdavx~armv80~armv81~armv8_thunderx~armv8_tx2~bdw~bgq build_type=RelWithDebInfo ~carrizo~compiler_warnings+cuda cuda_arch=none +cuda_lambda~cuda_ldg_intrinsic~cuda_relocatable_device_code~cuda_uvm~debug~debug_bounds_check~debug_dualview_modify_check~deprecated_code~diy~epyc~examples~explicit_instantiation~fiji~gfx901~hpx~hpx_async_dispatch~hsw~hwloc~kaveri~kepler30~kepler32~kepler35~kepler37~knc~knl~maxwell50~maxwell52~maxwell53~memkind~numactl+openmp~pascal60~pascal61~power7~power8~power9+profiling~profiling_load_print~pthread~qthread~rocm~ryzen~serial~skx~snb std=14 ~tests~turing75~vega+volta70~volta72+wrapper~wsm arch=linux-rhel7-skylake_avx512
+ ^cuda@10.1%gcc@8.3.0 arch=linux-rhel7-skylake_avx512
+ ^kokkos-nvcc-wrapper@old%gcc@8.3.0 build_type=RelWithDebInfo +mpi arch=linux-rhel7-skylake_avx512
+ ^openmpi@4.0.2%gcc@8.3.0~cuda+cxx_exceptions fabrics=none ~java~legacylaunchers~memchecker patches=073477a76bba780c67c36e959cd3ee6910743e2735c7e76850ffba6791d498e4 ~pmi schedulers=none ~sqlite3~thread_multiple+vt arch=linux-rhel7-skylake_avx512
+````
+The output can be very verbose, but we can verify the expected `kokkos`:
+````bash
+kokkos@3.0%gcc@8.3.0~aggressive_vectorization~amdavx~armv80~armv81~armv8_thunderx~armv8_tx2~bdw~bgq build_type=RelWithDebInfo ~carrizo~compiler_warnings+cuda cuda_arch=none +cuda_lambda~cuda_ldg_intrinsic~cuda_relocatable_device_code~cuda_uvm~debug~debug_bounds_check~debug_dualview_modify_check~deprecated_code~diy~epyc~examples~explicit_instantiation~fiji~gfx901~hpx~hpx_async_dispatch~hsw~hwloc~kaveri~kepler30~kepler32~kepler35~kepler37~knc~knl~maxwell50~maxwell52~maxwell53~memkind~numactl+openmp~pascal60~pascal61~power7~power8~power9+profiling~profiling_load_print~pthread~qthread~rocm~ryzen~serial~skx~snb std=11 ~tests~turing75~vega+volta70~volta72+wrapper~wsm arch=linux-rhel7-skylake_avx512
+````
+We can verify that we indeed have `+volta70` and `+wrapper`.
+
+### Spack Environments
+The encouraged way to use Spack is with Spack environments ([more details here](https://spack-tutorial.readthedocs.io/en/latest/tutorial_environments.html#dealing-with-many-specs-at-once)).
+Rather than installing packages one-at-a-time, you add packages to an environment.
+After adding all packages, you concretize and install them all.
+Using environments, one can explicitly add a desired Kokkos for the environment, e.g.
+````bash
+> spack add kokkos +cuda +cuda_lambda +volta70
+> spack add my_project +my_variant
+> ...
+> spack install
+````
+All packages within the environment will build against the CUDA-enabled Kokkos,
+even if they only request a default Kokkos.
+
+## NVCC Wrapper
+Kokkos is a C++ project, but often builds for the CUDA backend.
+This is particularly problematic with CMake, since at present `nvcc` does not accept all the flags that normally get passed to a C++ compiler.
+Kokkos provides `nvcc_wrapper` that identifies correctly as a C++ compiler to CMake and accepts C++ flags, but uses `nvcc` as the underlying compiler.
+`nvcc` itself also uses an underlying host compiler, e.g. GCC.
+
+In Spack, the underlying host compiler is specified as below, e.g.:
+````bash
+> spack install package %gcc@8.0.0
+````
+This is still valid for Kokkos. To use the special wrapper for CUDA builds, request a desired compiler and simply add the `+wrapper` variant.
+````bash
+> spack install kokkos +cuda +wrapper %gcc@7.2.0
+````
+Downstream projects depending on Kokkos need to override their compiler.
+Kokkos provides the compiler in a `kokkos_cxx` variable,
+which points to `nvcc_wrapper` when needed and to the regular compiler otherwise.
+Spack projects already do this to use MPI compiler wrappers.
+````python
+def cmake_args(self):
+ options = []
+ ...
+ options.append("-DCMAKE_CXX_COMPILER=%s" % self.spec["kokkos"].kokkos_cxx)
+ ...
+ return options
+````
+Note: `nvcc_wrapper` works with the MPI compiler wrappers.
+If building your project with MPI, do NOT set your compiler to `nvcc_wrapper`.
+Instead set your compiler to `mpicxx` and `nvcc_wrapper` will be used under the hood.
+````python
+def cmake_args(self):
+ options = []
+ ...
+ options.append("-DCMAKE_CXX_COMPILER=%s" % self.spec["mpi"].mpicxx)
+ ...
+ return options
+````
+To accomplish this, `nvcc_wrapper` must depend on MPI (even though it uses no MPI).
+This has the unfortunate consequence that Kokkos CUDA projects not using MPI will implicitly depend on MPI anyway.
+This behavior is necessary for now, but will hopefully be removed later.
+When using environments, if MPI is not needed, you can remove the MPI dependency with:
+````bash
+> spack add kokkos-nvcc-wrapper ~mpi
+````
+
+## Developing With Spack
+
+Spack has historically been much more suited to *deployment* of mature packages than to active testing or development.
+However, recent features have improved support for development.
+Future releases are likely to make this even easier and incorporate Git integration.
+The most common commands do a full build and install of the packages.
+If doing development, you may wish to merely set up a build environment and stop after configuring;
+this allows you to modify the source and re-build.
+Suppose you have a Kokkos checkout in the folder `kokkos-src`:
+````bash
+> spack dev-build -d kokkos-src -u cmake kokkos@develop +wrapper +openmp
+````
+Note: Always specify `develop` as the version when doing `dev-build`, except in rare cases;
+you are usually developing a feature branch that will merge into `develop`,
+hence you are effectively making a new `develop` branch.
+This sets up a development environment for you in `kokkos-src`, which you can use as follows (Bash example shown):
+
+````bash
+> cd kokkos-src
+> source spack-build-env.txt
+> cd spack-build
+> make
+````
+Before sourcing the Spack development environment, you may wish to save your current environment:
+````bash
+> declare -px > myenv.sh
+````
+When done with Spack, you can then restore your original environment:
+````bash
+> source myenv.sh
+````
--- /dev/null
+if(NOT Kokkos_INSTALL_TESTING)
+ add_subdirectory(src)
+endif()
+# FIXME_OPENACC: temporarily disabled due to unimplemented features
+if(NOT ((KOKKOS_ENABLE_OPENMPTARGET AND KOKKOS_CXX_COMPILER_ID STREQUAL NVHPC) OR KOKKOS_ENABLE_OPENACC))
+ kokkos_add_test_directories(unit_tests)
+endif()
--- /dev/null
+#I have to leave these here for tribits
+kokkos_include_directories(${CMAKE_CURRENT_BINARY_DIR})
+kokkos_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+#-----------------------------------------------------------------------------
+
+file(GLOB ALGO_HEADERS *.hpp)
+file(GLOB ALGO_SOURCES *.cpp)
+append_glob(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/*.hpp)
+append_glob(ALGO_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/std_algorithms/impl/*.hpp)
+
+install(
+ DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "*.hpp"
+)
+
+#-----------------------------------------------------------------------------
+
+# We have to pass the sources in here for Tribits
+# These will get ignored for standalone CMake and a true interface library made
+kokkos_add_interface_library(kokkosalgorithms NOINSTALLHEADERS ${ALGO_HEADERS} SOURCES ${ALGO_SOURCES})
+kokkos_lib_include_directories(
+ kokkosalgorithms ${KOKKOS_TOP_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+)
+
+kokkos_link_tpl(kokkoscontainers PUBLIC ROCTHRUST)
+kokkos_link_tpl(kokkoscore PUBLIC ONEDPL)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+void KOKKOS_ALGORITHMS_SRC_DUMMY_PREVENT_LINK_ERROR() {}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_NESTED_SORT_HPP_
+#define KOKKOS_NESTED_SORT_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NESTED_SORT
+#endif
+
+#include "sorting/Kokkos_NestedSortPublicAPI.hpp"
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NESTED_SORT
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NESTED_SORT
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_RANDOM_HPP
#define KOKKOS_RANDOM_HPP
#endif
#ifdef KOKKOS_ENABLE_HIP
template <>
-struct Random_XorShift1024_UseCArrayState<Kokkos::Experimental::HIP>
- : std::false_type {};
+struct Random_XorShift1024_UseCArrayState<Kokkos::HIP> : std::false_type {};
#endif
#ifdef KOKKOS_ENABLE_OPENMPTARGET
template <>
struct Random_UniqueIndex {
using locks_view_type = View<int**, DeviceType>;
KOKKOS_FUNCTION
- static int get_state_idx(const locks_view_type) {
+ static int get_state_idx(const locks_view_type&) {
KOKKOS_IF_ON_HOST(
(return DeviceType::execution_space::impl_hardware_thread_id();))
#if defined(KOKKOS_ENABLE_CUDA)
#define KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP Kokkos::Cuda
#elif defined(KOKKOS_ENABLE_HIP)
-#define KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP Kokkos::Experimental::HIP
+#define KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP Kokkos::HIP
#endif
template <class MemorySpace>
#ifdef KOKKOS_ENABLE_SYCL
template <class MemorySpace>
-struct Random_UniqueIndex<
- Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>> {
+struct Random_UniqueIndex<Kokkos::Device<Kokkos::SYCL, MemorySpace>> {
using locks_view_type =
- View<int**, Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>>;
+ View<int**, Kokkos::Device<Kokkos::SYCL, MemorySpace>>;
KOKKOS_FUNCTION
static int get_state_idx(const locks_view_type& locks_) {
auto item = sycl::ext::oneapi::experimental::this_nd_item<3>();
std::size_t threadIdx[3] = {item.get_local_id(2), item.get_local_id(1),
item.get_local_id(0)};
std::size_t blockIdx[3] = {item.get_group(2), item.get_group(1),
- item.get_group(0)};
+ item.get_group(0)};
std::size_t blockDim[3] = {item.get_local_range(2), item.get_local_range(1),
item.get_local_range(0)};
std::size_t gridDim[3] = {
return drand(end - start) + start;
}
- // Marsaglia polar method for drawing a standard normal distributed random
+ // Box-Muller method for drawing a standard normal distributed random
// number
KOKKOS_INLINE_FUNCTION
double normal() {
- double S = 2.0;
- double U;
- while (S >= 1.0) {
- U = 2.0 * drand() - 1.0;
- const double V = 2.0 * drand() - 1.0;
- S = U * U + V * V;
- }
- return U * std::sqrt(-2.0 * std::log(S) / S);
+ constexpr auto two_pi = 2 * Kokkos::numbers::pi_v<double>;
+
+ const double u = drand();
+ const double v = drand();
+ const double r = Kokkos::sqrt(-2.0 * Kokkos::log(u));
+ const double theta = v * two_pi;
+ return r * Kokkos::cos(theta);
}
KOKKOS_INLINE_FUNCTION
using execution_space = typename device_type::execution_space;
using locks_type = View<int**, device_type>;
using state_data_type = View<uint64_t**, device_type>;
- locks_type locks_;
- state_data_type state_;
- int num_states_;
- int padding_;
+
+ locks_type locks_ = {};
+ state_data_type state_ = {};
+ int num_states_ = {};
+ int padding_ = {};
public:
using generator_type = Random_XorShift64<DeviceType>;
- KOKKOS_INLINE_FUNCTION
- Random_XorShift64_Pool() {
- num_states_ = 0;
- padding_ = 0;
- }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift64_Pool() = default;
+
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift64_Pool(
+ Random_XorShift64_Pool const&) = default;
+
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift64_Pool& operator=(
+ Random_XorShift64_Pool const&) = default;
+#else
+ Random_XorShift64_Pool() = default;
+#endif
Random_XorShift64_Pool(uint64_t seed) {
num_states_ = 0;
init(seed, execution_space().concurrency());
}
- KOKKOS_INLINE_FUNCTION
- Random_XorShift64_Pool(const Random_XorShift64_Pool& src)
- : locks_(src.locks_), state_(src.state_), num_states_(src.num_states_) {}
-
- KOKKOS_INLINE_FUNCTION
- Random_XorShift64_Pool operator=(const Random_XorShift64_Pool& src) {
- locks_ = src.locks_;
- state_ = src.state_;
- num_states_ = src.num_states_;
- padding_ = src.padding_;
- return *this;
- }
-
void init(uint64_t seed, int num_states) {
if (seed == 0) seed = uint64_t(1318319);
// I only want to pad on CPU like archs (less than 1000 threads). 64 is a
deep_copy(locks_, h_lock);
}
- KOKKOS_INLINE_FUNCTION
- Random_XorShift64<DeviceType> get_state() const {
+ KOKKOS_INLINE_FUNCTION Random_XorShift64<DeviceType> get_state() const {
+ KOKKOS_EXPECTS(num_states_ > 0);
const int i = Impl::Random_UniqueIndex<device_type>::get_state_idx(locks_);
return Random_XorShift64<DeviceType>(state_(i, 0), i);
}
KOKKOS_INLINE_FUNCTION
void free_state(const Random_XorShift64<DeviceType>& state) const {
state_(state.state_idx_, 0) = state.state_;
+ // Release the lock only after the state has been updated in memory
+ Kokkos::memory_fence();
locks_(state.state_idx_, 0) = 0;
}
};
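+// Usage sketch (illustrative, not part of the upstream header): each thread
+// checks a generator out of the pool, draws values, and returns it so the
+// slot can be reused:
+//   Random_XorShift64_Pool<ExecSpace> pool(/*seed=*/12345);
+//   auto gen = pool.get_state();
+//   double d = gen.drand();  // uniformly distributed in [0,1)
+//   pool.free_state(gen);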
return drand(end - start) + start;
}
- // Marsaglia polar method for drawing a standard normal distributed random
+ // Box-Muller method for drawing a standard normal distributed random
// number
KOKKOS_INLINE_FUNCTION
double normal() {
- double S = 2.0;
- double U;
- while (S >= 1.0) {
- U = 2.0 * drand() - 1.0;
- const double V = 2.0 * drand() - 1.0;
- S = U * U + V * V;
- }
- return U * std::sqrt(-2.0 * std::log(S) / S);
+ constexpr auto two_pi = 2 * Kokkos::numbers::pi_v<double>;
+
+ const double u = drand();
+ const double v = drand();
+ const double r = Kokkos::sqrt(-2.0 * Kokkos::log(u));
+ const double theta = v * two_pi;
+ return r * Kokkos::cos(theta);
}
KOKKOS_INLINE_FUNCTION
using execution_space = typename device_type::execution_space;
using locks_type = View<int**, device_type>;
using int_view_type = View<int**, device_type>;
- using state_data_type = View<uint64_t * [16], device_type>;
+ using state_data_type = View<uint64_t* [16], device_type>;
- locks_type locks_;
- state_data_type state_;
- int_view_type p_;
- int num_states_;
- int padding_;
+ locks_type locks_ = {};
+ state_data_type state_ = {};
+ int_view_type p_ = {};
+ int num_states_ = {};
+ int padding_ = {};
friend class Random_XorShift1024<DeviceType>;
public:
using generator_type = Random_XorShift1024<DeviceType>;
- KOKKOS_INLINE_FUNCTION
- Random_XorShift1024_Pool() { num_states_ = 0; }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift1024_Pool() = default;
- inline Random_XorShift1024_Pool(uint64_t seed) {
- num_states_ = 0;
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift1024_Pool(
+ Random_XorShift1024_Pool const&) = default;
- init(seed, execution_space().concurrency());
- }
+ KOKKOS_DEFAULTED_FUNCTION Random_XorShift1024_Pool& operator=(
+ Random_XorShift1024_Pool const&) = default;
+#else
+ Random_XorShift1024_Pool() = default;
+#endif
- KOKKOS_INLINE_FUNCTION
- Random_XorShift1024_Pool(const Random_XorShift1024_Pool& src)
- : locks_(src.locks_),
- state_(src.state_),
- p_(src.p_),
- num_states_(src.num_states_) {}
+ Random_XorShift1024_Pool(uint64_t seed) {
+ num_states_ = 0;
- KOKKOS_INLINE_FUNCTION
- Random_XorShift1024_Pool operator=(const Random_XorShift1024_Pool& src) {
- locks_ = src.locks_;
- state_ = src.state_;
- p_ = src.p_;
- num_states_ = src.num_states_;
- padding_ = src.padding_;
- return *this;
+ init(seed, execution_space().concurrency());
}
- inline void init(uint64_t seed, int num_states) {
+ void init(uint64_t seed, int num_states) {
if (seed == 0) seed = uint64_t(1318319);
// I only want to pad on CPU like archs (less than 1000 threads). 64 is a
// magic number, or random number I just wanted something not too large and
KOKKOS_INLINE_FUNCTION
Random_XorShift1024<DeviceType> get_state() const {
+ KOKKOS_EXPECTS(num_states_ > 0);
const int i = Impl::Random_UniqueIndex<device_type>::get_state_idx(locks_);
return Random_XorShift1024<DeviceType>(state_, p_(i, 0), i);
};
KOKKOS_INLINE_FUNCTION
void free_state(const Random_XorShift1024<DeviceType>& state) const {
for (int i = 0; i < 16; i++) state_(state.state_idx_, i) = state.state_[i];
- p_(state.state_idx_, 0) = state.p_;
+ p_(state.state_idx_, 0) = state.p_;
+ // Release the lock only after the state has been updated in memory
+ Kokkos::memory_fence();
locks_(state.state_idx_, 0) = 0;
}
};
"Kokkos::fill_random",
Kokkos::RangePolicy<ExecutionSpace>(exec, 0, (LDA + 127) / 128),
Impl::fill_random_functor_begin_end<ViewType, RandomPool, 128,
- ViewType::Rank, IndexType>(
+ ViewType::rank, IndexType>(
a, g, begin, end));
}
void fill_random(ViewType a, RandomPool g,
typename ViewType::const_value_type begin,
typename ViewType::const_value_type end) {
- fill_random(typename ViewType::execution_space{}, a, g, begin, end);
+ Kokkos::fence(
+ "fill_random: fence before since no execution space instance provided");
+ typename ViewType::execution_space exec;
+ fill_random(exec, a, g, begin, end);
+ exec.fence(
+ "fill_random: fence after since no execution space instance provided");
}
template <class ViewType, class RandomPool, class IndexType = int64_t>
void fill_random(ViewType a, RandomPool g,
typename ViewType::const_value_type range) {
- fill_random(typename ViewType::execution_space{}, a, g, 0, range);
+ Kokkos::fence(
+ "fill_random: fence before since no execution space instance provided");
+ typename ViewType::execution_space exec;
+ fill_random(exec, a, g, 0, range);
+ exec.fence(
+ "fill_random: fence after since no execution space instance provided");
}
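+// Usage sketch (illustrative): fill a View with uniformly distributed values
+// in [0, range) using a pool constructed elsewhere (names are hypothetical):
+//   Kokkos::View<double*> a("a", n);
+//   Kokkos::Random_XorShift64_Pool<ExecSpace> pool(seed);
+//   Kokkos::fill_random(a, pool, 100.0);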
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SORT_HPP_
+#define KOKKOS_SORT_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#endif
+
+#include "sorting/Kokkos_BinSortPublicAPI.hpp"
+#include "sorting/Kokkos_SortPublicAPI.hpp"
+#include "sorting/Kokkos_SortByKeyPublicAPI.hpp"
+#include "sorting/Kokkos_NestedSortPublicAPI.hpp"
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_HPP
#define KOKKOS_STD_ALGORITHMS_HPP
// following the std classification.
// modifying ops
-#include "std_algorithms/Kokkos_Swap.hpp"
#include "std_algorithms/Kokkos_IterSwap.hpp"
// non-modifying sequence
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_BIN_OPS_PUBLIC_API_HPP_
+#define KOKKOS_BIN_OPS_PUBLIC_API_HPP_
+
+#include <Kokkos_Macros.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+
+template <class KeyViewType>
+struct BinOp1D {
+ int max_bins_ = {};
+ double mul_ = {};
+ double min_ = {};
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED BinOp1D() = default;
+#else
+ BinOp1D() = delete;
+#endif
+
+ // Construct BinOp with number of bins, minimum value and maximum value
+ BinOp1D(int max_bins, typename KeyViewType::const_value_type min,
+ typename KeyViewType::const_value_type max)
+ : max_bins_(max_bins + 1),
+ // Cast to double to avoid possible overflow when using integer
+ mul_(static_cast<double>(max_bins) /
+ (static_cast<double>(max) - static_cast<double>(min))),
+ min_(static_cast<double>(min)) {
+ // For integral types the number of bins may be larger than the range,
+ // in which case we can have exactly one unique value per bin
+ // and then don't need to sort within bins.
+ if (std::is_integral<typename KeyViewType::const_value_type>::value &&
+ (static_cast<double>(max) - static_cast<double>(min)) <=
+ static_cast<double>(max_bins)) {
+ mul_ = 1.;
+ }
+ }
+
+ // Determine bin index from key value
+ template <class ViewType>
+ KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
+ return static_cast<int>(mul_ * (static_cast<double>(keys(i)) - min_));
+ }
+
+ // Return maximum bin index + 1
+ KOKKOS_INLINE_FUNCTION
+ int max_bins() const { return max_bins_; }
+
+ // Compare two keys within a bin; if true, the key at i1 is ordered before the key at i2
+ template <class ViewType, typename iType1, typename iType2>
+ KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
+ iType2& i2) const {
+ return keys(i1) < keys(i2);
+ }
+};
+
+template <class KeyViewType>
+struct BinOp3D {
+ int max_bins_[3] = {};
+ double mul_[3] = {};
+ double min_[3] = {};
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED BinOp3D() = default;
+#else
+ BinOp3D() = delete;
+#endif
+
+ BinOp3D(int max_bins[], typename KeyViewType::const_value_type min[],
+ typename KeyViewType::const_value_type max[]) {
+ max_bins_[0] = max_bins[0];
+ max_bins_[1] = max_bins[1];
+ max_bins_[2] = max_bins[2];
+ mul_[0] = static_cast<double>(max_bins[0]) /
+ (static_cast<double>(max[0]) - static_cast<double>(min[0]));
+ mul_[1] = static_cast<double>(max_bins[1]) /
+ (static_cast<double>(max[1]) - static_cast<double>(min[1]));
+ mul_[2] = static_cast<double>(max_bins[2]) /
+ (static_cast<double>(max[2]) - static_cast<double>(min[2]));
+ min_[0] = static_cast<double>(min[0]);
+ min_[1] = static_cast<double>(min[1]);
+ min_[2] = static_cast<double>(min[2]);
+ }
+
+ template <class ViewType>
+ KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
+ return int((((int(mul_[0] * (keys(i, 0) - min_[0])) * max_bins_[1]) +
+ int(mul_[1] * (keys(i, 1) - min_[1]))) *
+ max_bins_[2]) +
+ int(mul_[2] * (keys(i, 2) - min_[2])));
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ int max_bins() const { return max_bins_[0] * max_bins_[1] * max_bins_[2]; }
+
+ template <class ViewType, typename iType1, typename iType2>
+ KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
+ iType2& i2) const {
+ if (keys(i1, 0) > keys(i2, 0))
+ return true;
+ else if (keys(i1, 0) == keys(i2, 0)) {
+ if (keys(i1, 1) > keys(i2, 1))
+ return true;
+ else if (keys(i1, 1) == keys(i2, 1)) {
+ if (keys(i1, 2) > keys(i2, 2)) return true;
+ }
+ }
+ return false;
+ }
+};
+
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_BIN_SORT_PUBLIC_API_HPP_
+#define KOKKOS_BIN_SORT_PUBLIC_API_HPP_
+
+#include "Kokkos_BinOpsPublicAPI.hpp"
+#include "impl/Kokkos_CopyOpsForBinSortImpl.hpp"
+#include <Kokkos_Core.hpp>
+#include <algorithm>
+
+namespace Kokkos {
+
+template <class KeyViewType, class BinSortOp,
+ class Space = typename KeyViewType::device_type,
+ class SizeType = typename KeyViewType::memory_space::size_type>
+class BinSort {
+ public:
+ template <class DstViewType, class SrcViewType>
+ struct copy_functor {
+ using src_view_type = typename SrcViewType::const_type;
+
+ using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
+
+ DstViewType dst_values;
+ src_view_type src_values;
+ int dst_offset;
+
+ copy_functor(DstViewType const& dst_values_, int const& dst_offset_,
+ SrcViewType const& src_values_)
+ : dst_values(dst_values_),
+ src_values(src_values_),
+ dst_offset(dst_offset_) {}
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const int& i) const {
+ copy_op::copy(dst_values, i + dst_offset, src_values, i);
+ }
+ };
+
+ template <class DstViewType, class PermuteViewType, class SrcViewType>
+ struct copy_permute_functor {
+ // If the source is a Kokkos::View, we can request constant random access;
+ // otherwise we can only use the const type.
+
+ using src_view_type = std::conditional_t<
+ Kokkos::is_view<SrcViewType>::value,
+ Kokkos::View<typename SrcViewType::const_data_type,
+ typename SrcViewType::array_layout,
+ typename SrcViewType::device_type
+#if !defined(KOKKOS_COMPILER_NVHPC) || (KOKKOS_COMPILER_NVHPC >= 230700)
+ ,
+ Kokkos::MemoryTraits<Kokkos::RandomAccess>
+#endif
+ >,
+ typename SrcViewType::const_type>;
+
+ using perm_view_type = typename PermuteViewType::const_type;
+
+ using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
+
+ DstViewType dst_values;
+ perm_view_type sort_order;
+ src_view_type src_values;
+ int src_offset;
+
+ copy_permute_functor(DstViewType const& dst_values_,
+ PermuteViewType const& sort_order_,
+ SrcViewType const& src_values_, int const& src_offset_)
+ : dst_values(dst_values_),
+ sort_order(sort_order_),
+ src_values(src_values_),
+ src_offset(src_offset_) {}
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const int& i) const {
+ copy_op::copy(dst_values, i, src_values, src_offset + sort_order(i));
+ }
+ };
+
+ // Naming this alias "execution_space" would be problematic since it would be
+ // considered the execution space for the various functors, which might use
+ // another execution space passed to sort() or create_permute_vector().
+ using exec_space = typename Space::execution_space;
+ using bin_op_type = BinSortOp;
+
+ struct bin_count_tag {};
+ struct bin_offset_tag {};
+ struct bin_binning_tag {};
+ struct bin_sort_bins_tag {};
+
+ public:
+ using size_type = SizeType;
+ using value_type = size_type;
+
+ using offset_type = Kokkos::View<size_type*, Space>;
+ using bin_count_type = Kokkos::View<const int*, Space>;
+
+ using const_key_view_type = typename KeyViewType::const_type;
+
+ // If the source is a Kokkos::View, we can request constant random access;
+ // otherwise we can only use the const type.
+
+ using const_rnd_key_view_type = std::conditional_t<
+ Kokkos::is_view<KeyViewType>::value,
+ Kokkos::View<typename KeyViewType::const_data_type,
+ typename KeyViewType::array_layout,
+ typename KeyViewType::device_type,
+ Kokkos::MemoryTraits<Kokkos::RandomAccess> >,
+ const_key_view_type>;
+
+ using non_const_key_scalar = typename KeyViewType::non_const_value_type;
+ using const_key_scalar = typename KeyViewType::const_value_type;
+
+ using bin_count_atomic_type =
+ Kokkos::View<int*, Space, Kokkos::MemoryTraits<Kokkos::Atomic> >;
+
+ private:
+ const_key_view_type keys;
+ const_rnd_key_view_type keys_rnd;
+
+ public:
+ BinSortOp bin_op;
+ offset_type bin_offsets;
+ bin_count_atomic_type bin_count_atomic;
+ bin_count_type bin_count_const;
+ offset_type sort_order;
+
+ int range_begin;
+ int range_end;
+ bool sort_within_bins;
+
+ public:
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED BinSort() = default;
+#else
+ BinSort() = delete;
+#endif
+
+ //----------------------------------------
+ // Constructor: takes the keys, the binning_operator and optionally whether to
+ // sort within bins (default false)
+ template <typename ExecutionSpace>
+ BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
+ int range_begin_, int range_end_, BinSortOp bin_op_,
+ bool sort_within_bins_ = false)
+ : keys(keys_),
+ keys_rnd(keys_),
+ bin_op(bin_op_),
+ bin_offsets(),
+ bin_count_atomic(),
+ bin_count_const(),
+ sort_order(),
+ range_begin(range_begin_),
+ range_end(range_end_),
+ sort_within_bins(sort_within_bins_) {
+ static_assert(
+ Kokkos::SpaceAccessibility<ExecutionSpace,
+ typename Space::memory_space>::accessible,
+ "The provided execution space must be able to access the memory space "
+ "BinSort was initialized with!");
+ if (bin_op.max_bins() <= 0)
+ Kokkos::abort(
+ "The number of bins in the BinSortOp object must be greater than 0!");
+ bin_count_atomic = Kokkos::View<int*, Space>(
+ "Kokkos::SortImpl::BinSortFunctor::bin_count", bin_op.max_bins());
+ bin_count_const = bin_count_atomic;
+ bin_offsets =
+ offset_type(view_alloc(exec, WithoutInitializing,
+ "Kokkos::SortImpl::BinSortFunctor::bin_offsets"),
+ bin_op.max_bins());
+ sort_order =
+ offset_type(view_alloc(exec, WithoutInitializing,
+ "Kokkos::SortImpl::BinSortFunctor::sort_order"),
+ range_end - range_begin);
+ }
+
+ BinSort(const_key_view_type keys_, int range_begin_, int range_end_,
+ BinSortOp bin_op_, bool sort_within_bins_ = false)
+ : BinSort(exec_space{}, keys_, range_begin_, range_end_, bin_op_,
+ sort_within_bins_) {}
+
+ template <typename ExecutionSpace>
+ BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
+ BinSortOp bin_op_, bool sort_within_bins_ = false)
+ : BinSort(exec, keys_, 0, keys_.extent(0), bin_op_, sort_within_bins_) {}
+
+ BinSort(const_key_view_type keys_, BinSortOp bin_op_,
+ bool sort_within_bins_ = false)
+ : BinSort(exec_space{}, keys_, bin_op_, sort_within_bins_) {}
+
+ //----------------------------------------
+ // Create the permutation vector, the bin_offset array and the bin_count
+ // array. Can be called again if keys changed
+ template <class ExecutionSpace>
+ void create_permute_vector(const ExecutionSpace& exec) {
+ static_assert(
+ Kokkos::SpaceAccessibility<ExecutionSpace,
+ typename Space::memory_space>::accessible,
+ "The provided execution space must be able to access the memory space "
+ "BinSort was initialized with!");
+
+ const size_t len = range_end - range_begin;
+ Kokkos::parallel_for(
+ "Kokkos::Sort::BinCount",
+ Kokkos::RangePolicy<ExecutionSpace, bin_count_tag>(exec, 0, len),
+ *this);
+ Kokkos::parallel_scan("Kokkos::Sort::BinOffset",
+ Kokkos::RangePolicy<ExecutionSpace, bin_offset_tag>(
+ exec, 0, bin_op.max_bins()),
+ *this);
+
+ Kokkos::deep_copy(exec, bin_count_atomic, 0);
+ Kokkos::parallel_for(
+ "Kokkos::Sort::BinBinning",
+ Kokkos::RangePolicy<ExecutionSpace, bin_binning_tag>(exec, 0, len),
+ *this);
+
+ if (sort_within_bins)
+ Kokkos::parallel_for(
+ "Kokkos::Sort::BinSort",
+ Kokkos::RangePolicy<ExecutionSpace, bin_sort_bins_tag>(
+ exec, 0, bin_op.max_bins()),
+ *this);
+ }
+
+ // Create the permutation vector, the bin_offset array and the bin_count
+ // array. Can be called again if keys changed
+ void create_permute_vector() {
+ Kokkos::fence("Kokkos::Binsort::create_permute_vector: before");
+ exec_space e{};
+ create_permute_vector(e);
+ e.fence("Kokkos::Binsort::create_permute_vector: after");
+ }
+
+ // Sort a subset of a view with respect to the first dimension using the
+ // permutation array
+ template <class ExecutionSpace, class ValuesViewType>
+ void sort(const ExecutionSpace& exec, ValuesViewType const& values,
+ int values_range_begin, int values_range_end) const {
+ if (values.extent(0) == 0) {
+ return;
+ }
+
+ static_assert(
+ Kokkos::SpaceAccessibility<ExecutionSpace,
+ typename Space::memory_space>::accessible,
+ "The provided execution space must be able to access the memory space "
+ "BinSort was initialized with!");
+ static_assert(
+ Kokkos::SpaceAccessibility<
+ ExecutionSpace, typename ValuesViewType::memory_space>::accessible,
+ "The provided execution space must be able to access the memory space "
+ "of the View argument!");
+
+ const size_t len = range_end - range_begin;
+ const size_t values_len = values_range_end - values_range_begin;
+ if (len != values_len) {
+ Kokkos::abort(
+ "BinSort::sort: values range length != permutation vector length");
+ }
+
+ using scratch_view_type =
+ Kokkos::View<typename ValuesViewType::data_type,
+ typename ValuesViewType::device_type>;
+ scratch_view_type sorted_values(
+ view_alloc(exec, WithoutInitializing,
+ "Kokkos::SortImpl::BinSortFunctor::sorted_values"),
+ values.rank_dynamic > 0 ? len : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 1 ? values.extent(1)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 2 ? values.extent(2)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 3 ? values.extent(3)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 4 ? values.extent(4)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 5 ? values.extent(5)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 6 ? values.extent(6)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ values.rank_dynamic > 7 ? values.extent(7)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG);
+
+ {
+ copy_permute_functor<scratch_view_type /* DstViewType */
+ ,
+ offset_type /* PermuteViewType */
+ ,
+ ValuesViewType /* SrcViewType */
+ >
+ functor(sorted_values, sort_order, values,
+ values_range_begin - range_begin);
+
+ parallel_for("Kokkos::Sort::CopyPermute",
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
+ }
+
+ {
+ copy_functor<ValuesViewType, scratch_view_type> functor(
+ values, range_begin, sorted_values);
+
+ parallel_for("Kokkos::Sort::Copy",
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
+ }
+ }
+
+ // Sort a subset of a view with respect to the first dimension using the
+ // permutation array
+ template <class ValuesViewType>
+ void sort(ValuesViewType const& values, int values_range_begin,
+ int values_range_end) const {
+ Kokkos::fence("Kokkos::Binsort::sort: before");
+ exec_space exec;
+ sort(exec, values, values_range_begin, values_range_end);
+ exec.fence("Kokkos::BinSort::sort: after");
+ }
+
+ template <class ExecutionSpace, class ValuesViewType>
+ void sort(ExecutionSpace const& exec, ValuesViewType const& values) const {
+ this->sort(exec, values, 0, /*values.extent(0)*/ range_end - range_begin);
+ }
+
+ template <class ValuesViewType>
+ void sort(ValuesViewType const& values) const {
+ this->sort(values, 0, /*values.extent(0)*/ range_end - range_begin);
+ }
+
+ // Get the permutation vector
+ KOKKOS_INLINE_FUNCTION
+ offset_type get_permute_vector() const { return sort_order; }
+
+ // Get the start offsets for each bin
+ KOKKOS_INLINE_FUNCTION
+ offset_type get_bin_offsets() const { return bin_offsets; }
+
+ // Get the count for each bin
+ KOKKOS_INLINE_FUNCTION
+ bin_count_type get_bin_count() const { return bin_count_const; }
+
+ public:
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const bin_count_tag& /*tag*/, const int i) const {
+ const int j = range_begin + i;
+ bin_count_atomic(bin_op.bin(keys, j))++;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const bin_offset_tag& /*tag*/, const int i,
+ value_type& offset, const bool& final) const {
+ if (final) {
+ bin_offsets(i) = offset;
+ }
+ offset += bin_count_const(i);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const bin_binning_tag& /*tag*/, const int i) const {
+ const int j = range_begin + i;
+ const int bin = bin_op.bin(keys, j);
+ const int count = bin_count_atomic(bin)++;
+
+ sort_order(bin_offsets(bin) + count) = j;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const bin_sort_bins_tag& /*tag*/, const int i) const {
+ auto bin_size = bin_count_const(i);
+ if (bin_size <= 1) return;
+ constexpr bool use_std_sort =
+ std::is_same_v<typename exec_space::memory_space, HostSpace>;
+ int lower_bound = bin_offsets(i);
+ int upper_bound = lower_bound + bin_size;
+ // Switching to std::sort for more than 10 elements has been found
+ // reasonable experimentally.
+ if (use_std_sort && bin_size > 10) {
+ KOKKOS_IF_ON_HOST(
+ (std::sort(sort_order.data() + lower_bound,
+ sort_order.data() + upper_bound,
+ [this](int p, int q) { return bin_op(keys_rnd, p, q); });))
+ } else {
+ for (int k = lower_bound + 1; k < upper_bound; ++k) {
+ int old_idx = sort_order(k);
+ int j = k - 1;
+ while (j >= lower_bound) {
+ int new_idx = sort_order(j);
+ if (!bin_op(keys_rnd, old_idx, new_idx)) break;
+ sort_order(j + 1) = new_idx;
+ --j;
+ }
+ sort_order(j + 1) = old_idx;
+ }
+ }
+ }
+};
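+
+// Usage sketch (illustrative): sort `values` by floating-point `keys` with 1D
+// binning (the bin count and bounds are hypothetical):
+//   Kokkos::BinOp1D<decltype(keys)> op(/*max_bins=*/1000, /*min=*/0.f, /*max=*/1.f);
+//   Kokkos::BinSort<decltype(keys), decltype(op)> sorter(keys, op, /*sort_within_bins=*/true);
+//   sorter.create_permute_vector();
+//   sorter.sort(values);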
+
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_NESTED_SORT_PUBLIC_API_HPP_
+#define KOKKOS_NESTED_SORT_PUBLIC_API_HPP_
+
+#include "impl/Kokkos_NestedSortImpl.hpp"
+#include <Kokkos_Core.hpp>
+#include <std_algorithms/impl/Kokkos_HelperPredicates.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class TeamMember, class ViewType>
+KOKKOS_INLINE_FUNCTION void sort_team(const TeamMember& t,
+ const ViewType& view) {
+ Impl::sort_nested_impl(t, view, nullptr,
+ Experimental::Impl::StdAlgoLessThanBinaryPredicate<
+ typename ViewType::non_const_value_type>(),
+ Impl::NestedRange<true>());
+}
+
+template <class TeamMember, class ViewType, class Comparator>
+KOKKOS_INLINE_FUNCTION void sort_team(const TeamMember& t, const ViewType& view,
+ const Comparator& comp) {
+ Impl::sort_nested_impl(t, view, nullptr, comp, Impl::NestedRange<true>());
+}
+
+template <class TeamMember, class KeyViewType, class ValueViewType>
+KOKKOS_INLINE_FUNCTION void sort_by_key_team(const TeamMember& t,
+ const KeyViewType& keyView,
+ const ValueViewType& valueView) {
+ Impl::sort_nested_impl(t, keyView, valueView,
+ Experimental::Impl::StdAlgoLessThanBinaryPredicate<
+ typename KeyViewType::non_const_value_type>(),
+ Impl::NestedRange<true>());
+}
+
+template <class TeamMember, class KeyViewType, class ValueViewType,
+ class Comparator>
+KOKKOS_INLINE_FUNCTION void sort_by_key_team(const TeamMember& t,
+ const KeyViewType& keyView,
+ const ValueViewType& valueView,
+ const Comparator& comp) {
+ Impl::sort_nested_impl(t, keyView, valueView, comp,
+ Impl::NestedRange<true>());
+}
+
+template <class TeamMember, class ViewType>
+KOKKOS_INLINE_FUNCTION void sort_thread(const TeamMember& t,
+ const ViewType& view) {
+ Impl::sort_nested_impl(t, view, nullptr,
+ Experimental::Impl::StdAlgoLessThanBinaryPredicate<
+ typename ViewType::non_const_value_type>(),
+ Impl::NestedRange<false>());
+}
+
+template <class TeamMember, class ViewType, class Comparator>
+KOKKOS_INLINE_FUNCTION void sort_thread(const TeamMember& t,
+ const ViewType& view,
+ const Comparator& comp) {
+ Impl::sort_nested_impl(t, view, nullptr, comp, Impl::NestedRange<false>());
+}
+
+template <class TeamMember, class KeyViewType, class ValueViewType>
+KOKKOS_INLINE_FUNCTION void sort_by_key_thread(const TeamMember& t,
+ const KeyViewType& keyView,
+ const ValueViewType& valueView) {
+ Impl::sort_nested_impl(t, keyView, valueView,
+ Experimental::Impl::StdAlgoLessThanBinaryPredicate<
+ typename KeyViewType::non_const_value_type>(),
+ Impl::NestedRange<false>());
+}
+
+template <class TeamMember, class KeyViewType, class ValueViewType,
+ class Comparator>
+KOKKOS_INLINE_FUNCTION void sort_by_key_thread(const TeamMember& t,
+ const KeyViewType& keyView,
+ const ValueViewType& valueView,
+ const Comparator& comp) {
+ Impl::sort_nested_impl(t, keyView, valueView, comp,
+ Impl::NestedRange<false>());
+}
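+
+// Usage sketch (illustrative): sort one row per team from inside a TeamPolicy
+// kernel (the execution policy, data layout, and names are hypothetical):
+//   using member_type = Kokkos::TeamPolicy<>::member_type;
+//   Kokkos::parallel_for(Kokkos::TeamPolicy<>(nrows, Kokkos::AUTO),
+//                        KOKKOS_LAMBDA(const member_type& t) {
+//     auto row = Kokkos::subview(data, t.league_rank(), Kokkos::ALL);
+//     Kokkos::Experimental::sort_team(t, row);
+//   });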
+
+} // namespace Experimental
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SORT_BY_KEY_PUBLIC_API_HPP_
+#define KOKKOS_SORT_BY_KEY_PUBLIC_API_HPP_
+
+#include "./impl/Kokkos_SortByKeyImpl.hpp"
+#include <Kokkos_Core.hpp>
+#include <algorithm>
+
+namespace Kokkos::Experimental {
+
+// ---------------------------------------------------------------
+// basic overloads
+// ---------------------------------------------------------------
+
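+// sort_by_key sorts "keys" and applies the same permutation to "values", so
+// each value stays matched with its key. A hedged usage sketch (all names
+// below are illustrative, not part of this header):
+//
+//   Kokkos::View<int*> keys("keys", n);
+//   Kokkos::View<double*> values("values", n);
+//   // ... fill keys and values ...
+//   Kokkos::Experimental::sort_by_key(Kokkos::DefaultExecutionSpace{},
+//                                     keys, values);
+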
+template <class ExecutionSpace, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties>
+void sort_by_key(
+ const ExecutionSpace& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values) {
+ // constraints
+ using KeysType = Kokkos::View<KeysDataType, KeysProperties...>;
+ using ValuesType = Kokkos::View<ValuesDataType, ValuesProperties...>;
+ ::Kokkos::Impl::static_assert_is_admissible_to_kokkos_sort_by_key(keys);
+ ::Kokkos::Impl::static_assert_is_admissible_to_kokkos_sort_by_key(values);
+
+ static_assert(SpaceAccessibility<ExecutionSpace,
+ typename KeysType::memory_space>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the keys View argument!");
+ static_assert(
+ SpaceAccessibility<ExecutionSpace,
+ typename ValuesType::memory_space>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the values View argument!");
+
+ static_assert(KeysType::static_extent(0) == 0 ||
+ ValuesType::static_extent(0) == 0 ||
+ KeysType::static_extent(0) == ValuesType::static_extent(0));
+ if (values.size() != keys.size())
+ Kokkos::abort((std::string("values and keys extents must be the same. The "
+ "values extent is ") +
+ std::to_string(values.size()) + ", and the keys extent is " +
+ std::to_string(keys.size()) + ".")
+ .c_str());
+
+ if (keys.extent(0) <= 1) {
+ return;
+ }
+
+ ::Kokkos::Impl::sort_by_key_device_view_without_comparator(exec, keys,
+ values);
+}
+
+// ---------------------------------------------------------------
+// overloads supporting a custom comparator
+// ---------------------------------------------------------------
+
+template <class ExecutionSpace, class ComparatorType, class KeysDataType,
+ class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties>
+void sort_by_key(
+ const ExecutionSpace& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ const ComparatorType& comparator) {
+ // constraints
+ using KeysType = Kokkos::View<KeysDataType, KeysProperties...>;
+ using ValuesType = Kokkos::View<ValuesDataType, ValuesProperties...>;
+ ::Kokkos::Impl::static_assert_is_admissible_to_kokkos_sort_by_key(keys);
+ ::Kokkos::Impl::static_assert_is_admissible_to_kokkos_sort_by_key(values);
+
+ static_assert(SpaceAccessibility<ExecutionSpace,
+ typename KeysType::memory_space>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the keys View argument!");
+ static_assert(
+ SpaceAccessibility<ExecutionSpace,
+ typename ValuesType::memory_space>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the values View argument!");
+
+ static_assert(KeysType::static_extent(0) == 0 ||
+ ValuesType::static_extent(0) == 0 ||
+ KeysType::static_extent(0) == ValuesType::static_extent(0));
+ if (values.size() != keys.size())
+ Kokkos::abort((std::string("values and keys extents must be the same. The "
+ "values extent is ") +
+ std::to_string(values.size()) + ", and the keys extent is " +
+ std::to_string(keys.size()) + ".")
+ .c_str());
+
+ if (keys.extent(0) <= 1) {
+ return;
+ }
+
+ ::Kokkos::Impl::sort_by_key_device_view_with_comparator(exec, keys, values,
+ comparator);
+}
+
+} // namespace Kokkos::Experimental
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SORT_PUBLIC_API_HPP_
+#define KOKKOS_SORT_PUBLIC_API_HPP_
+
+#include "./impl/Kokkos_SortImpl.hpp"
+#include <std_algorithms/Kokkos_BeginEnd.hpp>
+#include <Kokkos_Core.hpp>
+#include <algorithm>
+
+namespace Kokkos {
+
+// ---------------------------------------------------------------
+// basic overloads
+// ---------------------------------------------------------------
+
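+// A hedged usage sketch (names are illustrative): the execution-space
+// overload may run asynchronously on "exec", while the view-only overload
+// further below fences before and after sorting.
+//
+//   Kokkos::View<double*> v("v", n);
+//   // ... fill v ...
+//   Kokkos::sort(Kokkos::DefaultExecutionSpace{}, v);
+//   Kokkos::sort(v);  // fences, safe to read v afterwards
+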
+template <class ExecutionSpace, class DataType, class... Properties>
+void sort(const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view) {
+ // constraints
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ using MemSpace = typename ViewType::memory_space;
+ static_assert(
+ ViewType::rank == 1 &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "Kokkos::sort without comparator: supports 1D Views with LayoutRight, "
+ "LayoutLeft or LayoutStride.");
+
+ static_assert(SpaceAccessibility<ExecutionSpace, MemSpace>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the "
+ "View argument!");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ if constexpr (Impl::better_off_calling_std_sort_v<ExecutionSpace>) {
+ exec.fence("Kokkos::sort without comparator use std::sort");
+ if (view.span_is_contiguous()) {
+ std::sort(view.data(), view.data() + view.size());
+ } else {
+ auto first = ::Kokkos::Experimental::begin(view);
+ auto last = ::Kokkos::Experimental::end(view);
+ std::sort(first, last);
+ }
+ } else {
+ Impl::sort_device_view_without_comparator(exec, view);
+ }
+}
+
+template <class DataType, class... Properties>
+void sort(const Kokkos::View<DataType, Properties...>& view) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ Kokkos::fence("Kokkos::sort: before");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ typename ViewType::execution_space exec;
+ sort(exec, view);
+ exec.fence("Kokkos::sort: fence after sorting");
+}
+
+// ---------------------------------------------------------------
+// overloads supporting a custom comparator
+// ---------------------------------------------------------------
+template <class ExecutionSpace, class ComparatorType, class DataType,
+ class... Properties>
+void sort(const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ // constraints
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ using MemSpace = typename ViewType::memory_space;
+ static_assert(
+ ViewType::rank == 1 &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "Kokkos::sort with comparator: supports 1D Views with LayoutRight, "
+ "LayoutLeft or LayoutStride.");
+
+ static_assert(SpaceAccessibility<ExecutionSpace, MemSpace>::accessible,
+ "Kokkos::sort: execution space instance is not able to access "
+ "the memory space of the View argument!");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ if constexpr (Impl::better_off_calling_std_sort_v<ExecutionSpace>) {
+ exec.fence("Kokkos::sort with comparator use std::sort");
+ if (view.span_is_contiguous()) {
+ std::sort(view.data(), view.data() + view.size(), comparator);
+ } else {
+ auto first = ::Kokkos::Experimental::begin(view);
+ auto last = ::Kokkos::Experimental::end(view);
+ std::sort(first, last, comparator);
+ }
+ } else {
+ Impl::sort_device_view_with_comparator(exec, view, comparator);
+ }
+}
+
+template <class ComparatorType, class DataType, class... Properties>
+void sort(const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(
+ ViewType::rank == 1 &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "Kokkos::sort with comparator: supports 1D Views with LayoutRight, "
+ "LayoutLeft or LayoutStride.");
+
+ Kokkos::fence("Kokkos::sort with comparator: before");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ typename ViewType::execution_space exec;
+ sort(exec, view, comparator);
+ exec.fence("Kokkos::sort with comparator: fence after sorting");
+}
+
+// ---------------------------------------------------------------
+// overloads for sorting a view with a subrange
+// specified via integers begin, end
+// ---------------------------------------------------------------
+
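+// Only the elements in [begin, end) are sorted; the rest of the view is left
+// untouched. For example (illustrative): sort(exec, v, 2, 7) reorders
+// v(2)..v(6) in place.
+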
+template <class ExecutionSpace, class ViewType>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
+ const ExecutionSpace& exec, ViewType view, size_t const begin,
+ size_t const end) {
+ // view must be rank-1 because the Impl::min_max_functor
+ // used below only works for rank-1 views for now
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ using range_policy = Kokkos::RangePolicy<typename ViewType::execution_space>;
+ using CompType = BinOp1D<ViewType>;
+
+ Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
+ Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
+
+ parallel_reduce("Kokkos::Sort::FindExtent", range_policy(exec, begin, end),
+ Impl::min_max_functor<ViewType>(view), reducer);
+
+ if (result.min_val == result.max_val) return;
+
+ BinSort<ViewType, CompType> bin_sort(
+ exec, view, begin, end,
+ CompType((end - begin) / 2, result.min_val, result.max_val), true);
+
+ bin_sort.create_permute_vector(exec);
+ bin_sort.sort(exec, view, begin, end);
+}
+
+template <class ViewType>
+void sort(ViewType view, size_t const begin, size_t const end) {
+ // same constraints as the overload above which this gets dispatched to
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ Kokkos::fence("Kokkos::sort: before");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ typename ViewType::execution_space exec;
+ sort(exec, view, begin, end);
+ exec.fence("Kokkos::Sort: fence after sorting");
+}
+
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_COPY_OPS_FOR_BINSORT_IMPL_HPP_
+#define KOKKOS_COPY_OPS_FOR_BINSORT_IMPL_HPP_
+
+#include <Kokkos_Macros.hpp>
+#include <cstddef>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DstViewType, class SrcViewType, int Rank = DstViewType::rank>
+struct CopyOp;
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 1> {
+ KOKKOS_INLINE_FUNCTION
+ static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+ size_t i_src) {
+ dst(i_dst) = src(i_src);
+ }
+};
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 2> {
+ KOKKOS_INLINE_FUNCTION
+ static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+ size_t i_src) {
+ for (int j = 0; j < (int)dst.extent(1); j++) dst(i_dst, j) = src(i_src, j);
+ }
+};
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 3> {
+ KOKKOS_INLINE_FUNCTION
+ static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+ size_t i_src) {
+    for (int j = 0; j < (int)dst.extent(1); j++)
+      for (int k = 0; k < (int)dst.extent(2); k++)
+ dst(i_dst, j, k) = src(i_src, j, k);
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_NESTED_SORT_IMPL_HPP_
+#define KOKKOS_NESTED_SORT_IMPL_HPP_
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+// true for TeamVectorRange, false for ThreadVectorRange
+template <bool teamLevel>
+struct NestedRange {};
+
+// Specialization for team-level
+template <>
+struct NestedRange<true> {
+ template <typename TeamMember, typename SizeType>
+ KOKKOS_FUNCTION static auto create(const TeamMember& t, SizeType len) {
+ return Kokkos::TeamVectorRange(t, len);
+ }
+ template <typename TeamMember>
+ KOKKOS_FUNCTION static void barrier(const TeamMember& t) {
+ t.team_barrier();
+ }
+};
+
+// Specialization for thread-level
+template <>
+struct NestedRange<false> {
+ template <typename TeamMember, typename SizeType>
+ KOKKOS_FUNCTION static auto create(const TeamMember& t, SizeType len) {
+ return Kokkos::ThreadVectorRange(t, len);
+ }
+  // Barrier is a no-op, as the vector lanes of a thread are implicitly
+  // synchronized after a parallel region
+ template <typename TeamMember>
+ KOKKOS_FUNCTION static void barrier(const TeamMember&) {}
+};
+
+// When just doing sort (not sort_by_key), pass nullptr for valueView so that
+// ValueViewType deduces to std::nullptr_t. The NestedRange argument is taken
+// only for template argument deduction.
+template <class TeamMember, class KeyViewType, class ValueViewType,
+ class Comparator, bool useTeamLevel>
+KOKKOS_INLINE_FUNCTION void sort_nested_impl(
+ const TeamMember& t, const KeyViewType& keyView,
+ [[maybe_unused]] const ValueViewType& valueView, const Comparator& comp,
+ const NestedRange<useTeamLevel>) {
+ using SizeType = typename KeyViewType::size_type;
+ using KeyType = typename KeyViewType::non_const_value_type;
+ using Range = NestedRange<useTeamLevel>;
+ SizeType n = keyView.extent(0);
+ SizeType npot = 1;
+ SizeType levels = 0;
+ // FIXME: ceiling power-of-two is a common thing to need - make it a utility
+ while (npot < n) {
+ levels++;
+ npot <<= 1;
+ }
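+  // e.g. n == 5 gives npot == 8 and levels == 3 (ceil(log2(n))); indices at
+  // or past n are never swapped thanks to the elem2 < n guard below.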
+ for (SizeType i = 0; i < levels; i++) {
+ for (SizeType j = 0; j <= i; j++) {
+ // n/2 pairs of items are compared in parallel
+ Kokkos::parallel_for(Range::create(t, npot / 2), [=](const SizeType k) {
+ // How big are the brown/pink boxes?
+ // (Terminology comes from Wikipedia diagram)
+ // https://commons.wikimedia.org/wiki/File:BitonicSort.svg#/media/File:BitonicSort.svg
+ SizeType boxSize = SizeType(2) << (i - j);
+ // Which box contains this thread?
+ SizeType boxID = k >> (i - j); // k * 2 / boxSize;
+ SizeType boxStart = boxID << (1 + i - j); // boxID * boxSize
+ SizeType boxOffset = k - (boxStart >> 1); // k - boxID * boxSize / 2;
+ SizeType elem1 = boxStart + boxOffset;
+ // In first phase (j == 0, brown box): within a box, compare with the
+ // opposite value in the box.
+ // In later phases (j > 0, pink box): within a box, compare with fixed
+ // distance (boxSize / 2) apart.
+ SizeType elem2 = (j == 0) ? (boxStart + boxSize - 1 - boxOffset)
+ : (elem1 + boxSize / 2);
+ if (elem2 < n) {
+ KeyType key1 = keyView(elem1);
+ KeyType key2 = keyView(elem2);
+ if (comp(key2, key1)) {
+ keyView(elem1) = key2;
+ keyView(elem2) = key1;
+ if constexpr (!std::is_same_v<ValueViewType, std::nullptr_t>) {
+ Kokkos::kokkos_swap(valueView(elem1), valueView(elem2));
+ }
+ }
+ }
+ });
+ Range::barrier(t);
+ }
+ }
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SORT_BY_KEY_FREE_FUNCS_IMPL_HPP_
+#define KOKKOS_SORT_BY_KEY_FREE_FUNCS_IMPL_HPP_
+
+#include <Kokkos_Core.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+
+// Workaround for `Instruction 'shfl' without '.sync' is not supported on
+// .target sm_70 and higher from PTX ISA version 6.4`.
+// Also see https://github.com/NVIDIA/cub/pull/170.
+#if !defined(CUB_USE_COOPERATIVE_GROUPS)
+#define CUB_USE_COOPERATIVE_GROUPS
+#endif
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+
+#if defined(KOKKOS_COMPILER_CLANG)
+// Some versions of Clang fail to compile Thrust with errors like:
+// <snip>/thrust/system/cuda/detail/core/agent_launcher.h:557:11:
+// error: use of undeclared identifier 'va_printf'
+// The exact combination of Clang and Thrust (or CUDA) versions triggering the
+// failure was not investigated; however, even a fairly recent combination
+// (Clang 10.0.0 and Cuda 10.0) demonstrated the failure.
+//
+// Defining _CubLog here locally allows us to avoid that code path, at the
+// cost of disabling some debugging diagnostics
+#pragma push_macro("_CubLog")
+#ifdef _CubLog
+#undef _CubLog
+#endif
+#define _CubLog
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#pragma pop_macro("_CubLog")
+#else
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#endif
+
+#pragma GCC diagnostic pop
+
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL) && \
+ (ONEDPL_VERSION_MAJOR > 2022 || \
+ (ONEDPL_VERSION_MAJOR == 2022 && ONEDPL_VERSION_MINOR >= 2))
+#define KOKKOS_ONEDPL_HAS_SORT_BY_KEY
+#include <oneapi/dpl/execution>
+#include <oneapi/dpl/algorithm>
+#endif
+
+namespace Kokkos::Impl {
+
+template <typename T>
+constexpr inline bool is_admissible_to_kokkos_sort_by_key =
+ ::Kokkos::is_view<T>::value && T::rank() == 1 &&
+ (std::is_same_v<typename T::traits::array_layout, Kokkos::LayoutLeft> ||
+ std::is_same_v<typename T::traits::array_layout, Kokkos::LayoutRight> ||
+ std::is_same_v<typename T::traits::array_layout, Kokkos::LayoutStride>);
+
+template <class ViewType>
+KOKKOS_INLINE_FUNCTION constexpr void
+static_assert_is_admissible_to_kokkos_sort_by_key(const ViewType& /* view */) {
+ static_assert(is_admissible_to_kokkos_sort_by_key<ViewType>,
+ "Kokkos::sort_by_key only accepts 1D values View with "
+ "LayoutRight, LayoutLeft or LayoutStride.");
+}
+
+// For the fallback implementation of sort_by_key using Kokkos::sort, we need
+// to consider whether Kokkos::sort itself defers to its own fallback, which
+// copies the array to the host and uses std::sort, see
+// copy_to_host_run_stdsort_copy_back() in impl/Kokkos_SortImpl.hpp. If
+// sort_on_device_v is true, we assume Kokkos::sort does not take that
+// copy-to-host path. Otherwise, we manually copy all data to the host and
+// provide Kokkos::sort with a host execution space.
+template <class ExecutionSpace, class Layout>
+inline constexpr bool sort_on_device_v = false;
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class Layout>
+inline constexpr bool sort_on_device_v<Kokkos::Cuda, Layout> = true;
+
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties, class... MaybeComparator>
+void sort_by_key_cudathrust(
+ const Kokkos::Cuda& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ MaybeComparator&&... maybeComparator) {
+ const auto policy = thrust::cuda::par.on(exec.cuda_stream());
+ auto keys_first = ::Kokkos::Experimental::begin(keys);
+ auto keys_last = ::Kokkos::Experimental::end(keys);
+ auto values_first = ::Kokkos::Experimental::begin(values);
+ thrust::sort_by_key(policy, keys_first, keys_last, values_first,
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class Layout>
+inline constexpr bool sort_on_device_v<Kokkos::HIP, Layout> = true;
+
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties, class... MaybeComparator>
+void sort_by_key_rocthrust(
+ const Kokkos::HIP& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ MaybeComparator&&... maybeComparator) {
+ const auto policy = thrust::hip::par.on(exec.hip_stream());
+ auto keys_first = ::Kokkos::Experimental::begin(keys);
+ auto keys_last = ::Kokkos::Experimental::end(keys);
+ auto values_first = ::Kokkos::Experimental::begin(values);
+ thrust::sort_by_key(policy, keys_first, keys_last, values_first,
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class Layout>
+inline constexpr bool sort_on_device_v<Kokkos::SYCL, Layout> =
+ std::is_same_v<Layout, Kokkos::LayoutLeft> ||
+ std::is_same_v<Layout, Kokkos::LayoutRight>;
+
+#ifdef KOKKOS_ONEDPL_HAS_SORT_BY_KEY
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties, class... MaybeComparator>
+void sort_by_key_onedpl(
+ const Kokkos::SYCL& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ MaybeComparator&&... maybeComparator) {
+  if (keys.stride(0) != 1 || values.stride(0) != 1) {
+ Kokkos::abort(
+ "SYCL sort_by_key only supports rank-1 Views with stride(0) = 1.");
+ }
+
+  // Can't use Experimental::begin/end here since oneDPL would then assume
+  // that the data is on the host.
+ auto queue = exec.sycl_queue();
+ auto policy = oneapi::dpl::execution::make_device_policy(queue);
+ const int n = keys.extent(0);
+ oneapi::dpl::sort_by_key(policy, keys.data(), keys.data() + n, values.data(),
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+#endif
+
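+// Gather-applies a permutation: view(i) = old_view(permutation(i)). A scratch
+// copy of the input is made first since the gather cannot be done in place.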
+template <typename ExecutionSpace, typename PermutationView, typename ViewType>
+void applyPermutation(const ExecutionSpace& space,
+ const PermutationView& permutation,
+ const ViewType& view) {
+ static_assert(std::is_integral_v<typename PermutationView::value_type>);
+
+ auto view_copy = Kokkos::create_mirror(
+ Kokkos::view_alloc(space, typename ExecutionSpace::memory_space{},
+ Kokkos::WithoutInitializing),
+ view);
+ Kokkos::deep_copy(space, view_copy, view);
+ Kokkos::parallel_for(
+ "Kokkos::sort_by_key_via_sort::permute_" + view.label(),
+ Kokkos::RangePolicy<ExecutionSpace>(space, 0, view.extent(0)),
+ KOKKOS_LAMBDA(int i) { view(i) = view_copy(permutation(i)); });
+}
+
+// FIXME_NVCC: nvcc has trouble compiling lambdas inside a function with
+// variadic templates (sort_by_key_via_sort), so we use functors instead.
+template <typename Permute>
+struct IotaFunctor {
+ Permute _permute;
+ KOKKOS_FUNCTION void operator()(int i) const { _permute(i) = i; }
+};
+template <typename Keys>
+struct LessFunctor {
+ Keys _keys;
+ KOKKOS_FUNCTION bool operator()(int i, int j) const {
+ return _keys(i) < _keys(j);
+ }
+};
+
+// FIXME_NVCC+MSVC: we can't use a lambda instead of a functor; doing so gave
+// "For this host platform/dialect, an extended lambda cannot be defined inside
+// the 'if' or 'else' block of a constexpr if statement"
+template <typename Keys, typename Comparator>
+struct KeyComparisonFunctor {
+ Keys m_keys;
+ Comparator m_comparator;
+ KOKKOS_FUNCTION bool operator()(int i, int j) const {
+ return m_comparator(m_keys(i), m_keys(j));
+ }
+};
+
+template <class ExecutionSpace, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties,
+ class... MaybeComparator>
+void sort_by_key_via_sort(
+ const ExecutionSpace& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ MaybeComparator&&... maybeComparator) {
+ static_assert(sizeof...(MaybeComparator) <= 1);
+
+ auto const n = keys.size();
+
+ Kokkos::View<unsigned int*, ExecutionSpace> permute(
+ Kokkos::view_alloc(exec, Kokkos::WithoutInitializing,
+ "Kokkos::sort_by_key_via_sort::permute"),
+ n);
+
+ // iota
+ Kokkos::parallel_for("Kokkos::sort_by_key_via_sort::iota",
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, n),
+ IotaFunctor<decltype(permute)>{permute});
+
+ using Layout =
+ typename Kokkos::View<unsigned int*, ExecutionSpace>::array_layout;
+ if constexpr (!sort_on_device_v<ExecutionSpace, Layout>) {
+ auto host_keys = Kokkos::create_mirror_view(
+ Kokkos::view_alloc(Kokkos::HostSpace{}, Kokkos::WithoutInitializing),
+ keys);
+ auto host_permute = Kokkos::create_mirror_view(
+ Kokkos::view_alloc(Kokkos::HostSpace{}, Kokkos::WithoutInitializing),
+ permute);
+ Kokkos::deep_copy(exec, host_keys, keys);
+ Kokkos::deep_copy(exec, host_permute, permute);
+
+ exec.fence("Kokkos::Impl::sort_by_key_via_sort: before host sort");
+ Kokkos::DefaultHostExecutionSpace host_exec;
+
+ if constexpr (sizeof...(MaybeComparator) == 0) {
+ Kokkos::sort(host_exec, host_permute,
+ LessFunctor<decltype(host_keys)>{host_keys});
+ } else {
+ auto keys_comparator =
+ std::get<0>(std::tuple<MaybeComparator...>(maybeComparator...));
+ Kokkos::sort(
+ host_exec, host_permute,
+ KeyComparisonFunctor<decltype(host_keys), decltype(keys_comparator)>{
+ host_keys, keys_comparator});
+ }
+ host_exec.fence("Kokkos::Impl::sort_by_key_via_sort: after host sort");
+ Kokkos::deep_copy(exec, permute, host_permute);
+ } else {
+#ifdef KOKKOS_ENABLE_SYCL
+ auto* raw_keys_in_comparator = keys.data();
+ auto stride = keys.stride(0);
+ if constexpr (sizeof...(MaybeComparator) == 0) {
+ Kokkos::sort(
+ exec, permute, KOKKOS_LAMBDA(int i, int j) {
+ return raw_keys_in_comparator[i * stride] <
+ raw_keys_in_comparator[j * stride];
+ });
+ } else {
+ auto keys_comparator =
+ std::get<0>(std::tuple<MaybeComparator...>(maybeComparator...));
+ Kokkos::sort(
+ exec, permute, KOKKOS_LAMBDA(int i, int j) {
+ return keys_comparator(raw_keys_in_comparator[i * stride],
+ raw_keys_in_comparator[j * stride]);
+ });
+ }
+#else
+ if constexpr (sizeof...(MaybeComparator) == 0) {
+ Kokkos::sort(exec, permute, LessFunctor<decltype(keys)>{keys});
+ } else {
+ auto keys_comparator =
+ std::get<0>(std::tuple<MaybeComparator...>(maybeComparator...));
+ Kokkos::sort(
+ exec, permute,
+ KeyComparisonFunctor<decltype(keys), decltype(keys_comparator)>{
+ keys, keys_comparator});
+ }
+#endif
+ }
+
+ applyPermutation(exec, permute, keys);
+ applyPermutation(exec, permute, values);
+}
+
+// ------------------------------------------------------
+//
+// specialize cases for sorting by key without comparator
+//
+// ------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties>
+void sort_by_key_device_view_without_comparator(
+ const Kokkos::Cuda& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values) {
+ sort_by_key_cudathrust(exec, keys, values);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties>
+void sort_by_key_device_view_without_comparator(
+ const Kokkos::HIP& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values) {
+ sort_by_key_rocthrust(exec, keys, values);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class KeysDataType, class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties>
+void sort_by_key_device_view_without_comparator(
+ const Kokkos::SYCL& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values) {
+#ifdef KOKKOS_ONEDPL_HAS_SORT_BY_KEY
+ if (keys.stride(0) == 1 && values.stride(0) == 1)
+ sort_by_key_onedpl(exec, keys, values);
+ else
+#endif
+ sort_by_key_via_sort(exec, keys, values);
+}
+#endif
+
+// fallback case
+template <class ExecutionSpace, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value>
+sort_by_key_device_view_without_comparator(
+ const ExecutionSpace& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values) {
+ sort_by_key_via_sort(exec, keys, values);
+}
+
+// ---------------------------------------------------
+//
+// specialize cases for sorting by key with comparator
+//
+// ---------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class ComparatorType, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties>
+void sort_by_key_device_view_with_comparator(
+ const Kokkos::Cuda& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ const ComparatorType& comparator) {
+ sort_by_key_cudathrust(exec, keys, values, comparator);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class ComparatorType, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties>
+void sort_by_key_device_view_with_comparator(
+ const Kokkos::HIP& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ const ComparatorType& comparator) {
+ sort_by_key_rocthrust(exec, keys, values, comparator);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class ComparatorType, class KeysDataType, class... KeysProperties,
+ class ValuesDataType, class... ValuesProperties>
+void sort_by_key_device_view_with_comparator(
+ const Kokkos::SYCL& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ const ComparatorType& comparator) {
+#ifdef KOKKOS_ONEDPL_HAS_SORT_BY_KEY
+ if (keys.stride(0) == 1 && values.stride(0) == 1)
+ sort_by_key_onedpl(exec, keys, values, comparator);
+ else
+#endif
+ sort_by_key_via_sort(exec, keys, values, comparator);
+}
+#endif
+
+// fallback case
+template <class ComparatorType, class ExecutionSpace, class KeysDataType,
+ class... KeysProperties, class ValuesDataType,
+ class... ValuesProperties>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value>
+sort_by_key_device_view_with_comparator(
+ const ExecutionSpace& exec,
+ const Kokkos::View<KeysDataType, KeysProperties...>& keys,
+ const Kokkos::View<ValuesDataType, ValuesProperties...>& values,
+ const ComparatorType& comparator) {
+ sort_by_key_via_sort(exec, keys, values, comparator);
+}
+
+#undef KOKKOS_ONEDPL_HAS_SORT_BY_KEY
+
+} // namespace Kokkos::Impl
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SORT_FREE_FUNCS_IMPL_HPP_
+#define KOKKOS_SORT_FREE_FUNCS_IMPL_HPP_
+
+#include "../Kokkos_BinOpsPublicAPI.hpp"
+#include "../Kokkos_BinSortPublicAPI.hpp"
+#include <std_algorithms/Kokkos_BeginEnd.hpp>
+#include <std_algorithms/Kokkos_Copy.hpp>
+#include <Kokkos_Core.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+
+// Workaround for `Instruction 'shfl' without '.sync' is not supported on
+// .target sm_70 and higher from PTX ISA version 6.4`.
+// Also see https://github.com/NVIDIA/cub/pull/170.
+#if !defined(CUB_USE_COOPERATIVE_GROUPS)
+#define CUB_USE_COOPERATIVE_GROUPS
+#endif
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+
+#if defined(KOKKOS_COMPILER_CLANG)
+// Some versions of Clang fail to compile Thrust with errors like:
+// <snip>/thrust/system/cuda/detail/core/agent_launcher.h:557:11:
+// error: use of undeclared identifier 'va_printf'
+// The exact combination of Clang and Thrust (or CUDA) versions triggering the
+// failure was not investigated; however, even a fairly recent combination
+// (Clang 10.0.0 and Cuda 10.0) demonstrated the failure.
+//
+// Defining _CubLog here locally allows us to avoid that code path, at the
+// cost of disabling some debugging diagnostics
+#pragma push_macro("_CubLog")
+#ifdef _CubLog
+#undef _CubLog
+#endif
+#define _CubLog
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#pragma pop_macro("_CubLog")
+#else
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#endif
+
+#pragma GCC diagnostic pop
+
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+#include <thrust/device_ptr.h>
+#include <thrust/sort.h>
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+#include <oneapi/dpl/execution>
+#include <oneapi/dpl/algorithm>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace>
+struct better_off_calling_std_sort : std::false_type {};
+
+#if defined KOKKOS_ENABLE_SERIAL
+template <>
+struct better_off_calling_std_sort<Kokkos::Serial> : std::true_type {};
+#endif
+
+#if defined KOKKOS_ENABLE_OPENMP
+template <>
+struct better_off_calling_std_sort<Kokkos::OpenMP> : std::true_type {};
+#endif
+
+#if defined KOKKOS_ENABLE_THREADS
+template <>
+struct better_off_calling_std_sort<Kokkos::Threads> : std::true_type {};
+#endif
+
+#if defined KOKKOS_ENABLE_HPX
+template <>
+struct better_off_calling_std_sort<Kokkos::Experimental::HPX> : std::true_type {
+};
+#endif
+
+template <class T>
+inline constexpr bool better_off_calling_std_sort_v =
+ better_off_calling_std_sort<T>::value;
+
+template <class ViewType>
+struct min_max_functor {
+ using minmax_scalar =
+ Kokkos::MinMaxScalar<typename ViewType::non_const_value_type>;
+
+ ViewType view;
+ min_max_functor(const ViewType& view_) : view(view_) {}
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const size_t& i, minmax_scalar& minmax) const {
+ if (view(i) < minmax.min_val) minmax.min_val = view(i);
+ if (view(i) > minmax.max_val) minmax.max_val = view(i);
+ }
+};
+
+template <class ExecutionSpace, class DataType, class... Properties>
+void sort_via_binsort(const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view) {
+ // Although we are using BinSort below, which could work on rank-2 views,
+ // for now view must be rank-1 because the min_max_functor
+ // used below only works for rank-1 views
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+ Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
+ Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
+ parallel_reduce("Kokkos::Sort::FindExtent",
+ Kokkos::RangePolicy<typename ViewType::execution_space>(
+ exec, 0, view.extent(0)),
+ min_max_functor<ViewType>(view), reducer);
+ if (result.min_val == result.max_val) return;
+  // For integral types the number of bins may be larger than the value range,
+  // in which case each bin holds exactly one unique value and the bins
+  // themselves do not need to be sorted.
+ bool sort_in_bins = true;
+  // TODO: figure out a better max_bins than this ...
+ int64_t max_bins = view.extent(0) / 2;
+ if (std::is_integral_v<typename ViewType::non_const_value_type>) {
+ // Cast to double to avoid possible overflow when using integer
+ auto const max_val = static_cast<double>(result.max_val);
+ auto const min_val = static_cast<double>(result.min_val);
+ // using 10M as the cutoff for special behavior (roughly 40MB for the count
+ // array)
+ if ((max_val - min_val) < 10000000) {
+ max_bins = max_val - min_val + 1;
+ sort_in_bins = false;
+ }
+ }
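+  // e.g. one million ints drawn from [0, 999]: max_bins == 1000, every bin
+  // holds copies of a single value, so sort_in_bins is false.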
+ if (std::is_floating_point_v<typename ViewType::non_const_value_type>) {
+ KOKKOS_ASSERT(std::isfinite(static_cast<double>(result.max_val) -
+ static_cast<double>(result.min_val)));
+ }
+
+ using CompType = BinOp1D<ViewType>;
+ BinSort<ViewType, CompType> bin_sort(
+ view, CompType(max_bins, result.min_val, result.max_val), sort_in_bins);
+ bin_sort.create_permute_vector(exec);
+ bin_sort.sort(exec, view);
+}
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class DataType, class... Properties, class... MaybeComparator>
+void sort_cudathrust(const Cuda& space,
+ const Kokkos::View<DataType, Properties...>& view,
+ MaybeComparator&&... maybeComparator) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+ const auto exec = thrust::cuda::par.on(space.cuda_stream());
+ auto first = ::Kokkos::Experimental::begin(view);
+ auto last = ::Kokkos::Experimental::end(view);
+ thrust::sort(exec, first, last,
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class DataType, class... Properties, class... MaybeComparator>
+void sort_rocthrust(const HIP& space,
+ const Kokkos::View<DataType, Properties...>& view,
+ MaybeComparator&&... maybeComparator) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(ViewType::rank == 1,
+ "Kokkos::sort: currently only supports rank-1 Views.");
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+ const auto exec = thrust::hip::par.on(space.hip_stream());
+ auto first = ::Kokkos::Experimental::begin(view);
+ auto last = ::Kokkos::Experimental::end(view);
+ thrust::sort(exec, first, last,
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class DataType, class... Properties, class... MaybeComparator>
+void sort_onedpl(const Kokkos::SYCL& space,
+ const Kokkos::View<DataType, Properties...>& view,
+ MaybeComparator&&... maybeComparator) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(SpaceAccessibility<Kokkos::SYCL,
+ typename ViewType::memory_space>::accessible,
+ "SYCL execution space is not able to access the memory space "
+ "of the View argument!");
+
+ static_assert(
+ (ViewType::rank == 1) &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "SYCL sort only supports contiguous rank-1 Views with LayoutLeft, "
+ "LayoutRight or LayoutStride"
+ "For the latter, this means the View must have stride(0) = 1, enforced "
+ "at runtime.");
+
+ if (view.stride(0) != 1) {
+ Kokkos::abort("SYCL sort only supports rank-1 Views with stride(0) = 1.");
+ }
+
+ if (view.extent(0) <= 1) {
+ return;
+ }
+
+  // Can't use Experimental::begin/end here since oneDPL would then assume
+  // that the data is on the host.
+ auto queue = space.sycl_queue();
+ auto policy = oneapi::dpl::execution::make_device_policy(queue);
+ const int n = view.extent(0);
+ oneapi::dpl::sort(policy, view.data(), view.data() + n,
+ std::forward<MaybeComparator>(maybeComparator)...);
+}
+#endif
+
+template <class ExecutionSpace, class DataType, class... Properties,
+ class... MaybeComparator>
+void copy_to_host_run_stdsort_copy_back(
+ const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view,
+ MaybeComparator&&... maybeComparator) {
+ namespace KE = ::Kokkos::Experimental;
+
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ using layout = typename ViewType::array_layout;
+ if constexpr (std::is_same_v<LayoutStride, layout>) {
+    // for strided views we cannot just deep_copy from device to host,
+    // so we need a few extra steps
+ using view_value_type = typename ViewType::non_const_value_type;
+ using view_exespace = typename ViewType::execution_space;
+ using view_deep_copyable_t = Kokkos::View<view_value_type*, view_exespace>;
+ view_deep_copyable_t view_dc("view_dc", view.extent(0));
+ KE::copy(exec, view, view_dc);
+
+ // run sort on the mirror of view_dc
+ auto mv_h = create_mirror_view_and_copy(Kokkos::HostSpace(), view_dc);
+ if (view.span_is_contiguous()) {
+ std::sort(mv_h.data(), mv_h.data() + mv_h.size(),
+ std::forward<MaybeComparator>(maybeComparator)...);
+ } else {
+ auto first = KE::begin(mv_h);
+ auto last = KE::end(mv_h);
+ std::sort(first, last, std::forward<MaybeComparator>(maybeComparator)...);
+ }
+ Kokkos::deep_copy(exec, view_dc, mv_h);
+
+ // copy back to argument view
+ KE::copy(exec, KE::cbegin(view_dc), KE::cend(view_dc), KE::begin(view));
+ } else {
+ auto view_h = create_mirror_view_and_copy(Kokkos::HostSpace(), view);
+ if (view.span_is_contiguous()) {
+ std::sort(view_h.data(), view_h.data() + view_h.size(),
+ std::forward<MaybeComparator>(maybeComparator)...);
+ } else {
+ auto first = KE::begin(view_h);
+ auto last = KE::end(view_h);
+ std::sort(first, last, std::forward<MaybeComparator>(maybeComparator)...);
+ }
+ Kokkos::deep_copy(exec, view, view_h);
+ }
+}
+
+// --------------------------------------------------
+//
+// specialize cases for sorting without comparator
+//
+// --------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class DataType, class... Properties>
+void sort_device_view_without_comparator(
+ const Cuda& exec, const Kokkos::View<DataType, Properties...>& view) {
+ sort_cudathrust(exec, view);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class DataType, class... Properties>
+void sort_device_view_without_comparator(
+ const HIP& exec, const Kokkos::View<DataType, Properties...>& view) {
+ sort_rocthrust(exec, view);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class DataType, class... Properties>
+void sort_device_view_without_comparator(
+ const Kokkos::SYCL& exec,
+ const Kokkos::View<DataType, Properties...>& view) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(
+ (ViewType::rank == 1) &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "sort_device_view_without_comparator: supports rank-1 Views "
+ "with LayoutLeft, LayoutRight or LayoutStride");
+
+ if (view.stride(0) == 1) {
+ sort_onedpl(exec, view);
+ } else {
+ copy_to_host_run_stdsort_copy_back(exec, view);
+ }
+}
+#endif
+
+// fallback case
+template <class ExecutionSpace, class DataType, class... Properties>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value>
+sort_device_view_without_comparator(
+ const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view) {
+ sort_via_binsort(exec, view);
+}
+
+// --------------------------------------------------
+//
+// specialize cases for sorting with comparator
+//
+// --------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <class ComparatorType, class DataType, class... Properties>
+void sort_device_view_with_comparator(
+ const Cuda& exec, const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ sort_cudathrust(exec, view, comparator);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+template <class ComparatorType, class DataType, class... Properties>
+void sort_device_view_with_comparator(
+ const HIP& exec, const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ sort_rocthrust(exec, view, comparator);
+}
+#endif
+
+#if defined(KOKKOS_ENABLE_ONEDPL)
+template <class ComparatorType, class DataType, class... Properties>
+void sort_device_view_with_comparator(
+ const Kokkos::SYCL& exec, const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ static_assert(
+ (ViewType::rank == 1) &&
+ (std::is_same_v<typename ViewType::array_layout, LayoutRight> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutLeft> ||
+ std::is_same_v<typename ViewType::array_layout, LayoutStride>),
+ "sort_device_view_with_comparator: supports rank-1 Views "
+ "with LayoutLeft, LayoutRight or LayoutStride");
+
+ if (view.stride(0) == 1) {
+ sort_onedpl(exec, view, comparator);
+ } else {
+ copy_to_host_run_stdsort_copy_back(exec, view, comparator);
+ }
+}
+#endif
+
+template <class ExecutionSpace, class ComparatorType, class DataType,
+ class... Properties>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value>
+sort_device_view_with_comparator(
+ const ExecutionSpace& exec,
+ const Kokkos::View<DataType, Properties...>& view,
+ const ComparatorType& comparator) {
+ // This is a fallback case if a more specialized overload does not exist:
+ // for now, this fallback copies data to host, runs std::sort
+ // and then copies data back. Potentially, this can later be changed
+ // with a better solution like our own quicksort on device or similar.
+
+// Note with HIP unified memory this code path is still the right thing to do
+// if we end up here when RocThrust is not enabled.
+// The create_mirror_view_and_copy will do the right thing (no copy).
+#ifndef KOKKOS_IMPL_HIP_UNIFIED_MEMORY
+ using ViewType = Kokkos::View<DataType, Properties...>;
+ using MemSpace = typename ViewType::memory_space;
+ static_assert(!SpaceAccessibility<HostSpace, MemSpace>::accessible,
+ "Impl::sort_device_view_with_comparator: should not be called "
+ "on a view that is already accessible on the host");
+#endif
+
+ copy_to_host_run_stdsort_copy_back(exec, view, comparator);
+}
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
+
+#include "impl/Kokkos_AdjacentDifference.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
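+// adjacent_difference writes first_dest[0] = first_from[0] and, for i > 0,
+// first_dest[i] = first_from[i] - first_from[i-1] (or bin_op(from[i],
+// from[i-1]) for the overloads taking a binary op). Unlike the std::
+// counterpart, source and destination must not overlap. A hedged usage
+// sketch with illustrative names:
+//
+//   Kokkos::View<int*> in("in", n), out("out", n);
+//   Kokkos::Experimental::adjacent_difference(
+//       Kokkos::DefaultExecutionSpace{}, in, out);
+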
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+OutputIteratorType adjacent_difference(const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest) {
+ using value_type1 = typename InputIteratorType::value_type;
+ using value_type2 = typename OutputIteratorType::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+ return Impl::adjacent_difference_exespace_impl(
+ "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
+ first_dest, binary_op());
+}
+
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+OutputIteratorType adjacent_difference(const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ BinaryOp bin_op) {
+ return Impl::adjacent_difference_exespace_impl(
+ "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
+ first_dest, bin_op);
+}
+
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+OutputIteratorType adjacent_difference(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest) {
+ using value_type1 = typename InputIteratorType::value_type;
+ using value_type2 = typename OutputIteratorType::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+ return Impl::adjacent_difference_exespace_impl(
+ label, ex, first_from, last_from, first_dest, binary_op());
+}
+
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+OutputIteratorType adjacent_difference(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ BinaryOp bin_op) {
+ return Impl::adjacent_difference_exespace_impl(label, ex, first_from,
+ last_from, first_dest, bin_op);
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto adjacent_difference(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ using view_type1 = ::Kokkos::View<DataType1, Properties1...>;
+ using view_type2 = ::Kokkos::View<DataType2, Properties2...>;
+ using value_type1 = typename view_type1::value_type;
+ using value_type2 = typename view_type2::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+ return Impl::adjacent_difference_exespace_impl(
+ "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
+ KE::cend(view_from), KE::begin(view_dest), binary_op());
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto adjacent_difference(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp bin_op) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ return Impl::adjacent_difference_exespace_impl(
+ "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
+ KE::cend(view_from), KE::begin(view_dest), bin_op);
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto adjacent_difference(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ using view_type1 = ::Kokkos::View<DataType1, Properties1...>;
+ using view_type2 = ::Kokkos::View<DataType2, Properties2...>;
+ using value_type1 = typename view_type1::value_type;
+ using value_type2 = typename view_type2::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+
+ return Impl::adjacent_difference_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op());
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto adjacent_difference(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp bin_op) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ return Impl::adjacent_difference_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), bin_op);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_team_handle<TeamHandleType>::value,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType adjacent_difference(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest) {
+ using value_type1 = typename InputIteratorType::value_type;
+ using value_type2 = typename OutputIteratorType::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+ return Impl::adjacent_difference_team_impl(teamHandle, first_from, last_from,
+ first_dest, binary_op());
+}
+
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value &&
+ ::Kokkos::is_team_handle<TeamHandleType>::value,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType
+adjacent_difference(const TeamHandleType& teamHandle,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, BinaryOp bin_op) {
+ return Impl::adjacent_difference_team_impl(teamHandle, first_from, last_from,
+ first_dest, bin_op);
+}
+
+template <
+ typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto adjacent_difference(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ using view_type1 = ::Kokkos::View<DataType1, Properties1...>;
+ using view_type2 = ::Kokkos::View<DataType2, Properties2...>;
+ using value_type1 = typename view_type1::value_type;
+ using value_type2 = typename view_type2::value_type;
+ using binary_op =
+ Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+ value_type2>;
+ return Impl::adjacent_difference_team_impl(teamHandle, KE::cbegin(view_from),
+ KE::cend(view_from),
+ KE::begin(view_dest), binary_op());
+}
+
+template <
+ typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto adjacent_difference(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp bin_op) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ return Impl::adjacent_difference_team_impl(teamHandle, KE::cbegin(view_from),
+ KE::cend(view_from),
+ KE::begin(view_dest), bin_op);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
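Editorial aside, not part of the diff: a minimal sketch of how the view-based overloads above are typically invoked. The view names, sizes, and fill values are invented, and the unlabeled execution-space overload is assumed to be declared earlier in this header. With the default binary op the semantics mirror `std::adjacent_difference`, i.e. `dest(0) = from(0)` and `dest(i) = from(i) - from(i-1)`; overlapping source and destination are not supported by the parallel implementation.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> from("from", 5);
  Kokkos::View<int*> dest("dest", 5);
  // from = {0, 1, 4, 9, 16}
  Kokkos::parallel_for("fill", 5, KOKKOS_LAMBDA(int i) { from(i) = i * i; });

  // dest becomes {0, 1, 3, 5, 7}; source and destination must be distinct
  KE::adjacent_difference(ex, from, dest);
  ex.fence();  // the kernel may be asynchronous; fence before reading dest
  return 0;
}
```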
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
+
+#include "impl/Kokkos_AdjacentFind.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::adjacent_find_exespace_impl(
+ "Kokkos::adjacent_find_iterator_api_default", ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::adjacent_find_exespace_impl(label, ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto adjacent_find(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_exespace_impl(
+ "Kokkos::adjacent_find_view_api_default", ex, KE::begin(v), KE::end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_exespace_impl(label, ex, KE::begin(v), KE::end(v));
+}
+
+// overload set 2
+template <
+ typename ExecutionSpace, typename IteratorType,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, BinaryPredicateType pred) {
+ return Impl::adjacent_find_exespace_impl(
+ "Kokkos::adjacent_find_iterator_api_default", ex, first, last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ BinaryPredicateType pred) {
+ return Impl::adjacent_find_exespace_impl(label, ex, first, last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto adjacent_find(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ BinaryPredicateType pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_exespace_impl(
+ "Kokkos::adjacent_find_view_api_default", ex, KE::begin(v), KE::end(v),
+ pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ BinaryPredicateType pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_exespace_impl(label, ex, KE::begin(v), KE::end(v),
+ pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since the string allocation causes issues on device.
+//
+
+// overload set 1
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType adjacent_find(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last) {
+ return Impl::adjacent_find_team_impl(teamHandle, first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto adjacent_find(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_team_impl(teamHandle, KE::begin(v), KE::end(v));
+}
+
+// overload set 2
+template <typename TeamHandleType, typename IteratorType,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType adjacent_find(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last,
+ BinaryPredicateType pred) {
+ return Impl::adjacent_find_team_impl(teamHandle, first, last, pred);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto adjacent_find(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ BinaryPredicateType pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::adjacent_find_team_impl(teamHandle, KE::begin(v), KE::end(v),
+ pred);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
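A hedged usage sketch of the view overload above (editorial, with invented data): `adjacent_find` returns an iterator to the first element that compares equal to its successor, or the end iterator when no such pair exists, and the returned iterator supports plain arithmetic on the host.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<int*> v("v", 6);
  // v = {0, 1, 2, 2, 3, 4}: the first adjacent equal pair starts at index 2
  Kokkos::parallel_for("fill", 6,
                       KOKKOS_LAMBDA(int i) { v(i) = (i <= 2) ? i : i - 1; });

  auto it = KE::adjacent_find(Kokkos::DefaultExecutionSpace(), v);
  // a result equal to v.extent(0) would mean "no adjacent pair found"
  const auto idx = it - KE::begin(v);
  std::printf("first adjacent pair at index %ld\n", static_cast<long>(idx));
  return 0;
}
```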
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool all_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return Impl::all_of_exespace_impl("Kokkos::all_of_iterator_api_default", ex,
+ first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool all_of(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last, Predicate predicate) {
+ return Impl::all_of_exespace_impl(label, ex, first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool all_of(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::all_of_exespace_impl("Kokkos::all_of_view_api_default", ex,
+ KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool all_of(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::all_of_exespace_impl(label, ex, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since the string allocation causes issues on device.
+//
+template <typename TeamHandleType, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool all_of(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return Impl::all_of_team_impl(teamHandle, first, last, predicate);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool all_of(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::all_of_team_impl(teamHandle, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
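A minimal illustration of the execution-space view overload above (not part of the patch; the data is made up). The predicate is evaluated inside a device kernel, which is why it is written as a `KOKKOS_LAMBDA`; the call blocks and returns the result to the host.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<double*> v("v", 10);
  Kokkos::deep_copy(v, 1.5);  // fill every entry with 1.5

  const bool ok = KE::all_of(Kokkos::DefaultExecutionSpace(), v,
                             KOKKOS_LAMBDA(double x) { return x > 0.0; });
  std::printf("all positive: %s\n", ok ? "yes" : "no");
  return 0;
}
```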
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool any_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return Impl::any_of_exespace_impl("Kokkos::any_of_view_api_default", ex,
+ first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool any_of(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last, Predicate predicate) {
+ return Impl::any_of_exespace_impl(label, ex, first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool any_of(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::any_of_exespace_impl("Kokkos::any_of_view_api_default", ex,
+ KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool any_of(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::any_of_exespace_impl(label, ex, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since the string allocation causes issues on device.
+//
+template <typename TeamHandleType, typename InputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool any_of(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return Impl::any_of_team_impl(teamHandle, first, last, predicate);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool any_of(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::any_of_team_impl(teamHandle, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
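The `any_of` overloads mirror `all_of`; a small, invented example (editorial, not part of the diff):

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<int*> v("v", 8);
  Kokkos::parallel_for("fill", 8, KOKKOS_LAMBDA(int i) { v(i) = 2 * i + 1; });

  // v holds only odd numbers, so this reports false
  const bool has_even = KE::any_of(Kokkos::DefaultExecutionSpace(), v,
                                   KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
  std::printf("contains an even value: %s\n", has_even ? "yes" : "no");
  return 0;
}
```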
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_BEGIN_END_HPP
#define KOKKOS_BEGIN_END_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_HPP
+
+#include "impl/Kokkos_CopyCopyN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ return Impl::copy_exespace_impl("Kokkos::copy_iterator_api_default", ex,
+ first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::copy_exespace_impl(label, ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_exespace_impl("Kokkos::copy_view_api_default", ex,
+ KE::cbegin(source), KE::cend(source),
+ KE::begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_exespace_impl(label, ex, KE::cbegin(source),
+ KE::cend(source), KE::begin(dest));
+}
+
+//
+// overload set accepting a team handle
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator copy(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::copy_team_impl(teamHandle, first, last, d_first);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_team_impl(teamHandle, KE::cbegin(source), KE::cend(source),
+ KE::begin(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
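A quick editorial sketch of the view overload above, with invented data. For plain whole-view copies, `Kokkos::deep_copy` is the usual tool; the algorithm form is mainly useful when composing with iterators and subranges.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> src("src", 4);
  Kokkos::View<int*> dst("dst", 4);
  Kokkos::parallel_for("fill", 4, KOKKOS_LAMBDA(int i) { src(i) = 10 * i; });

  KE::copy(ex, src, dst);  // element-wise copy, like std::copy
  ex.fence();              // fence before reading dst elsewhere
  return 0;
}
```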
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
+
+#include "impl/Kokkos_CopyBackward.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 copy_backward(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 d_last) {
+ return Impl::copy_backward_exespace_impl(
+ "Kokkos::copy_backward_iterator_api_default", ex, first, last, d_last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 copy_backward(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 d_last) {
+ return Impl::copy_backward_exespace_impl(label, ex, first, last, d_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_backward(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_backward_exespace_impl(
+ "Kokkos::copy_backward_view_api_default", ex, cbegin(source),
+ cend(source), end(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_backward(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_backward_exespace_impl(label, ex, cbegin(source),
+ cend(source), end(dest));
+}
+
+//
+// overload set accepting a team handle
+//
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType2 copy_backward(const TeamHandleType& teamHandle,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 d_last) {
+ return Impl::copy_backward_team_impl(teamHandle, first, last, d_last);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto copy_backward(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_backward_team_impl(teamHandle, cbegin(source), cend(source),
+ end(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
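An editorial sketch with invented data: note from the implementation above that the view overload passes `end(dest)` as `d_last`, so the source is written into the *tail* of the destination, end-aligned. Since the copy runs in parallel, overlapping source and destination ranges are best avoided, unlike with the sequential `std::copy_backward`.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> src("src", 3);
  Kokkos::View<int*> dst("dst", 6);
  Kokkos::deep_copy(src, 7);

  // writes src into dst(3..5); dst(0..2) is left untouched
  KE::copy_backward(ex, src, dst);
  ex.fence();
  return 0;
}
```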
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
+
+#include "impl/Kokkos_CopyIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy_if(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first,
+ Predicate pred) {
+ return Impl::copy_if_exespace_impl("Kokkos::copy_if_iterator_api_default", ex,
+ first, last, d_first, std::move(pred));
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy_if(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first, Predicate pred) {
+ return Impl::copy_if_exespace_impl(label, ex, first, last, d_first,
+ std::move(pred));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ Predicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_if_exespace_impl("Kokkos::copy_if_view_api_default", ex,
+ cbegin(source), cend(source), begin(dest),
+ std::move(pred));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ Predicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_if_exespace_impl(label, ex, cbegin(source), cend(source),
+ begin(dest), std::move(pred));
+}
+
+//
+// overload set accepting a team handle
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator copy_if(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first, Predicate pred) {
+ return Impl::copy_if_team_impl(teamHandle, first, last, d_first,
+ std::move(pred));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto copy_if(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest, Predicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::copy_if_team_impl(teamHandle, cbegin(source), cend(source),
+ begin(dest), std::move(pred));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
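A hedged usage sketch (invented data, not part of the patch): the iterator returned by `copy_if` marks the end of the written range, so subtracting `begin(dest)` yields how many elements satisfied the predicate.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> src("src", 8);
  Kokkos::View<int*> dst("dst", 8);
  Kokkos::parallel_for("fill", 8, KOKKOS_LAMBDA(int i) { src(i) = i; });

  // keep the even values; the returned iterator marks the end of the output
  auto it = KE::copy_if(ex, src, dst,
                        KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
  const auto n_copied = it - KE::begin(dst);  // 4 here: {0, 2, 4, 6}
  std::printf("copied %ld elements\n", static_cast<long>(n_copied));
  return 0;
}
```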
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_N_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_N_HPP
+
+#include "impl/Kokkos_CopyCopyN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename Size,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy_n(const ExecutionSpace& ex, InputIterator first, Size count,
+ OutputIterator result) {
+ return Impl::copy_n_exespace_impl("Kokkos::copy_n_iterator_api_default", ex,
+ first, count, result);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename Size,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator copy_n(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, Size count, OutputIterator result) {
+ return Impl::copy_n_exespace_impl(label, ex, first, count, result);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename Size, typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_n_exespace_impl("Kokkos::copy_n_view_api_default", ex,
+ KE::cbegin(source), count, KE::begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename Size, typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto copy_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_n_exespace_impl(label, ex, KE::cbegin(source), count,
+ KE::begin(dest));
+}
+
+//
+// overload set accepting a team handle
+//
+template <typename TeamHandleType, typename InputIterator, typename Size,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator copy_n(const TeamHandleType& teamHandle,
+ InputIterator first, Size count,
+ OutputIterator result) {
+ return Impl::copy_n_team_impl(teamHandle, first, count, result);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename Size, typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto copy_n(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::copy_n_team_impl(teamHandle, KE::cbegin(source), count,
+ KE::begin(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
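A small invented example of the view overload above; `copy_n` copies exactly `count` elements starting at the beginning of the source.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> src("src", 8);
  Kokkos::View<int*> dst("dst", 8);
  Kokkos::parallel_for("fill", 8, KOKKOS_LAMBDA(int i) { src(i) = i + 1; });

  // copy only the first three elements of src into dst
  KE::copy_n(ex, src, 3, dst);
  ex.fence();
  return 0;
}
```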
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_HPP
+
+#include "impl/Kokkos_CountCountIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+typename IteratorType::difference_type count(const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ const T& value) {
+ return Impl::count_exespace_impl("Kokkos::count_iterator_api_default", ex,
+ first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+typename IteratorType::difference_type count(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ const T& value) {
+ return Impl::count_exespace_impl(label, ex, first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto count(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_exespace_impl("Kokkos::count_view_api_default", ex,
+ KE::cbegin(v), KE::cend(v), value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto count(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_exespace_impl(label, ex, KE::cbegin(v), KE::cend(v),
+ value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since the string allocation causes issues on device.
+//
+
+template <typename TeamHandleType, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION typename IteratorType::difference_type count(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ const T& value) {
+ return Impl::count_team_impl(teamHandle, first, last, value);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto count(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_team_impl(teamHandle, KE::cbegin(v), KE::cend(v), value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
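An editorial usage sketch with made-up data: `count` performs a parallel reduction and returns the number of elements equal to `value` as a host-side integer.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<int*> v("v", 10);
  Kokkos::parallel_for("fill", 10, KOKKOS_LAMBDA(int i) { v(i) = i % 3; });

  // counts elements equal to 0, i.e. indices 0, 3, 6, 9 -> 4
  const auto n = KE::count(Kokkos::DefaultExecutionSpace(), v, 0);
  std::printf("zeros: %ld\n", static_cast<long>(n));
  return 0;
}
```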
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
+
+#include "impl/Kokkos_CountCountIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+typename IteratorType::difference_type count_if(const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ Predicate predicate) {
+ return Impl::count_if_exespace_impl("Kokkos::count_if_iterator_api_default",
+ ex, first, last, std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+typename IteratorType::difference_type count_if(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ Predicate predicate) {
+ return Impl::count_if_exespace_impl(label, ex, first, last,
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto count_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_if_exespace_impl("Kokkos::count_if_view_api_default", ex,
+ KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto count_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_if_exespace_impl(label, ex, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+//
+template <typename TeamHandleType, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION typename IteratorType::difference_type count_if(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return Impl::count_if_team_impl(teamHandle, first, last,
+ std::move(predicate));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto count_if(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::count_if_team_impl(teamHandle, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
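The predicate form works the same way; a minimal invented example (not part of the diff):

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<double*> v("v", 100);
  Kokkos::parallel_for("fill", 100, KOKKOS_LAMBDA(int i) { v(i) = i; });

  // i = 50..99 satisfy the predicate -> 50
  const auto n = KE::count_if(Kokkos::DefaultExecutionSpace(), v,
                              KOKKOS_LAMBDA(double x) { return x >= 50.0; });
  std::printf("values >= 50: %ld\n", static_cast<long>(n));
  return 0;
}
```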
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
+#define KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
+
+#include "impl/Kokkos_Constraints.hpp"
+#include "impl/Kokkos_RandomAccessIterator.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class IteratorType>
+KOKKOS_INLINE_FUNCTION constexpr typename IteratorType::difference_type
+distance(IteratorType first, IteratorType last) {
+ static_assert(
+ ::Kokkos::Experimental::Impl::are_random_access_iterators<
+ IteratorType>::value,
+ "Kokkos::Experimental::distance: only implemented for random access "
+ "iterators.");
+
+ return last - first;
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
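Since these iterators are random access by construction, `distance` is simply `last - first`, and being a constexpr `KOKKOS_INLINE_FUNCTION` it is usable in both host and device code. A trivial editorial sketch:

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;

  Kokkos::View<float*> v("v", 42);
  // equivalent to KE::end(v) - KE::begin(v), i.e. v.extent(0)
  const auto d = KE::distance(KE::begin(v), KE::end(v));
  std::printf("distance: %ld\n", static_cast<long>(d));
  return 0;
}
```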
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_HPP
+#define KOKKOS_STD_ALGORITHMS_EQUAL_HPP
+
+#include "impl/Kokkos_Equal.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2> &&
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2) {
+ return Impl::equal_exespace_impl("Kokkos::equal_iterator_api_default", ex,
+ first1, last1, first2);
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1, IteratorType2 first2) {
+ return Impl::equal_exespace_impl(label, ex, first1, last1, first2);
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, BinaryPredicateType predicate) {
+ return Impl::equal_exespace_impl("Kokkos::equal_iterator_api_default", ex,
+ first1, last1, first2, std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+ BinaryPredicateType predicate) {
+ return Impl::equal_exespace_impl(label, ex, first1, last1, first2,
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool equal(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_exespace_impl("Kokkos::equal_view_api_default", ex,
+ KE::cbegin(view1), KE::cend(view1),
+ KE::cbegin(view2));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_exespace_impl(label, ex, KE::cbegin(view1),
+ KE::cend(view1), KE::cbegin(view2));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool equal(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_exespace_impl("Kokkos::equal_view_api_default", ex,
+ KE::cbegin(view1), KE::cend(view1),
+ KE::cbegin(view2), std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_exespace_impl(label, ex, KE::cbegin(view1),
+ KE::cend(view1), KE::cbegin(view2),
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ return Impl::equal_exespace_impl("Kokkos::equal_iterator_api_default", ex,
+ first1, last1, first2, last2);
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+ IteratorType2 last2) {
+ return Impl::equal_exespace_impl(label, ex, first1, last1, first2, last2);
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType predicate) {
+ return Impl::equal_exespace_impl("Kokkos::equal_iterator_api_default", ex,
+ first1, last1, first2, last2,
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+ IteratorType2 last2, BinaryPredicateType predicate) {
+ return Impl::equal_exespace_impl(label, ex, first1, last1, first2, last2,
+ std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since the string allocation causes issues on device.
+//
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION bool equal(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2) {
+ return Impl::equal_team_impl(teamHandle, first1, last1, first2);
+}
+
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION bool equal(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2,
+ BinaryPredicateType predicate) {
+ return Impl::equal_team_impl(teamHandle, first1, last1, first2,
+ std::move(predicate));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool equal(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_team_impl(teamHandle, KE::cbegin(view1), KE::cend(view1),
+ KE::cbegin(view2));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool equal(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::equal_team_impl(teamHandle, KE::cbegin(view1), KE::cend(view1),
+ KE::cbegin(view2), std::move(predicate));
+}
+
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION bool equal(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ return Impl::equal_team_impl(teamHandle, first1, last1, first2, last2);
+}
+
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators_v<
+ IteratorType1, IteratorType2>&& ::Kokkos::
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION bool equal(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType predicate) {
+ return Impl::equal_team_impl(teamHandle, first1, last1, first2, last2,
+ std::move(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
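A hedged sketch of the two-view overloads above, with invented data; note the view form iterates the first view's range, so the two views are assumed to have the same extent here. The default comparison is `operator==`, and a binary predicate can replace it.

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::ScopeGuard guard(argc, argv);
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;

  Kokkos::View<int*> a("a", 5);
  Kokkos::View<int*> b("b", 5);
  Kokkos::parallel_for("fill", 5, KOKKOS_LAMBDA(int i) {
    a(i) = i;
    b(i) = i;
  });

  const bool same = KE::equal(ex, a, b);  // element-wise operator==
  // custom comparison: treat values as equal when they differ by at most 1
  const bool close = KE::equal(ex, a, b, KOKKOS_LAMBDA(int x, int y) {
    return (x > y ? x - y : y - x) <= 1;
  });
  std::printf("same: %d, close: %d\n", same, close);
  return 0;
}
```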
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_ExclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType exclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_default_op_exespace_impl(
+ "Kokkos::exclusive_scan_default_functors_iterator_api", ex, first, last,
+ first_dest, std::move(init_value));
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType exclusive_scan(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_default_op_exespace_impl(
+ label, ex, first, last, first_dest, std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto exclusive_scan(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_default_op_exespace_impl(
+ "Kokkos::exclusive_scan_default_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_default_op_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value));
+}
+
+// overload set 2: binary operation passed
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType exclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_custom_op_exespace_impl(
+ "Kokkos::exclusive_scan_custom_functors_iterator_api", ex, first, last,
+ first_dest, std::move(init_value), bop);
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType exclusive_scan(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_custom_op_exespace_impl(
+ label, ex, first, last, first_dest, std::move(init_value), bop);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto exclusive_scan(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_custom_op_exespace_impl(
+ "Kokkos::exclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ std::move(init_value), bop);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_custom_op_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value), bop);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1: no binary operation passed
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType> &&
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType
+exclusive_scan(const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_default_op_team_impl(
+ teamHandle, first, last, first_dest, std::move(init_value));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto exclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_default_op_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value));
+}
+
+// overload set 2: binary operation passed
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType> &&
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType
+exclusive_scan(const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::exclusive_scan_custom_op_team_impl(
+ teamHandle, first, last, first_dest, std::move(init_value), bop);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto exclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType bop) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::exclusive_scan_custom_op_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value), bop);
+}
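+
+// A minimal usage sketch of the execution-space overloads above (assuming an
+// initialized Kokkos runtime and <Kokkos_StdAlgorithms.hpp> included; "in"
+// and "out" are hypothetical names):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> in("in", 5), out("out", 5);
+//   KE::fill(Kokkos::DefaultExecutionSpace(), in, 1);
+//   // default binary op (sum): out = {0, 1, 2, 3, 4}
+//   KE::exclusive_scan(Kokkos::DefaultExecutionSpace(), in, out, 0);
+//   // custom binary op, must be device callable:
+//   KE::exclusive_scan(Kokkos::DefaultExecutionSpace(), in, out, 1,
+//                      KOKKOS_LAMBDA(int a, int b) { return a * b; });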
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_HPP
+
+#include "impl/Kokkos_FillFillN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void fill(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+ const T& value) {
+ Impl::fill_exespace_impl("Kokkos::fill_iterator_api_default", ex, first, last,
+ value);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void fill(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, const T& value) {
+ Impl::fill_exespace_impl(label, ex, first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void fill(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::fill_exespace_impl("Kokkos::fill_view_api_default", ex, begin(view),
+ end(view), value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void fill(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::fill_exespace_impl(label, ex, begin(view), end(view), value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void fill(const TeamHandleType& th, IteratorType first,
+ IteratorType last, const T& value) {
+ Impl::fill_team_impl(th, first, last, value);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void fill(const TeamHandleType& th,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::fill_team_impl(th, begin(view), end(view), value);
+}
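+
+// A minimal usage sketch (assuming an initialized Kokkos runtime and
+// <Kokkos_StdAlgorithms.hpp> included):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 10);
+//   KE::fill(Kokkos::DefaultExecutionSpace(), v, 7);  // every v(i) becomes 7
+//   // same, but with a user-provided label for profiling tools:
+//   KE::fill("my_fill", Kokkos::DefaultExecutionSpace(), v, 7);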
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_N_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_N_HPP
+
+#include "impl/Kokkos_FillFillN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename SizeType,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType fill_n(const ExecutionSpace& ex, IteratorType first, SizeType n,
+ const T& value) {
+ return Impl::fill_n_exespace_impl("Kokkos::fill_n_iterator_api_default", ex,
+ first, n, value);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename SizeType,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType fill_n(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, SizeType n, const T& value) {
+ return Impl::fill_n_exespace_impl(label, ex, first, n, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename SizeType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto fill_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
+ const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::fill_n_exespace_impl("Kokkos::fill_n_view_api_default", ex,
+ begin(view), n, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename SizeType, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto fill_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
+ const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::fill_n_exespace_impl(label, ex, begin(view), n, value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename SizeType,
+ typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType fill_n(const TeamHandleType& th,
+ IteratorType first, SizeType n,
+ const T& value) {
+ return Impl::fill_n_team_impl(th, first, n, value);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename SizeType, typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto fill_n(const TeamHandleType& th,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ SizeType n, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::fill_n_team_impl(th, begin(view), n, value);
+}
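+
+// A minimal usage sketch (assuming an initialized Kokkos runtime): fill only
+// the first 10 entries; the returned iterator points past the last element
+// written:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<double*> v("v", 100);
+//   auto it = KE::fill_n(Kokkos::DefaultExecutionSpace(), KE::begin(v), 10, 3.14);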
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+InputIterator find(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, const T& value) {
+ return Impl::find_exespace_impl("Kokkos::find_iterator_api_default", ex,
+ first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+InputIterator find(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last, const T& value) {
+ return Impl::find_exespace_impl(label, ex, first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_exespace_impl("Kokkos::find_view_api_default", ex,
+ KE::begin(view), KE::end(view), value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator, typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION InputIterator find(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ const T& value) {
+ return Impl::find_team_impl(teamHandle, first, last, value);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename T,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ const T& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ value);
+}
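+
+// A minimal usage sketch (assuming an initialized Kokkos runtime):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 20);
+//   auto it = KE::find(Kokkos::DefaultExecutionSpace(), v, 42);
+//   const bool found = (it != KE::end(v));  // iterators are comparable on host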
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_END_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_END_HPP
+
+#include "impl/Kokkos_FindEnd.hpp"
+#include "Kokkos_Equal.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1: no binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::find_end_exespace_impl("Kokkos::find_end_iterator_api_default",
+ ex, first, last, s_first, s_last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last) {
+ return Impl::find_end_exespace_impl(label, ex, first, last, s_first, s_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_end(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_exespace_impl("Kokkos::find_end_view_api_default", ex,
+ KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_end(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last, const BinaryPredicateType& pred) {
+ return Impl::find_end_exespace_impl("Kokkos::find_end_iterator_api_default",
+ ex, first, last, s_first, s_last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::find_end_exespace_impl(label, ex, first, last, s_first, s_last,
+ pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_end(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_exespace_impl("Kokkos::find_end_view_api_default", ex,
+ KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_end(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1: no binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 find_end(const TeamHandleType& teamHandle,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::find_end_team_impl(teamHandle, first, last, s_first, s_last);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_end(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 find_end(const TeamHandleType& teamHandle,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::find_end_team_impl(teamHandle, first, last, s_first, s_last,
+ pred);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_end(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_end_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
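+
+// A minimal usage sketch: locate the *last* occurrence of the sequence held
+// in s_view within view (hypothetical views, assuming an initialized Kokkos
+// runtime):
+//
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::find_end(Kokkos::DefaultExecutionSpace(), view, s_view);
+//   // it == KE::end(view) if the sequence does not occur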
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
+
+#include "impl/Kokkos_FindFirstOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1: no binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::find_first_of_exespace_impl(
+ "Kokkos::find_first_of_iterator_api_default", ex, first, last, s_first,
+ s_last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last) {
+ return Impl::find_first_of_exespace_impl(label, ex, first, last, s_first,
+ s_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_first_of(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_exespace_impl(
+ "Kokkos::find_first_of_view_api_default", ex, KE::begin(view),
+ KE::end(view), KE::begin(s_view), KE::end(s_view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_first_of(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_exespace_impl(label, ex, KE::begin(view),
+ KE::end(view), KE::begin(s_view),
+ KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::find_first_of_exespace_impl(
+ "Kokkos::find_first_of_iterator_api_default", ex, first, last, s_first,
+ s_last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::find_first_of_exespace_impl(label, ex, first, last, s_first,
+ s_last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_first_of(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_exespace_impl(
+ "Kokkos::find_first_of_view_api_default", ex, KE::begin(view),
+ KE::end(view), KE::begin(s_view), KE::end(s_view), pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto find_first_of(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_exespace_impl(label, ex, KE::begin(view),
+ KE::end(view), KE::begin(s_view),
+ KE::end(s_view), pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1: no binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 find_first_of(const TeamHandleType& teamHandle,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::find_first_of_team_impl(teamHandle, first, last, s_first,
+ s_last);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_first_of(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_team_impl(teamHandle, KE::begin(view),
+ KE::end(view), KE::begin(s_view),
+ KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 find_first_of(const TeamHandleType& teamHandle,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::find_first_of_team_impl(teamHandle, first, last, s_first, s_last,
+ pred);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_first_of(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_first_of_team_impl(teamHandle, KE::begin(view),
+ KE::end(view), KE::begin(s_view),
+ KE::end(s_view), pred);
+}
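+
+// A minimal usage sketch: find the first element of view equal to *any*
+// element of s_view (hypothetical views, assuming an initialized Kokkos
+// runtime):
+//
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::find_first_of(Kokkos::DefaultExecutionSpace(), view, s_view);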
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType find_if(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, PredicateType predicate) {
+ return Impl::find_if_or_not_exespace_impl<true>(
+ "Kokkos::find_if_iterator_api_default", ex, first, last,
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType find_if(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ PredicateType predicate) {
+ return Impl::find_if_or_not_exespace_impl<true>(label, ex, first, last,
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto find_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_exespace_impl<true>(
+ "Kokkos::find_if_view_api_default", ex, KE::begin(v), KE::end(v),
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto find_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_exespace_impl<true>(
+ label, ex, KE::begin(v), KE::end(v), std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ typename PredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType find_if(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ PredicateType predicate) {
+ return Impl::find_if_or_not_team_impl<true>(teamHandle, first, last,
+ std::move(predicate));
+}
+
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ typename Predicate,
+    std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_if(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_team_impl<true>(teamHandle, KE::begin(v),
+ KE::end(v), std::move(predicate));
+}
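+
+// A minimal usage sketch (assuming an initialized Kokkos runtime; the
+// predicate must be device callable, hence KOKKOS_LAMBDA):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 20);
+//   auto it = KE::find_if(Kokkos::DefaultExecutionSpace(), v,
+//                         KOKKOS_LAMBDA(int x) { return x > 3; });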
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType find_if_not(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, Predicate predicate) {
+ return Impl::find_if_or_not_exespace_impl<false>(
+ "Kokkos::find_if_not_iterator_api_default", ex, first, last,
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType find_if_not(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return Impl::find_if_or_not_exespace_impl<false>(label, ex, first, last,
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto find_if_not(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_exespace_impl<false>(
+ "Kokkos::find_if_not_view_api_default", ex, KE::begin(v), KE::end(v),
+ std::move(predicate));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto find_if_not(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_exespace_impl<false>(
+ label, ex, KE::begin(v), KE::end(v), std::move(predicate));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType find_if_not(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return Impl::find_if_or_not_team_impl<false>(teamHandle, first, last,
+ std::move(predicate));
+}
+
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ typename Predicate,
+    std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto find_if_not(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::find_if_or_not_team_impl<false>(
+ teamHandle, KE::begin(v), KE::end(v), std::move(predicate));
+}
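+
+// A minimal usage sketch: the first element for which the predicate returns
+// false (assuming an initialized Kokkos runtime and a device-callable
+// predicate):
+//
+//   auto it = Kokkos::Experimental::find_if_not(
+//       Kokkos::DefaultExecutionSpace(), v,
+//       KOKKOS_LAMBDA(int x) { return x > 3; });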
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
+
+#include "impl/Kokkos_ForEachForEachN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ class ExecutionSpace, class IteratorType, class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void for_each(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, UnaryFunctorType functor) {
+ Impl::for_each_exespace_impl(label, ex, first, last, std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class IteratorType, class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void for_each(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+ UnaryFunctorType functor) {
+ Impl::for_each_exespace_impl("Kokkos::for_each_iterator_api_default", ex,
+ first, last, std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class DataType, class... Properties,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void for_each(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::for_each_exespace_impl(label, ex, KE::begin(v), KE::end(v),
+ std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class DataType, class... Properties,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void for_each(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::for_each_exespace_impl("Kokkos::for_each_view_api_default", ex,
+ KE::begin(v), KE::end(v), std::move(functor));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+template <class TeamHandleType, class IteratorType, class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void for_each(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ UnaryFunctorType functor) {
+ Impl::for_each_team_impl(teamHandle, first, last, std::move(functor));
+}
+
+template <class TeamHandleType, class DataType, class... Properties,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void for_each(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::for_each_team_impl(teamHandle, KE::begin(v), KE::end(v),
+ std::move(functor));
+}
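+
+// A minimal usage sketch (assuming an initialized Kokkos runtime; the functor
+// receives each element by reference and must be device callable):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 20);
+//   KE::for_each(Kokkos::DefaultExecutionSpace(), v,
+//                KOKKOS_LAMBDA(int& x) { x *= 2; });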
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
+
+#include "impl/Kokkos_ForEachForEachN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ class ExecutionSpace, class IteratorType, class SizeType,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType for_each_n(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, SizeType n,
+ UnaryFunctorType functor) {
+ return Impl::for_each_n_exespace_impl(label, ex, first, n,
+ std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class IteratorType, class SizeType,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType for_each_n(const ExecutionSpace& ex, IteratorType first,
+ SizeType n, UnaryFunctorType functor) {
+ return Impl::for_each_n_exespace_impl(
+ "Kokkos::for_each_n_iterator_api_default", ex, first, n,
+ std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class DataType, class... Properties, class SizeType,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto for_each_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::for_each_n_exespace_impl(label, ex, KE::begin(v), n,
+ std::move(functor));
+}
+
+template <
+ class ExecutionSpace, class DataType, class... Properties, class SizeType,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto for_each_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::for_each_n_exespace_impl("Kokkos::for_each_n_view_api_default",
+ ex, KE::begin(v), n,
+ std::move(functor));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType for_each_n(const TeamHandleType& teamHandle,
+ IteratorType first, SizeType n,
+ UnaryFunctorType functor) {
+ return Impl::for_each_n_team_impl(teamHandle, first, n, std::move(functor));
+}
+
+template <class TeamHandleType, class DataType, class... Properties,
+ class SizeType, class UnaryFunctorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto for_each_n(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
+ UnaryFunctorType functor) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::for_each_n_team_impl(teamHandle, KE::begin(v), n,
+ std::move(functor));
+}
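+
+// A minimal usage sketch: apply the functor to the first n elements only
+// (assuming an initialized Kokkos runtime and a hypothetical view v):
+//
+//   Kokkos::Experimental::for_each_n(Kokkos::DefaultExecutionSpace(), v, 10,
+//                                    KOKKOS_LAMBDA(int& x) { x = 0; });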
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_HPP
+
+#include "impl/Kokkos_GenerateGenerateN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType, typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+void generate(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+ Generator g) {
+ Impl::generate_exespace_impl("Kokkos::generate_iterator_api_default", ex,
+ first, last, std::move(g));
+}
+
+template <typename ExecutionSpace, typename IteratorType, typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+void generate(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, Generator g) {
+ Impl::generate_exespace_impl(label, ex, first, last, std::move(g));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+void generate(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ Impl::generate_exespace_impl("Kokkos::generate_view_api_default", ex,
+ begin(view), end(view), std::move(g));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+void generate(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ Impl::generate_exespace_impl(label, ex, begin(view), end(view), std::move(g));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename Generator,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void generate(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ Generator g) {
+ Impl::generate_team_impl(teamHandle, first, last, std::move(g));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename Generator,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void generate(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::generate_team_impl(teamHandle, begin(view), end(view), std::move(g));
+}
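+
+// A minimal usage sketch (assuming an initialized Kokkos runtime; the
+// generator takes no arguments, returns the value to assign, and must be
+// device callable):
+//
+//   Kokkos::View<int*> v("v", 20);
+//   Kokkos::Experimental::generate(Kokkos::DefaultExecutionSpace(), v,
+//                                  KOKKOS_LAMBDA() { return 7; });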
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
+
+#include "impl/Kokkos_GenerateGenerateN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType, typename Size,
+ typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType generate_n(const ExecutionSpace& ex, IteratorType first,
+ Size count, Generator g) {
+ return Impl::generate_n_exespace_impl(
+ "Kokkos::generate_n_iterator_api_default", ex, first, count,
+ std::move(g));
+}
+
+template <typename ExecutionSpace, typename IteratorType, typename Size,
+ typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType generate_n(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, Size count, Generator g) {
+ return Impl::generate_n_exespace_impl(label, ex, first, count, std::move(g));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Size, typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto generate_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, Size count,
+ Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::generate_n_exespace_impl("Kokkos::generate_n_view_api_default",
+ ex, begin(view), count, std::move(g));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Size, typename Generator,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto generate_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view, Size count,
+ Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::generate_n_exespace_impl(label, ex, begin(view), count,
+ std::move(g));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename Size,
+ typename Generator,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType generate_n(const TeamHandleType& teamHandle,
+ IteratorType first, Size count,
+ Generator g) {
+ return Impl::generate_n_team_impl(teamHandle, first, count, std::move(g));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename Size, typename Generator,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto generate_n(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, Size count,
+ Generator g) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::generate_n_team_impl(teamHandle, begin(view), count,
+ std::move(g));
+}
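+
+// Illustrative usage sketch (hypothetical names): write only the first
+// `count` entries; the returned iterator points past the last element
+// written.
+//
+//   Kokkos::View<int*> v("v", 100);
+//   auto it = Kokkos::Experimental::generate_n(
+//       Kokkos::DefaultExecutionSpace(), v, 50,
+//       KOKKOS_LAMBDA() { return 7; });  // v(0..49) == 7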
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_InclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest) {
+ return Impl::inclusive_scan_default_op_exespace_impl(
+ "Kokkos::inclusive_scan_default_functors_iterator_api", ex, first, last,
+ first_dest);
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest) {
+ return Impl::inclusive_scan_default_op_exespace_impl(label, ex, first, last,
+ first_dest);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_default_op_exespace_impl(
+ "Kokkos::inclusive_scan_default_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_default_op_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest));
+}
+
+// overload set 2 (accepting custom binary op)
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ BinaryOp binary_op) {
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
+ first_dest, binary_op);
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest, BinaryOp binary_op) {
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ label, ex, first, last, first_dest, binary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ "Kokkos::inclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ binary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op);
+}
+
+// overload set 3
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
+ first_dest, binary_op, std::move(init_value));
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType inclusive_scan(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ label, ex, first, last, first_dest, binary_op, std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ "Kokkos::inclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ binary_op, std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, std::move(init_value));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType
+inclusive_scan(const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest) {
+ return Impl::inclusive_scan_default_op_team_impl(teamHandle, first, last,
+ first_dest);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto inclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_default_op_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest));
+}
+
+// overload set 2 (accepting custom binary op)
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType inclusive_scan(
+ const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest, BinaryOp binary_op) {
+ return Impl::inclusive_scan_custom_binary_op_team_impl(
+ teamHandle, first, last, first_dest, binary_op);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto inclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op);
+}
+
+// overload set 3
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOp, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType
+inclusive_scan(const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::inclusive_scan_custom_binary_op_team_impl(
+ teamHandle, first, last, first_dest, binary_op, std::move(init_value));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOp,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto inclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOp binary_op, ValueType init_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::inclusive_scan_custom_binary_op_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, std::move(init_value));
+}
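+
+// Illustrative usage sketch (hypothetical names): prefix-sum a view into a
+// destination of the same length; with the default functors, out(i) holds
+// the sum of in(0..i). A custom binary op, and optionally an initial
+// value, can be supplied via overload sets 2 and 3 above.
+//
+//   Kokkos::View<int*> in("in", n), out("out", n);
+//   Kokkos::Experimental::inclusive_scan(Kokkos::DefaultExecutionSpace(),
+//                                        in, out);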
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
+
+#include "impl/Kokkos_IsPartitioned.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_partitioned(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, PredicateType p) {
+ return Impl::is_partitioned_exespace_impl(
+ "Kokkos::is_partitioned_iterator_api_default", ex, first, last,
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, PredicateType p) {
+ return Impl::is_partitioned_exespace_impl(label, ex, first, last,
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename PredicateType, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_partitioned(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ PredicateType p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::is_partitioned_exespace_impl(
+ "Kokkos::is_partitioned_view_api_default", ex, cbegin(v), cend(v),
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename PredicateType, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ PredicateType p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::is_partitioned_exespace_impl(label, ex, cbegin(v), cend(v),
+ std::move(p));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ typename PredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_partitioned(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ PredicateType p) {
+ return Impl::is_partitioned_team_impl(teamHandle, first, last, std::move(p));
+}
+
+template <typename TeamHandleType, typename PredicateType, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_partitioned(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, PredicateType p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ return Impl::is_partitioned_team_impl(teamHandle, cbegin(v), cend(v),
+ std::move(p));
+}
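+
+// Illustrative usage sketch (hypothetical names): check that every element
+// satisfying the predicate precedes every element that does not.
+//
+//   bool ok = Kokkos::Experimental::is_partitioned(
+//       Kokkos::DefaultExecutionSpace(), v,
+//       KOKKOS_LAMBDA(int x) { return x < 0; });  // negatives first?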
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
+
+#include "impl/Kokkos_IsSorted.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::is_sorted_exespace_impl("Kokkos::is_sorted_iterator_api_default",
+ ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::is_sorted_exespace_impl(label, ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_exespace_impl("Kokkos::is_sorted_view_api_default", ex,
+ KE::cbegin(view), KE::cend(view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_exespace_impl(label, ex, KE::cbegin(view),
+ KE::cend(view));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ return Impl::is_sorted_exespace_impl("Kokkos::is_sorted_iterator_api_default",
+ ex, first, last, std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ return Impl::is_sorted_exespace_impl(label, ex, first, last, std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_exespace_impl("Kokkos::is_sorted_view_api_default", ex,
+ KE::cbegin(view), KE::cend(view),
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_exespace_impl(label, ex, KE::cbegin(view),
+ KE::cend(view), std::move(comp));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_sorted(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last) {
+ return Impl::is_sorted_team_impl(teamHandle, first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_sorted(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_team_impl(teamHandle, KE::cbegin(view),
+ KE::cend(view));
+}
+
+template <typename TeamHandleType, typename IteratorType,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_sorted(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ return Impl::is_sorted_team_impl(teamHandle, first, last, std::move(comp));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool is_sorted(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_team_impl(teamHandle, KE::cbegin(view), KE::cend(view),
+ std::move(comp));
+}
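+
+// Illustrative usage sketch (hypothetical names): test for non-decreasing
+// order with the default comparison; a device-callable comparator may be
+// passed instead (not available on OpenMPTarget, see the static asserts).
+//
+//   bool sorted = Kokkos::Experimental::is_sorted(
+//       Kokkos::DefaultExecutionSpace(), v);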
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
+
+#include "impl/Kokkos_IsSortedUntil.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::is_sorted_until_exespace_impl(
+ "Kokkos::is_sorted_until_iterator_api_default", ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::is_sorted_until_exespace_impl(label, ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto is_sorted_until(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_exespace_impl(
+ "Kokkos::is_sorted_until_view_api_default", ex, KE::begin(view),
+ KE::end(view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_exespace_impl(label, ex, KE::begin(view),
+ KE::end(view));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ return Impl::is_sorted_until_exespace_impl(
+ "Kokkos::is_sorted_until_iterator_api_default", ex, first, last,
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::is_sorted_until_exespace_impl(label, ex, first, last,
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto is_sorted_until(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_exespace_impl(
+ "Kokkos::is_sorted_until_view_api_default", ex, KE::begin(view),
+ KE::end(view), std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_exespace_impl(label, ex, KE::begin(view),
+ KE::end(view), std::move(comp));
+}
+
+//
+// overload set accepting team handle
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType is_sorted_until(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last) {
+ return Impl::is_sorted_until_team_impl(teamHandle, first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto is_sorted_until(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_team_impl(teamHandle, KE::begin(view),
+ KE::end(view));
+}
+
+template <typename TeamHandleType, typename IteratorType,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType is_sorted_until(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ return Impl::is_sorted_until_team_impl(teamHandle, first, last,
+ std::move(comp));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto is_sorted_until(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::is_sorted_until_team_impl(teamHandle, KE::begin(view),
+ KE::end(view), std::move(comp));
+}
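+
+// Illustrative usage sketch (hypothetical names): the returned iterator
+// marks the end of the longest sorted prefix (equal to end(v) when the
+// whole range is sorted).
+//
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::is_sorted_until(Kokkos::DefaultExecutionSpace(), v);
+//   auto sorted_prefix_length = it - KE::begin(v);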
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
+#define KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
+
+#include <Kokkos_Core.hpp>
+#include "impl/Kokkos_Constraints.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2>
+struct StdIterSwapFunctor {
+ IteratorType1 m_a;
+ IteratorType2 m_b;
+
+ KOKKOS_FUNCTION
+ void operator()(int i) const {
+ (void)i;
+ ::Kokkos::kokkos_swap(*m_a, *m_b);
+ }
+
+ KOKKOS_FUNCTION
+ StdIterSwapFunctor(IteratorType1 _a, IteratorType2 _b)
+ : m_a(std::move(_a)), m_b(std::move(_b)) {}
+};
+
+template <class IteratorType1, class IteratorType2>
+void iter_swap_impl(IteratorType1 a, IteratorType2 b) {
+  // run the swap inside a single-iteration parallel_for on the default
+  // execution space, then fence so the effect is visible to the caller
+ ::Kokkos::parallel_for(
+ 1, StdIterSwapFunctor<IteratorType1, IteratorType2>(a, b));
+ Kokkos::DefaultExecutionSpace().fence(
+ "Kokkos::iter_swap: fence after operation");
+}
+} // namespace Impl
+//----------------------------------------------------------------------------
+
+// iter_swap
+template <class IteratorType1, class IteratorType2>
+void iter_swap(IteratorType1 a, IteratorType2 b) {
+ Impl::iter_swap_impl(a, b);
+}
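+
+// Illustrative usage sketch (hypothetical names): swap two elements of a
+// device view; the kernel launch and fence above make the result visible
+// to the host afterwards.
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 10);
+//   KE::iter_swap(KE::begin(v), KE::begin(v) + 5);  // swaps v(0) and v(5)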
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template <class T>
+KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::kokkos_swap instead!")
+KOKKOS_FUNCTION
+    // the exception specification must use the noexcept *operator*: the
+    // call itself returns void and is not a constant boolean expression
+    void swap(T& a, T& b) noexcept(noexcept(::Kokkos::kokkos_swap(
+        std::declval<T&>(), std::declval<T&>()))) {
+ ::Kokkos::kokkos_swap(a, b);
+}
+#endif
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
+#define KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
+
+#include "impl/Kokkos_LexicographicalCompare.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2,
+ IteratorType2 last2) {
+ return Impl::lexicographical_compare_exespace_impl(
+ "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
+ first2, last2);
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ return Impl::lexicographical_compare_exespace_impl(label, ex, first1, last1,
+ first2, last2);
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_exespace_impl(
+ "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
+ KE::cend(view1), KE::cbegin(view2), KE::cend(view2));
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_exespace_impl(
+ label, ex, KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
+ KE::cend(view2));
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class ComparatorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2,
+ IteratorType2 last2, ComparatorType comp) {
+ return Impl::lexicographical_compare_exespace_impl(
+ "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
+ first2, last2, comp);
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class ComparatorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ ComparatorType comp) {
+ return Impl::lexicographical_compare_exespace_impl(label, ex, first1, last1,
+ first2, last2, comp);
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class ComparatorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_exespace_impl(
+ "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
+ KE::cend(view1), KE::cbegin(view2), KE::cend(view2), comp);
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class ComparatorType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool lexicographical_compare(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_exespace_impl(
+ label, ex, KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
+ KE::cend(view2), comp);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool lexicographical_compare(const TeamHandleType& teamHandle,
+ IteratorType1 first1,
+ IteratorType1 last1,
+ IteratorType2 first2,
+ IteratorType2 last2) {
+ return Impl::lexicographical_compare_team_impl(teamHandle, first1, last1,
+ first2, last2);
+}
+
+template <class TeamHandleType, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool lexicographical_compare(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_team_impl(
+ teamHandle, KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
+ KE::cend(view2));
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class ComparatorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool lexicographical_compare(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2, ComparatorType comp) {
+ return Impl::lexicographical_compare_team_impl(teamHandle, first1, last1,
+ first2, last2, comp);
+}
+
+template <class TeamHandleType, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class ComparatorType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool lexicographical_compare(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::lexicographical_compare_team_impl(
+ teamHandle, KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
+ KE::cend(view2), comp);
+}
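+
+// Illustrative usage sketch (hypothetical names): true if `a` compares
+// lexicographically less than `b`, element by element, with the shorter
+// range ordering first on a tie.
+//
+//   bool less = Kokkos::Experimental::lexicographical_compare(
+//       Kokkos::DefaultExecutionSpace(), a, b);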
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLoc>(
+ "Kokkos::max_element_iterator_api_default", ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLoc>(label, ex, first,
+ last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLocCustomComparator>(
+ "Kokkos::max_element_iterator_api_default", ex, first, last,
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLocCustomComparator>(
+ label, ex, first, last, std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLoc>(
+ "Kokkos::max_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLoc>(label, ex,
+ begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLocCustomComparator>(
+ "Kokkos::max_element_view_api_default", ex, begin(v), end(v),
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MaxFirstLocCustomComparator>(
+ label, ex, begin(v), end(v), std::move(comp));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto max_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last) {
+ return Impl::min_or_max_element_team_impl<MaxFirstLoc>(teamHandle, first,
+ last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto max_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_team_impl<MaxFirstLoc>(teamHandle, begin(v),
+ end(v));
+}
+
+template <typename TeamHandleType, typename IteratorType,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto max_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ return Impl::min_or_max_element_team_impl<MaxFirstLocCustomComparator>(
+ teamHandle, first, last, std::move(comp));
+}
+
+template <typename TeamHandleType, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto max_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ return Impl::min_or_max_element_team_impl<MaxFirstLocCustomComparator>(
+ teamHandle, begin(v), end(v), std::move(comp));
+}
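+
+// Illustrative usage sketch (hypothetical names): locate the first largest
+// element; the iterator difference from begin(v) gives its index.
+//
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::max_element(Kokkos::DefaultExecutionSpace(), v);
+//   auto idx = it - KE::begin(v);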
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::min_or_max_element_exespace_impl<MinFirstLoc>(
+ "Kokkos::min_element_iterator_api_default", ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::min_or_max_element_exespace_impl<MinFirstLoc>(label, ex, first,
+ last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLocCustomComparator>(
+ "Kokkos::min_element_iterator_api_default", ex, first, last,
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLocCustomComparator>(
+ label, ex, first, last, std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLoc>(
+ "Kokkos::min_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLocCustomComparator>(
+ "Kokkos::min_element_view_api_default", ex, begin(v), end(v),
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLoc>(label, ex,
+ begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::min_or_max_element_exespace_impl<MinFirstLocCustomComparator>(
+ label, ex, begin(v), end(v), std::move(comp));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto min_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last) {
+ return Impl::min_or_max_element_team_impl<MinFirstLoc>(teamHandle, first,
+ last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto min_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::min_or_max_element_team_impl<MinFirstLoc>(teamHandle, begin(v),
+ end(v));
+}
+
+template <typename TeamHandleType, typename IteratorType,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto min_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ return Impl::min_or_max_element_team_impl<MinFirstLocCustomComparator>(
+ teamHandle, first, last, std::move(comp));
+}
+
+template <typename TeamHandleType, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto min_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ return Impl::min_or_max_element_team_impl<MinFirstLocCustomComparator>(
+ teamHandle, begin(v), end(v), std::move(comp));
+}
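+
+// Illustrative usage sketch (hypothetical names): a custom, device-callable
+// comparator selects the element smallest in magnitude rather than in
+// value.
+//
+//   auto it = Kokkos::Experimental::min_element(
+//       Kokkos::DefaultExecutionSpace(), v,
+//       KOKKOS_LAMBDA(int a, int b) { return a * a < b * b; });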
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLoc>(
+ "Kokkos::minmax_element_iterator_api_default", ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLoc>(label, ex,
+ first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLocCustomComparator>(
+ "Kokkos::minmax_element_iterator_api_default", ex, first, last,
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLocCustomComparator>(
+ label, ex, first, last, std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLoc>(
+ "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLoc>(
+ label, ex, begin(v), end(v));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLocCustomComparator>(
+ "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v),
+ std::move(comp));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::minmax_element_exespace_impl<MinMaxFirstLastLocCustomComparator>(
+ label, ex, begin(v), end(v), std::move(comp));
+}
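+
+// Usage sketch (illustrative; assumes a host-accessible view):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*, Kokkos::HostSpace> v("v", 100);
+//   auto p = KE::minmax_element(Kokkos::DefaultHostExecutionSpace(), v);
+//   // p.first points to the first smallest element, p.second to the last
+//   // largest one, matching the std::minmax_element conventions.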
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto minmax_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last) {
+ return Impl::minmax_element_team_impl<MinMaxFirstLastLoc>(teamHandle, first,
+ last);
+}
+
+template <typename TeamHandleType, typename IteratorType,
+ typename ComparatorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto minmax_element(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+
+ return Impl::minmax_element_team_impl<MinMaxFirstLastLocCustomComparator>(
+ teamHandle, first, last, std::move(comp));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto minmax_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ return Impl::minmax_element_team_impl<MinMaxFirstLastLoc>(teamHandle,
+ begin(v), end(v));
+}
+
+template <typename TeamHandleType, typename DataType, typename ComparatorType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto minmax_element(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, ComparatorType comp) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+
+ return Impl::minmax_element_team_impl<MinMaxFirstLastLocCustomComparator>(
+ teamHandle, begin(v), end(v), std::move(comp));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
+#define KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
+
+#include "impl/Kokkos_Mismatch.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// FIXME: add mismatch overloads accepting 3 iterators.
+// An overload consistent with other algorithms:
+//
+// auto mismatch(const ExecSpace& ex, It1 first1, It1 last1, It2 first2) {...}
+//
+// makes the API ambiguous (with the overload accepting views).
+
+//
+// overload set accepting execution space
+//
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(const ExecutionSpace& ex,
+ IteratorType1 first1,
+ IteratorType1 last1,
+ IteratorType2 first2,
+ IteratorType2 last2) {
+ return Impl::mismatch_exespace_impl("Kokkos::mismatch_iterator_api_default",
+ ex, first1, last1, first2, last2);
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+ const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType&& predicate) {
+ return Impl::mismatch_exespace_impl(
+ "Kokkos::mismatch_iterator_api_default", ex, first1, last1, first2, last2,
+ std::forward<BinaryPredicateType>(predicate));
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+ return Impl::mismatch_exespace_impl(label, ex, first1, last1, first2, last2);
+}
+
+template <
+ class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType&& predicate) {
+ return Impl::mismatch_exespace_impl(
+ label, ex, first1, last1, first2, last2,
+ std::forward<BinaryPredicateType>(predicate));
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto mismatch(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_exespace_impl("Kokkos::mismatch_view_api_default", ex,
+ KE::begin(view1), KE::end(view1),
+ KE::begin(view2), KE::end(view2));
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto mismatch(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType&& predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_exespace_impl(
+ "Kokkos::mismatch_view_api_default", ex, KE::begin(view1), KE::end(view1),
+ KE::begin(view2), KE::end(view2),
+ std::forward<BinaryPredicateType>(predicate));
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto mismatch(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_exespace_impl(label, ex, KE::begin(view1),
+ KE::end(view1), KE::begin(view2),
+ KE::end(view2));
+}
+
+template <
+ class ExecutionSpace, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto mismatch(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType&& predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_exespace_impl(
+ label, ex, KE::begin(view1), KE::end(view1), KE::begin(view2),
+ KE::end(view2), std::forward<BinaryPredicateType>(predicate));
+}
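+
+// Usage sketch (illustrative; assumes two host-accessible views of equal
+// length):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*, Kokkos::HostSpace> a("a", 100), b("b", 100);
+//   auto p = KE::mismatch(Kokkos::DefaultHostExecutionSpace(), a, b);
+//   // p.first and p.second point to the first position where a and b
+//   // differ, or to end(a) and end(b) if the ranges compare equal.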
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION ::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ return Impl::mismatch_team_impl(teamHandle, first1, last1, first2, last2);
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION ::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType&& predicate) {
+ return Impl::mismatch_team_impl(teamHandle, first1, last1, first2, last2,
+ std::forward<BinaryPredicateType>(predicate));
+}
+
+template <class TeamHandleType, class DataType1, class... Properties1,
+ class DataType2, class... Properties2,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto mismatch(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_team_impl(teamHandle, KE::begin(view1), KE::end(view1),
+ KE::begin(view2), KE::end(view2));
+}
+
+template <class TeamHandleType, class DataType1, class... Properties1,
+ class DataType2, class... Properties2, class BinaryPredicateType,
+ std::enable_if_t<Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto mismatch(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view1,
+ const ::Kokkos::View<DataType2, Properties2...>& view2,
+ BinaryPredicateType&& predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::mismatch_team_impl(teamHandle, KE::begin(view1), KE::end(view1),
+ KE::begin(view2), KE::end(view2),
+ std::forward<BinaryPredicateType>(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_HPP
+
+#include "impl/Kokkos_Move.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator move(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ return Impl::move_exespace_impl("Kokkos::move_iterator_api_default", ex,
+ first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator move(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::move_exespace_impl(label, ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto move(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_exespace_impl("Kokkos::move_view_api_default", ex,
+ begin(source), end(source), begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto move(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_exespace_impl(label, ex, begin(source), end(source),
+ begin(dest));
+}
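+
+// Usage sketch (illustrative; assumes dst is at least as long as src; for
+// trivially copyable element types a move degenerates to a copy):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<double*, Kokkos::HostSpace> src("src", 50), dst("dst", 50);
+//   auto it = KE::move(Kokkos::DefaultHostExecutionSpace(), src, dst);
+//   // it points one past the last element written into dst.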
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator move(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::move_team_impl(teamHandle, first, last, d_first);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto move(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_team_impl(teamHandle, begin(source), end(source),
+ begin(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
+
+#include "impl/Kokkos_MoveBackward.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 move_backward(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 d_last) {
+ return Impl::move_backward_exespace_impl(
+ "Kokkos::move_backward_iterator_api_default", ex, first, last, d_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto move_backward(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_backward_exespace_impl(
+ "Kokkos::move_backward_view_api_default", ex, begin(source), end(source),
+ end(dest));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 move_backward(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 d_last) {
+ return Impl::move_backward_exespace_impl(label, ex, first, last, d_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto move_backward(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_backward_exespace_impl(label, ex, begin(source),
+ end(source), end(dest));
+}
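+
+// Usage sketch (illustrative; assumes dst is at least as long as src):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<double*, Kokkos::HostSpace> src("src", 50), dst("dst", 50);
+//   auto it = KE::move_backward(Kokkos::DefaultHostExecutionSpace(), src, dst);
+//   // Elements are moved in reverse order so that the last source element
+//   // lands at end(dst) - 1; it points to the first element written.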
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType2 move_backward(const TeamHandleType& teamHandle,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 d_last) {
+ return Impl::move_backward_team_impl(teamHandle, first, last, d_last);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto move_backward(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::move_backward_team_impl(teamHandle, begin(source), end(source),
+ end(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool none_of(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return Impl::none_of_exespace_impl("Kokkos::none_of_iterator_api_default", ex,
+ first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool none_of(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, Predicate predicate) {
+ return Impl::none_of_exespace_impl(label, ex, first, last, predicate);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool none_of(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::none_of_exespace_impl("Kokkos::none_of_view_api_default", ex,
+ KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename Predicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+bool none_of(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::none_of_exespace_impl(label, ex, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
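+
+// Usage sketch (illustrative):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<double*> v("v", 100);
+//   bool ok = KE::none_of(Kokkos::DefaultExecutionSpace(), v,
+//                         KOKKOS_LAMBDA(double x) { return x < 0.; });
+//   // ok is true iff no element of v is negative.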
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType, typename Predicate,
+          std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool none_of(const TeamHandleType& teamHandle,
+                             IteratorType first, IteratorType last,
+                             Predicate predicate) {
+ return Impl::none_of_team_impl(teamHandle, first, last, predicate);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+          typename Predicate,
+          std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION bool none_of(const TeamHandleType& teamHandle,
+                             const ::Kokkos::View<DataType, Properties...>& v,
+                             Predicate predicate) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::none_of_team_impl(teamHandle, KE::cbegin(v), KE::cend(v),
+ std::move(predicate));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
+
+#include "impl/Kokkos_PartitionCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorTrueType, typename OutputIteratorFalseType,
+ typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
+ const ExecutionSpace& ex, InputIteratorType from_first,
+ InputIteratorType from_last, OutputIteratorTrueType to_first_true,
+ OutputIteratorFalseType to_first_false, PredicateType p) {
+ return Impl::partition_copy_exespace_impl(
+ "Kokkos::partition_copy_iterator_api_default", ex, from_first, from_last,
+ to_first_true, to_first_false, std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorTrueType, typename OutputIteratorFalseType,
+ typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType from_first, InputIteratorType from_last,
+ OutputIteratorTrueType to_first_true,
+ OutputIteratorFalseType to_first_false, PredicateType p) {
+ return Impl::partition_copy_exespace_impl(label, ex, from_first, from_last,
+ to_first_true, to_first_false,
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto partition_copy(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
+ const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
+    PredicateType p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_true);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_false);
+
+ return Impl::partition_copy_exespace_impl(
+ "Kokkos::partition_copy_view_api_default", ex, cbegin(view_from),
+ cend(view_from), begin(view_dest_true), begin(view_dest_false),
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto partition_copy(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
+ const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
+    PredicateType p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_true);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_false);
+
+ return Impl::partition_copy_exespace_impl(
+ label, ex, cbegin(view_from), cend(view_from), begin(view_dest_true),
+ begin(view_dest_false), std::move(p));
+}
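+
+// Usage sketch (illustrative; both destination views are assumed to be large
+// enough to receive all selected elements):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 100), v_true("t", 100), v_false("f", 100);
+//   auto ends = KE::partition_copy(
+//       Kokkos::DefaultExecutionSpace(), v, v_true, v_false,
+//       KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
+//   // Even values go to v_true, odd ones to v_false; ends.first and
+//   // ends.second point one past the last element written in each view.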
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorTrueType, typename OutputIteratorFalseType,
+ typename PredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION ::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType>
+partition_copy(const TeamHandleType& teamHandle, InputIteratorType from_first,
+ InputIteratorType from_last,
+ OutputIteratorTrueType to_first_true,
+ OutputIteratorFalseType to_first_false, PredicateType p) {
+ return Impl::partition_copy_team_impl(teamHandle, from_first, from_last,
+ to_first_true, to_first_false,
+ std::move(p));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename PredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto partition_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
+ const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
+    PredicateType p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_true);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest_false);
+
+ return Impl::partition_copy_team_impl(teamHandle, cbegin(view_from),
+ cend(view_from), begin(view_dest_true),
+ begin(view_dest_false), std::move(p));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
+
+#include "impl/Kokkos_PartitionPoint.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType partition_point(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, UnaryPredicate p) {
+ return Impl::partition_point_exespace_impl(
+ "Kokkos::partitioned_point_iterator_api_default", ex, first, last,
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType partition_point(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ UnaryPredicate p) {
+ return Impl::partition_point_exespace_impl(label, ex, first, last,
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename UnaryPredicate, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto partition_point(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ UnaryPredicate p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ return Impl::partition_point_exespace_impl(label, ex, begin(v), end(v),
+ std::move(p));
+}
+
+template <
+ typename ExecutionSpace, typename UnaryPredicate, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto partition_point(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& v,
+ UnaryPredicate p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ return Impl::partition_point_exespace_impl(
+ "Kokkos::partition_point_view_api_default", ex, begin(v), end(v),
+ std::move(p));
+}
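+
+// Usage sketch (illustrative; the input must already be partitioned with
+// respect to the predicate, i.e. all matching elements come first):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 100);  // e.g. sorted ascending
+//   auto it = KE::partition_point(
+//       Kokkos::DefaultExecutionSpace(), v,
+//       KOKKOS_LAMBDA(int x) { return x < 10; });
+//   // it points to the first element for which the predicate is false.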
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType partition_point(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last,
+ UnaryPredicate p) {
+ return Impl::partition_point_team_impl(teamHandle, first, last, std::move(p));
+}
+
+template <typename TeamHandleType, typename UnaryPredicate, typename DataType,
+ typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto partition_point(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& v, UnaryPredicate p) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+ return Impl::partition_point_team_impl(teamHandle, begin(v), end(v),
+ std::move(p));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REDUCE_HPP
+#define KOKKOS_STD_ALGORITHMS_REDUCE_HPP
+
+#include "impl/Kokkos_Reduce.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+//
+// overload set 1
+//
+template <typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+typename IteratorType::value_type reduce(const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last) {
+ return Impl::reduce_default_functors_exespace_impl(
+ "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+ typename IteratorType::value_type());
+}
+
+template <typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+typename IteratorType::value_type reduce(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last) {
+ return Impl::reduce_default_functors_exespace_impl(
+ label, ex, first, last, typename IteratorType::value_type());
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto reduce(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ using view_type = ::Kokkos::View<DataType, Properties...>;
+ using value_type = typename view_type::value_type;
+
+ return Impl::reduce_default_functors_exespace_impl(
+ "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
+ KE::cend(view), value_type());
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+auto reduce(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ using view_type = ::Kokkos::View<DataType, Properties...>;
+ using value_type = typename view_type::value_type;
+
+ return Impl::reduce_default_functors_exespace_impl(
+ label, ex, KE::cbegin(view), KE::cend(view), value_type());
+}
+
+//
+// overload set 2
+//
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_default_functors_exespace_impl(
+ "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+ init_reduction_value);
+}
+
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_default_functors_exespace_impl(label, ex, first, last,
+ init_reduction_value);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_default_functors_exespace_impl(
+ "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
+ KE::cend(view), init_reduction_value);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_default_functors_exespace_impl(
+ label, ex, KE::cbegin(view), KE::cend(view), init_reduction_value);
+}
+
+//
+// overload set 3
+//
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ValueType init_reduction_value,
+ BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_custom_functors_exespace_impl(
+ "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+ init_reduction_value, joiner);
+}
+
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ ValueType init_reduction_value, BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_custom_functors_exespace_impl(
+ label, ex, first, last, init_reduction_value, joiner);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value, BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_custom_functors_exespace_impl(
+ "Kokkos::reduce_custom_functors_view_api", ex, KE::cbegin(view),
+ KE::cend(view), init_reduction_value, joiner);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value, BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_custom_functors_exespace_impl(
+ label, ex, KE::cbegin(view), KE::cend(view), init_reduction_value,
+ joiner);
+}
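+
+// Usage sketch (illustrative; a custom joiner must be associative and
+// commutative because the reduction order is unspecified):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<double*> v("v", 100);
+//   auto sum = KE::reduce(Kokkos::DefaultExecutionSpace(), v);  // set 1
+//   auto r   = KE::reduce(Kokkos::DefaultExecutionSpace(), v, 1.,
+//                         KOKKOS_LAMBDA(double a, double b) {
+//                           return a * b;
+//                         });  // set 3: product with initial value 1.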
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+//
+// overload set 1
+//
+template <
+ typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION typename IteratorType::value_type reduce(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last) {
+ return Impl::reduce_default_functors_team_impl(
+ teamHandle, first, last, typename IteratorType::value_type());
+}
+
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto reduce(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ using view_type = ::Kokkos::View<DataType, Properties...>;
+ using value_type = typename view_type::value_type;
+
+ return Impl::reduce_default_functors_team_impl(teamHandle, KE::cbegin(view),
+ KE::cend(view), value_type());
+}
+
+//
+// overload set 2
+//
+template <
+ typename TeamHandleType, typename IteratorType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType reduce(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_default_functors_team_impl(teamHandle, first, last,
+ init_reduction_value);
+}
+
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType
+reduce(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_default_functors_team_impl(
+ teamHandle, KE::cbegin(view), KE::cend(view), init_reduction_value);
+}
+
+//
+// overload set 3
+//
+template <
+ typename TeamHandleType, typename IteratorType, typename ValueType,
+ typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType reduce(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ValueType init_reduction_value,
+ BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::reduce_custom_functors_team_impl(teamHandle, first, last,
+ init_reduction_value, joiner);
+}
+
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryOp,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType
+reduce(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value, BinaryOp joiner) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::reduce_custom_functors_team_impl(teamHandle, KE::cbegin(view),
+ KE::cend(view),
+ init_reduction_value, joiner);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+Iterator remove(const ExecutionSpace& ex, Iterator first, Iterator last,
+ const ValueType& value) {
+ return Impl::remove_exespace_impl("Kokkos::remove_iterator_api_default", ex,
+ first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+Iterator remove(const std::string& label, const ExecutionSpace& ex,
+ Iterator first, Iterator last, const ValueType& value) {
+ return Impl::remove_exespace_impl(label, ex, first, last, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::remove_exespace_impl("Kokkos::remove_iterator_api_default", ex,
+ ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::remove_exespace_impl(label, ex,
+ ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), value);
+}
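+
+// Usage sketch (illustrative; like std::remove, the view itself is not
+// resized, only the logical end moves):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 100);
+//   auto new_end = KE::remove(Kokkos::DefaultExecutionSpace(), v, 0);
+//   // [begin(v), new_end) holds the elements different from 0, in their
+//   // original relative order; the tail of the view is left unspecified.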
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION Iterator remove(const TeamHandleType& teamHandle,
+ Iterator first, Iterator last,
+ const ValueType& value) {
+ return Impl::remove_team_impl(teamHandle, first, last, value);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto remove(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::remove_team_impl(teamHandle, ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator remove_copy(const ExecutionSpace& ex, InputIterator first_from,
+ InputIterator last_from, OutputIterator first_dest,
+ const ValueType& value) {
+ return Impl::remove_copy_exespace_impl(
+ "Kokkos::remove_copy_iterator_api_default", ex, first_from, last_from,
+ first_dest, value);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator remove_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first_from, InputIterator last_from,
+ OutputIterator first_dest, const ValueType& value) {
+ return Impl::remove_copy_exespace_impl(label, ex, first_from, last_from,
+ first_dest, value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ return Impl::remove_copy_exespace_impl(
+ "Kokkos::remove_copy_iterator_api_default", ex,
+ ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ return Impl::remove_copy_exespace_impl(
+ label, ex, ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), value);
+}
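+
+// Usage sketch (illustrative; dst is assumed large enough for all kept
+// elements, and src is read through const iterators):
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 100), dst("dst", 100);
+//   auto it = KE::remove_copy(Kokkos::DefaultExecutionSpace(), src, dst, 0);
+//   // All elements of src different from 0 are copied into dst;
+//   // it points one past the last element written.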
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator remove_copy(const TeamHandleType& teamHandle,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest,
+ const ValueType& value) {
+ return Impl::remove_copy_team_impl(teamHandle, first_from, last_from,
+ first_dest, value);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto remove_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ return Impl::remove_copy_team_impl(
+ teamHandle, ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
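+// NOTE (editorial sketch, not part of the upstream Kokkos header): a minimal
+// use of the view-API overload above; `src` and `dst` are hypothetical Views.
+// remove_copy copies every element not equal to `value` and returns an
+// iterator into the destination one past the last element written:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::remove_copy(Kokkos::DefaultExecutionSpace(), src, dst, 0);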
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator remove_copy_if(const ExecutionSpace& ex,
+ InputIterator first_from, InputIterator last_from,
+ OutputIterator first_dest,
+ const UnaryPredicate& pred) {
+ return Impl::remove_copy_if_exespace_impl(
+ "Kokkos::remove_copy_if_iterator_api_default", ex, first_from, last_from,
+ first_dest, pred);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator remove_copy_if(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first_from, InputIterator last_from,
+ OutputIterator first_dest,
+ const UnaryPredicate& pred) {
+ return Impl::remove_copy_if_exespace_impl(label, ex, first_from, last_from,
+ first_dest, pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_copy_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const UnaryPredicate& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  return Impl::remove_copy_if_exespace_impl(
+      "Kokkos::remove_copy_if_view_api_default", ex,
+ ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_copy_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const UnaryPredicate& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ return Impl::remove_copy_if_exespace_impl(
+ label, ex, ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator remove_copy_if(const TeamHandleType& teamHandle,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest,
+ const UnaryPredicate& pred) {
+ return Impl::remove_copy_if_team_impl(teamHandle, first_from, last_from,
+ first_dest, pred);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto remove_copy_if(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const UnaryPredicate& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+ return Impl::remove_copy_if_team_impl(
+ teamHandle, ::Kokkos::Experimental::cbegin(view_from),
+ ::Kokkos::Experimental::cend(view_from),
+ ::Kokkos::Experimental::begin(view_dest), pred);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
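+// NOTE (editorial sketch, not part of the upstream Kokkos header): the
+// predicate must be callable on the device, e.g. a KOKKOS_LAMBDA; `src` and
+// `dst` are hypothetical Views. Elements for which the predicate is true are
+// skipped during the copy:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::remove_copy_if(
+//       Kokkos::DefaultExecutionSpace(), src, dst,
+//       KOKKOS_LAMBDA(int v) { return v % 2 == 0; });  // drop even values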
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename Iterator, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+Iterator remove_if(const ExecutionSpace& ex, Iterator first, Iterator last,
+ UnaryPredicate pred) {
+ return Impl::remove_if_exespace_impl("Kokkos::remove_if_iterator_api_default",
+ ex, first, last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename Iterator, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+Iterator remove_if(const std::string& label, const ExecutionSpace& ex,
+ Iterator first, Iterator last, UnaryPredicate pred) {
+ return Impl::remove_if_exespace_impl(label, ex, first, last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ UnaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::remove_if_exespace_impl("Kokkos::remove_if_iterator_api_default",
+ ex, ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto remove_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ UnaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::remove_if_exespace_impl(label, ex,
+ ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename Iterator, typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION Iterator remove_if(const TeamHandleType& teamHandle,
+ Iterator first, Iterator last,
+ UnaryPredicate pred) {
+ return Impl::remove_if_team_impl(teamHandle, first, last, pred);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename UnaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto remove_if(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, UnaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::remove_if_team_impl(teamHandle,
+ ::Kokkos::Experimental::begin(view),
+ ::Kokkos::Experimental::end(view), pred);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
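+// NOTE (editorial sketch, not part of the upstream Kokkos header): remove_if
+// compacts the kept elements to the front and returns the new logical end;
+// elements past the returned iterator are left in an unspecified state.
+// `v` is a hypothetical View:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 8);
+//   auto new_end = KE::remove_if(
+//       Kokkos::DefaultExecutionSpace(), v,
+//       KOKKOS_LAMBDA(int x) { return x < 0; });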
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_HPP
+
+#include "impl/Kokkos_Replace.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace(const ExecutionSpace& ex, Iterator first, Iterator last,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::replace_exespace_impl("Kokkos::replace_iterator_api", ex, first, last,
+ old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace(const std::string& label, const ExecutionSpace& ex, Iterator first,
+ Iterator last, const ValueType& old_value,
+ const ValueType& new_value) {
+ Impl::replace_exespace_impl(label, ex, first, last, old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_exespace_impl("Kokkos::replace_view_api", ex, KE::begin(view),
+ KE::end(view), old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ old_value, new_value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename Iterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void replace(const TeamHandleType& teamHandle, Iterator first,
+ Iterator last, const ValueType& old_value,
+ const ValueType& new_value) {
+ Impl::replace_team_impl(teamHandle, first, last, old_value, new_value);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void replace(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_team_impl(teamHandle, KE::begin(view), KE::end(view), old_value,
+ new_value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
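+// NOTE (editorial sketch, not part of the upstream Kokkos header): in-place
+// replacement of every element equal to old_value; `v` is hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 8);
+//   KE::replace(Kokkos::DefaultExecutionSpace(), v, /*old*/ 0, /*new*/ 1);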
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
+
+#include "impl/Kokkos_ReplaceCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator replace_copy(const ExecutionSpace& ex, InputIterator first_from,
+ InputIterator last_from, OutputIterator first_dest,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ return Impl::replace_copy_exespace_impl("Kokkos::replace_copy_iterator_api",
+ ex, first_from, last_from, first_dest,
+ old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator replace_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first_from, InputIterator last_from,
+ OutputIterator first_dest,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ return Impl::replace_copy_exespace_impl(label, ex, first_from, last_from,
+ first_dest, old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto replace_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_exespace_impl(
+ "Kokkos::replace_copy_view_api", ex, KE::cbegin(view_from),
+ KE::cend(view_from), KE::begin(view_dest), old_value, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto replace_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), old_value, new_value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator replace_copy(const TeamHandleType& teamHandle,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ return Impl::replace_copy_team_impl(teamHandle, first_from, last_from,
+ first_dest, old_value, new_value);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto replace_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ const ValueType& old_value, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_team_impl(teamHandle, KE::cbegin(view_from),
+ KE::cend(view_from), KE::begin(view_dest),
+ old_value, new_value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
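+// NOTE (editorial sketch, not part of the upstream Kokkos header): copies
+// `src` to `dst` (both hypothetical Views) while substituting new_value for
+// every element equal to old_value; returns an iterator one past the last
+// element written:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::replace_copy(Kokkos::DefaultExecutionSpace(), src, dst,
+//                              /*old*/ 0, /*new*/ 1);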
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
+
+#include "impl/Kokkos_ReplaceCopyIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename PredicateType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator replace_copy_if(const ExecutionSpace& ex,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest, PredicateType pred,
+ const ValueType& new_value) {
+ return Impl::replace_copy_if_exespace_impl(
+ "Kokkos::replace_copy_if_iterator_api", ex, first_from, last_from,
+ first_dest, pred, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename PredicateType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator replace_copy_if(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest, PredicateType pred,
+ const ValueType& new_value) {
+ return Impl::replace_copy_if_exespace_impl(label, ex, first_from, last_from,
+ first_dest, pred, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename PredicateType,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto replace_copy_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ PredicateType pred, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_if_exespace_impl(
+ "Kokkos::replace_copy_if_view_api", ex, KE::cbegin(view_from),
+ KE::cend(view_from), KE::begin(view_dest), pred, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename PredicateType,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto replace_copy_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ PredicateType pred, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_if_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), pred, new_value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename PredicateType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator replace_copy_if(const TeamHandleType& teamHandle,
+ InputIterator first_from,
+ InputIterator last_from,
+ OutputIterator first_dest,
+ PredicateType pred,
+ const ValueType& new_value) {
+ return Impl::replace_copy_if_team_impl(teamHandle, first_from, last_from,
+ first_dest, pred, new_value);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename PredicateType,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto replace_copy_if(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ PredicateType pred, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::replace_copy_if_team_impl(teamHandle, KE::cbegin(view_from),
+ KE::cend(view_from),
+ KE::begin(view_dest), pred, new_value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
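+// NOTE (editorial sketch, not part of the upstream Kokkos header): like
+// replace_copy, but substitutes new_value wherever the (device-callable)
+// predicate holds; `src` and `dst` are hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::replace_copy_if(
+//       Kokkos::DefaultExecutionSpace(), src, dst,
+//       KOKKOS_LAMBDA(int v) { return v < 0; }, /*new*/ 0);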
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
+
+#include "impl/Kokkos_ReplaceIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace_if(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, Predicate pred,
+ const ValueType& new_value) {
+ Impl::replace_if_exespace_impl("Kokkos::replace_if_iterator_api", ex, first,
+ last, pred, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename Predicate,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace_if(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last, Predicate pred,
+ const ValueType& new_value) {
+ Impl::replace_if_exespace_impl(label, ex, first, last, pred, new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename Predicate, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace_if(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ Predicate pred, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_if_exespace_impl("Kokkos::replace_if_view_api", ex,
+ KE::begin(view), KE::end(view), pred,
+ new_value);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename Predicate, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void replace_if(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ Predicate pred, const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_if_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ pred, new_value);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator, typename Predicate,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void replace_if(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ Predicate pred, const ValueType& new_value) {
+ Impl::replace_if_team_impl(teamHandle, first, last, pred, new_value);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename Predicate, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void replace_if(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view, Predicate pred,
+ const ValueType& new_value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ Impl::replace_if_team_impl(teamHandle, KE::begin(view), KE::end(view), pred,
+ new_value);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
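+// NOTE (editorial sketch, not part of the upstream Kokkos header): the
+// in-place variant overwrites every element satisfying the predicate;
+// `v` is hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 8);
+//   KE::replace_if(Kokkos::DefaultExecutionSpace(), v,
+//                  KOKKOS_LAMBDA(int x) { return x > 100; }, /*new*/ 100);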
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_HPP
+
+#include "impl/Kokkos_Reverse.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void reverse(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last) {
+ return Impl::reverse_exespace_impl("Kokkos::reverse_iterator_api_default", ex,
+ first, last);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void reverse(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last) {
+ return Impl::reverse_exespace_impl(label, ex, first, last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void reverse(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::reverse_exespace_impl("Kokkos::reverse_view_api_default", ex,
+ KE::begin(view), KE::end(view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+void reverse(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::reverse_exespace_impl(label, ex, KE::begin(view), KE::end(view));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void reverse(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last) {
+ return Impl::reverse_team_impl(teamHandle, first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION void reverse(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::reverse_team_impl(teamHandle, KE::begin(view), KE::end(view));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
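+// NOTE (editorial sketch, not part of the upstream Kokkos header): the
+// team-handle overload is intended to be called from inside a parallel
+// region, one sequence per team; `v` and the policy are hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 8);
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<>(1, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
+//         KE::reverse(team, v);
+//       });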
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
+
+#include "impl/Kokkos_ReverseCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator reverse_copy(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ return Impl::reverse_copy_exespace_impl(
+ "Kokkos::reverse_copy_iterator_api_default", ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator reverse_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::reverse_copy_exespace_impl(label, ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto reverse_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::reverse_copy_exespace_impl(
+ "Kokkos::reverse_copy_view_api_default", ex, cbegin(source), cend(source),
+ begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto reverse_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::reverse_copy_exespace_impl(label, ex, cbegin(source),
+ cend(source), begin(dest));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator reverse_copy(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ return Impl::reverse_copy_team_impl(teamHandle, first, last, d_first);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto reverse_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::reverse_copy_team_impl(teamHandle, cbegin(source), cend(source),
+ begin(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
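+// NOTE (editorial sketch, not part of the upstream Kokkos header): writes
+// the elements of `src` into `dst` in reverse order, leaving `src`
+// untouched; both Views are hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::reverse_copy(Kokkos::DefaultExecutionSpace(), src, dst);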
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_HPP
+
+#include "impl/Kokkos_Rotate.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType rotate(const ExecutionSpace& ex, IteratorType first,
+ IteratorType n_first, IteratorType last) {
+ return Impl::rotate_exespace_impl("Kokkos::rotate_iterator_api_default", ex,
+ first, n_first, last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType rotate(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType n_first,
+ IteratorType last) {
+ return Impl::rotate_exespace_impl(label, ex, first, n_first, last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto rotate(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ std::size_t n_location) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::rotate_exespace_impl("Kokkos::rotate_view_api_default", ex,
+ begin(view), begin(view) + n_location,
+ end(view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto rotate(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ std::size_t n_location) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::rotate_exespace_impl(label, ex, begin(view),
+ begin(view) + n_location, end(view));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType rotate(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType n_first,
+ IteratorType last) {
+ return Impl::rotate_team_impl(teamHandle, first, n_first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto rotate(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ std::size_t n_location) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::rotate_team_impl(teamHandle, begin(view),
+ begin(view) + n_location, end(view));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
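+// NOTE (editorial sketch, not part of the upstream Kokkos header): after the
+// call below, the element originally at view(n_location) is first and the
+// preceding elements have moved to the back; `v` is hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 8);
+//   auto it = KE::rotate(Kokkos::DefaultExecutionSpace(), v, /*n_location*/ 3);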
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
+
+#include "impl/Kokkos_RotateCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator rotate_copy(const ExecutionSpace& ex, InputIterator first,
+ InputIterator n_first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::rotate_copy_exespace_impl(
+ "Kokkos::rotate_copy_iterator_api_default", ex, first, n_first, last,
+ d_first);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator rotate_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator n_first,
+ InputIterator last, OutputIterator d_first) {
+ return Impl::rotate_copy_exespace_impl(label, ex, first, n_first, last,
+ d_first);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto rotate_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ std::size_t n_location,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::rotate_copy_exespace_impl(
+ "Kokkos::rotate_copy_view_api_default", ex, cbegin(source),
+ cbegin(source) + n_location, cend(source), begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto rotate_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ std::size_t n_location,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::rotate_copy_exespace_impl(label, ex, cbegin(source),
+ cbegin(source) + n_location,
+ cend(source), begin(dest));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator rotate_copy(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator n_first,
+ InputIterator last,
+ OutputIterator d_first) {
+ return Impl::rotate_copy_team_impl(teamHandle, first, n_first, last, d_first);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto rotate_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ std::size_t n_location,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::rotate_copy_team_impl(teamHandle, cbegin(source),
+ cbegin(source) + n_location, cend(source),
+ begin(dest));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
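+// NOTE (editorial sketch, not part of the upstream Kokkos header): mind the
+// argument order of the view overload: source, n_location, dest. `src` and
+// `dst` are hypothetical:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> src("src", 8), dst("dst", 8);
+//   auto it = KE::rotate_copy(Kokkos::DefaultExecutionSpace(), src, 3, dst);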
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_HPP
+
+#include "impl/Kokkos_Search.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1: no binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::search_exespace_impl("Kokkos::search_iterator_api_default", ex,
+ first, last, s_first, s_last);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last) {
+ return Impl::search_exespace_impl(label, ex, first, last, s_first, s_last);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto search(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_exespace_impl("Kokkos::search_view_api_default", ex,
+ KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto search(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last, const BinaryPredicateType& pred) {
+ return Impl::search_exespace_impl("Kokkos::search_iterator_api_default", ex,
+ first, last, s_first, s_last, pred);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::search_exespace_impl(label, ex, first, last, s_first, s_last,
+ pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto search(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_exespace_impl("Kokkos::search_view_api_default", ex,
+ KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto search(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1: no binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 search(const TeamHandleType& teamHandle,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
+ return Impl::search_team_impl(teamHandle, first, last, s_first, s_last);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto search(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2, typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType1 search(const TeamHandleType& teamHandle,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
+ return Impl::search_team_impl(teamHandle, first, last, s_first, s_last, pred);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ typename BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto search(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view,
+ const ::Kokkos::View<DataType2, Properties2...>& s_view,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ KE::begin(s_view), KE::end(s_view), pred);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
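+// NOTE (editorial sketch, not part of the upstream Kokkos header): looks for
+// the first occurrence of the subsequence `s` inside `v` (both hypothetical
+// Views); returns an iterator to its start, or the end iterator if the
+// subsequence does not occur:
+//
+//   namespace KE = Kokkos::Experimental;
+//   Kokkos::View<int*> v("v", 16), s("s", 3);
+//   auto it = KE::search(Kokkos::DefaultExecutionSpace(), v, s);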
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
+
+#include "impl/Kokkos_SearchN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1: no binary predicate passed
+template <
+ class ExecutionSpace, class IteratorType, class SizeType, class ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, SizeType count,
+ const ValueType& value) {
+ return Impl::search_n_exespace_impl("Kokkos::search_n_iterator_api_default",
+ ex, first, last, count, value);
+}
+
+template <
+ class ExecutionSpace, class IteratorType, class SizeType, class ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, SizeType count,
+ const ValueType& value) {
+ return Impl::search_n_exespace_impl(label, ex, first, last, count, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto search_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ SizeType count, const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_exespace_impl("Kokkos::search_n_view_api_default", ex,
+ KE::begin(view), KE::end(view), count,
+ value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto search_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ SizeType count, const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ count, value);
+}
+
+// overload set 2: binary predicate passed
+template <
+ class ExecutionSpace, class IteratorType, class SizeType, class ValueType,
+ class BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, SizeType count, const ValueType& value,
+ const BinaryPredicateType& pred) {
+ return Impl::search_n_exespace_impl("Kokkos::search_n_iterator_api_default",
+ ex, first, last, count, value, pred);
+}
+
+template <
+ class ExecutionSpace, class IteratorType, class SizeType, class ValueType,
+ class BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, SizeType count,
+ const ValueType& value, const BinaryPredicateType& pred) {
+ return Impl::search_n_exespace_impl(label, ex, first, last, count, value,
+ pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType, class BinaryPredicateType,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto search_n(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ SizeType count, const ValueType& value,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_exespace_impl("Kokkos::search_n_view_api_default", ex,
+ KE::begin(view), KE::end(view), count,
+ value, pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType, class BinaryPredicateType,
+          std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>,
+                           int> = 0>
+auto search_n(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ SizeType count, const ValueType& value,
+ const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_exespace_impl(label, ex, KE::begin(view), KE::end(view),
+ count, value, pred);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now omit the overloads accepting a label
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1: no binary predicate passed
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType search_n(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ SizeType count, const ValueType& value) {
+ return Impl::search_n_team_impl(teamHandle, first, last, count, value);
+}
+
+template <
+    class TeamHandleType, class DataType, class... Properties, class SizeType,
+    class ValueType,
+    std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto search_n(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, SizeType count,
+ const ValueType& value) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ count, value);
+}
+
+// overload set 2: binary predicate passed
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class ValueType, class BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType search_n(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ SizeType count, const ValueType& value,
+ const BinaryPredicateType& pred) {
+ return Impl::search_n_team_impl(teamHandle, first, last, count, value, pred);
+}
+
+template <
+ class TeamHandleType, class DataType, class... Properties, class SizeType,
+ class ValueType, class BinaryPredicateType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto search_n(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view, SizeType count,
+ const ValueType& value, const BinaryPredicateType& pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::search_n_team_impl(teamHandle, KE::begin(view), KE::end(view),
+ count, value, pred);
+}
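+
+// Example (illustrative sketch, hypothetical names and values): find the
+// first run of 3 consecutive elements equal to 7 in a rank-1 view.
+//
+//   Kokkos::View<int*> data("data", 100);
+//   // ... fill data ...
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::search_n(Kokkos::DefaultExecutionSpace(), data, 3, 7);
+//   // `it` points to the start of the first such run, or equals
+//   // KE::end(data) if no run of three consecutive 7s exists.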
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
+
+#include "impl/Kokkos_ShiftLeft.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType shift_left(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last,
+ typename IteratorType::difference_type n) {
+ return Impl::shift_left_exespace_impl(
+ "Kokkos::shift_left_iterator_api_default", ex, first, last, n);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType shift_left(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ typename IteratorType::difference_type n) {
+ return Impl::shift_left_exespace_impl(label, ex, first, last, n);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto shift_left(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_left_exespace_impl("Kokkos::shift_left_view_api_default",
+ ex, begin(view), end(view), n);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto shift_left(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_left_exespace_impl(label, ex, begin(view), end(view), n);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType
+shift_left(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, typename IteratorType::difference_type n) {
+ return Impl::shift_left_team_impl(teamHandle, first, last, n);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto shift_left(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_left_team_impl(teamHandle, begin(view), end(view), n);
+}
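+
+// Example (illustrative sketch, hypothetical names): shift the elements
+// of a rank-1 view left by 2 positions.
+//
+//   Kokkos::View<double*> v("v", 10);
+//   namespace KE = Kokkos::Experimental;
+//   auto new_end = KE::shift_left(Kokkos::DefaultExecutionSpace(), v, 2);
+//   // v(0) now holds the old v(2), v(1) the old v(3), and so on;
+//   // `new_end` points past the last shifted element, i.e. end(v) - 2.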
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
+
+#include "impl/Kokkos_ShiftRight.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType shift_right(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last,
+ typename IteratorType::difference_type n) {
+ return Impl::shift_right_exespace_impl(
+ "Kokkos::shift_right_iterator_api_default", ex, first, last, n);
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType shift_right(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ typename IteratorType::difference_type n) {
+ return Impl::shift_right_exespace_impl(label, ex, first, last, n);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto shift_right(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_right_exespace_impl("Kokkos::shift_right_view_api_default",
+ ex, begin(view), end(view), n);
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto shift_right(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_right_exespace_impl(label, ex, begin(view), end(view), n);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType
+shift_right(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, typename IteratorType::difference_type n) {
+ return Impl::shift_right_team_impl(teamHandle, first, last, n);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto shift_right(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ typename decltype(begin(view))::difference_type n) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::shift_right_team_impl(teamHandle, begin(view), end(view), n);
+}
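+
+// Example (illustrative sketch, hypothetical names): shift the elements
+// of a rank-1 view right by 2 positions.
+//
+//   Kokkos::View<double*> v("v", 10);
+//   namespace KE = Kokkos::Experimental;
+//   auto new_first = KE::shift_right(Kokkos::DefaultExecutionSpace(), v, 2);
+//   // v(2) now holds the old v(0), v(3) the old v(1), and so on;
+//   // `new_first` points to the new first element, i.e. begin(v) + 2.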
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
+#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
+
+#include "impl/Kokkos_SwapRanges.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 swap_ranges(const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2) {
+ return Impl::swap_ranges_exespace_impl(
+ "Kokkos::swap_ranges_iterator_api_default", ex, first1, last1, first2);
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto swap_ranges(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ assert(source.extent(0) == dest.extent(0));
+ return Impl::swap_ranges_exespace_impl("Kokkos::swap_ranges_view_api_default",
+ ex, begin(source), end(source),
+ begin(dest));
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+IteratorType2 swap_ranges(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2) {
+ return Impl::swap_ranges_exespace_impl(label, ex, first1, last1, first2);
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto swap_ranges(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ assert(source.extent(0) == dest.extent(0));
+ return Impl::swap_ranges_exespace_impl(label, ex, begin(source), end(source),
+ begin(dest));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename IteratorType1,
+ typename IteratorType2,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION IteratorType2 swap_ranges(const TeamHandleType& teamHandle,
+ IteratorType1 first1,
+ IteratorType1 last1,
+ IteratorType2 first2) {
+ return Impl::swap_ranges_team_impl(teamHandle, first1, last1, first2);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto swap_ranges(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ assert(source.extent(0) == dest.extent(0));
+ return Impl::swap_ranges_team_impl(teamHandle, begin(source), end(source),
+ begin(dest));
+}
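+
+// Example (illustrative sketch, hypothetical names): element-wise swap
+// of two equally sized rank-1 views.
+//
+//   Kokkos::View<int*> a("a", 50);
+//   Kokkos::View<int*> b("b", 50);
+//   namespace KE = Kokkos::Experimental;
+//   KE::swap_ranges(Kokkos::DefaultExecutionSpace(), a, b);
+//   // every a(i) has been exchanged with b(i); the extents must match,
+//   // which the view overloads check via the assert above.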
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
+
+#include "impl/Kokkos_Transform.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename UnaryOperation,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator transform(const ExecutionSpace& ex, InputIterator first1,
+ InputIterator last1, OutputIterator d_first,
+ UnaryOperation unary_op) {
+ return Impl::transform_exespace_impl("Kokkos::transform_iterator_api_default",
+ ex, first1, last1, d_first,
+ std::move(unary_op));
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename UnaryOperation,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator transform(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first1, InputIterator last1,
+ OutputIterator d_first, UnaryOperation unary_op) {
+ return Impl::transform_exespace_impl(label, ex, first1, last1, d_first,
+ std::move(unary_op));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryOperation,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ UnaryOperation unary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_exespace_impl("Kokkos::transform_view_api_default", ex,
+ begin(source), end(source), begin(dest),
+ std::move(unary_op));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryOperation,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ UnaryOperation unary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_exespace_impl(label, ex, begin(source), end(source),
+ begin(dest), std::move(unary_op));
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator1, typename InputIterator2,
+ typename OutputIterator, typename BinaryOperation,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIterator1, InputIterator2, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator transform(const ExecutionSpace& ex, InputIterator1 first1,
+ InputIterator1 last1, InputIterator2 first2,
+ OutputIterator d_first, BinaryOperation binary_op) {
+ return Impl::transform_exespace_impl("Kokkos::transform_iterator_api_default",
+ ex, first1, last1, first2, d_first,
+ std::move(binary_op));
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator1, typename InputIterator2,
+ typename OutputIterator, typename BinaryOperation,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIterator1, InputIterator2, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator transform(const std::string& label, const ExecutionSpace& ex,
+ InputIterator1 first1, InputIterator1 last1,
+ InputIterator2 first2, OutputIterator d_first,
+ BinaryOperation binary_op) {
+ return Impl::transform_exespace_impl(label, ex, first1, last1, first2,
+ d_first, std::move(binary_op));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename BinaryOperation,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source1,
+ const ::Kokkos::View<DataType2, Properties2...>& source2,
+ const ::Kokkos::View<DataType3, Properties3...>& dest,
+ BinaryOperation binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_exespace_impl(
+ "Kokkos::transform_view_api_default", ex, begin(source1), end(source1),
+ begin(source2), begin(dest), std::move(binary_op));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename BinaryOperation,
+ std::enable_if_t<is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source1,
+ const ::Kokkos::View<DataType2, Properties2...>& source2,
+ const ::Kokkos::View<DataType3, Properties3...>& dest,
+ BinaryOperation binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_exespace_impl(label, ex, begin(source1), end(source1),
+ begin(source2), begin(dest),
+ std::move(binary_op));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+template <
+ typename TeamHandleType, typename InputIterator, typename OutputIterator,
+ typename UnaryOperation,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIterator transform(const TeamHandleType& teamHandle,
+ InputIterator first1,
+ InputIterator last1,
+ OutputIterator d_first,
+ UnaryOperation unary_op) {
+ return Impl::transform_team_impl(teamHandle, first1, last1, d_first,
+ std::move(unary_op));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename UnaryOperation,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto transform(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ UnaryOperation unary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_team_impl(teamHandle, begin(source), end(source),
+ begin(dest), std::move(unary_op));
+}
+
+template <
+ typename TeamHandleType, typename InputIterator1, typename InputIterator2,
+ typename OutputIterator, typename BinaryOperation,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIterator1, InputIterator2, OutputIterator> &&
+ is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIterator transform(const TeamHandleType& teamHandle,
+ InputIterator1 first1,
+ InputIterator1 last1,
+ InputIterator2 first2,
+ OutputIterator d_first,
+ BinaryOperation binary_op) {
+ return Impl::transform_team_impl(teamHandle, first1, last1, first2, d_first,
+ std::move(binary_op));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename DataType3,
+ typename... Properties3, typename BinaryOperation,
+ std::enable_if_t<is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto transform(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source1,
+ const ::Kokkos::View<DataType2, Properties2...>& source2,
+ const ::Kokkos::View<DataType3, Properties3...>& dest,
+ BinaryOperation binary_op) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::transform_team_impl(teamHandle, begin(source1), end(source1),
+ begin(source2), begin(dest),
+ std::move(binary_op));
+}
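+
+// Example (illustrative sketch, hypothetical names): apply a unary
+// operation element-wise, writing the result into a destination view.
+//
+//   Kokkos::View<double*> in("in", 20), out("out", 20);
+//   namespace KE = Kokkos::Experimental;
+//   KE::transform(Kokkos::DefaultExecutionSpace(), in, out,
+//                 KOKKOS_LAMBDA(double x) { return 2.0 * x; });
+//   // out(i) == 2 * in(i); the three-view overloads above work
+//   // analogously with a binary operation on two source views.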
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_TransformExclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_exclusive_scan(
+ const ExecutionSpace& ex, InputIteratorType first, InputIteratorType last,
+ OutputIteratorType first_dest, ValueType init_value, BinaryOpType binary_op,
+ UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::transform_exclusive_scan_exespace_impl(
+ "Kokkos::transform_exclusive_scan_custom_functors_iterator_api", ex,
+ first, last, first_dest, std::move(init_value), binary_op, unary_op);
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_exclusive_scan(
+ const std::string& label, const ExecutionSpace& ex, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest, ValueType init_value,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::transform_exclusive_scan_exespace_impl(
+ label, ex, first, last, first_dest, std::move(init_value), binary_op,
+ unary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_exclusive_scan(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_exclusive_scan_exespace_impl(
+ "Kokkos::transform_exclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ std::move(init_value), binary_op, unary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_exclusive_scan(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_exclusive_scan_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value), binary_op, unary_op);
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType transform_exclusive_scan(
+ const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest, ValueType init_value,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ return Impl::transform_exclusive_scan_team_impl(
+ teamHandle, first, last, first_dest, std::move(init_value), binary_op,
+ unary_op);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryOpType, typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto transform_exclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_exclusive_scan_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), std::move(init_value), binary_op, unary_op);
+}
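+
+// Example (illustrative sketch, hypothetical names): exclusive scan of
+// squared elements, seeded with an init value of 0.
+//
+//   Kokkos::View<int*> in("in", 8), out("out", 8);
+//   namespace KE = Kokkos::Experimental;
+//   KE::transform_exclusive_scan(
+//       Kokkos::DefaultExecutionSpace(), in, out, /*init_value=*/0,
+//       KOKKOS_LAMBDA(int a, int b) { return a + b; },  // binary_op
+//       KOKKOS_LAMBDA(int a) { return a * a; });        // unary_op
+//   // out(0) == 0 and out(i) == sum of in(j)*in(j) for j < i.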
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_TransformInclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// overload set 1 (no init value)
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_inclusive_scan(const ExecutionSpace& ex,
+ InputIteratorType first,
+ InputIteratorType last,
+ OutputIteratorType first_dest,
+ BinaryOpType binary_op,
+ UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::transform_inclusive_scan_exespace_impl(
+ "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
+ first, last, first_dest, binary_op, unary_op);
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+
+ return Impl::transform_inclusive_scan_exespace_impl(
+ label, ex, first, last, first_dest, binary_op, unary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_inclusive_scan(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_exespace_impl(
+ "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ binary_op, unary_op);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, unary_op);
+}
+
+// overload set 2 (init value)
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_inclusive_scan(
+ const ExecutionSpace& ex, InputIteratorType first, InputIteratorType last,
+ OutputIteratorType first_dest, BinaryOpType binary_op, UnaryOpType unary_op,
+ ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_inclusive_scan_exespace_impl(
+ "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
+ first, last, first_dest, binary_op, unary_op, std::move(init_value));
+}
+
+template <typename ExecutionSpace, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType>&& ::
+ Kokkos::is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIteratorType transform_inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_inclusive_scan_exespace_impl(
+ label, ex, first, last, first_dest, binary_op, unary_op,
+ std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_inclusive_scan(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_exespace_impl(
+ "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
+ KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+ binary_op, unary_op, std::move(init_value));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto transform_inclusive_scan(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(ex);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_exespace_impl(
+ label, ex, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, unary_op, std::move(init_value));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+
+// overload set 1 (no init value)
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType> &&
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType transform_inclusive_scan(
+ const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+
+ return Impl::transform_inclusive_scan_team_impl(
+ teamHandle, first, last, first_dest, binary_op, unary_op);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto transform_inclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, unary_op);
+}
+
+// overload set 2 (init value)
+template <typename TeamHandleType, typename InputIteratorType,
+ typename OutputIteratorType, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<
+ Impl::are_iterators_v<InputIteratorType, OutputIteratorType> &&
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIteratorType transform_inclusive_scan(
+ const TeamHandleType& teamHandle, InputIteratorType first,
+ InputIteratorType last, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_inclusive_scan_team_impl(
+ teamHandle, first, last, first_dest, binary_op, unary_op,
+ std::move(init_value));
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryOpType,
+ typename UnaryOpType, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto transform_inclusive_scan(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& view_from,
+ const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ namespace KE = ::Kokkos::Experimental;
+ return Impl::transform_inclusive_scan_team_impl(
+ teamHandle, KE::cbegin(view_from), KE::cend(view_from),
+ KE::begin(view_dest), binary_op, unary_op, std::move(init_value));
+}
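+
+// Example (illustrative sketch, hypothetical names): inclusive scan of
+// absolute values using the no-init-value overload.
+//
+//   Kokkos::View<int*> in("in", 8), out("out", 8);
+//   namespace KE = Kokkos::Experimental;
+//   KE::transform_inclusive_scan(
+//       Kokkos::DefaultExecutionSpace(), in, out,
+//       KOKKOS_LAMBDA(int a, int b) { return a + b; },     // binary_op
+//       KOKKOS_LAMBDA(int a) { return a < 0 ? -a : a; });  // unary_op
+//   // out(i) == sum of |in(j)| for j <= i; the init-value overloads
+//   // additionally seed the scan with the given value.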
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
+
+#include "impl/Kokkos_TransformReduce.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set accepting execution space
+//
+
+// ----------------------------
+// overload set1:
+// no custom functors passed, so equivalent to
+// transform_reduce(first1, last1, first2, init, plus<>(), multiplies<>());
+// ----------------------------
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2,
+ ValueType init_reduction_value) {
+ return Impl::transform_reduce_default_functors_exespace_impl(
+ "Kokkos::transform_reduce_default_functors_iterator_api", ex, first1,
+ last1, first2, std::move(init_reduction_value));
+}
+
+template <typename ExecutionSpace, typename IteratorType1,
+ typename IteratorType2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2,
+ ValueType init_reduction_value) {
+ return Impl::transform_reduce_default_functors_exespace_impl(
+ label, ex, first1, last1, first2, std::move(init_reduction_value));
+}
+
+// overload1 accepting views
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_default_functors_exespace_impl(
+ "Kokkos::transform_reduce_default_functors_iterator_api", ex,
+ KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
+ std::move(init_reduction_value));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_default_functors_exespace_impl(
+ label, ex, KE::cbegin(first_view), KE::cend(first_view),
+ KE::cbegin(second_view), std::move(init_reduction_value));
+}
+
+//
+// overload set2:
+// accepts a custom transform and joiner functor
+//
+
+// Note: the C++ standard calls this argument BinaryReductionOp, but in
+// the Kokkos naming convention it corresponds to a "joiner" that knows
+// how to join two values.
+// NOTE: both the "joiner" and the "transformer" need to be commutative.
+
+// https://en.cppreference.com/w/cpp/algorithm/transform_reduce
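+
+// Example (illustrative sketch; x and y are hypothetical rank-1 views of
+// double, KE = Kokkos::Experimental): a "product of sums" reduction,
+// i.e. the product over i of x(i) + y(i); both the joiner (multiply)
+// and the transformer (add) are commutative, as required.
+//
+//   double r = KE::transform_reduce(
+//       Kokkos::DefaultExecutionSpace(),
+//       KE::cbegin(x), KE::cend(x), KE::cbegin(y), /*init=*/1.0,
+//       KOKKOS_LAMBDA(double a, double b) { return a * b; },   // joiner
+//       KOKKOS_LAMBDA(double a, double b) { return a + b; });  // transformer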
+
+// api accepting iterators
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename ValueType, typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value, int> =
+ 0>
+ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2,
+ ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ BinaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
+ last1, first2, std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
+
+template <
+ typename ExecutionSpace, typename IteratorType1, typename IteratorType2,
+ typename ValueType, typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value, int> =
+ 0>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ BinaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ label, ex, first1, last1, first2, std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(
+ const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value, BinaryJoinerType joiner,
+ BinaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ "Kokkos::transform_reduce_custom_functors_view_api", ex,
+ KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
+ std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
+
+template <typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(
+ const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value, BinaryJoinerType joiner,
+ BinaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ label, ex, KE::cbegin(first_view), KE::cend(first_view),
+ KE::cbegin(second_view), std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
+
+//
+// overload set3: single range, unary transformer
+//
+// accepting iterators
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+ IteratorType>::value &&
+ is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(const ExecutionSpace& ex, IteratorType first1,
+ IteratorType last1, ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
+ last1, std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
+
+template <typename ExecutionSpace, typename IteratorType, typename ValueType,
+ typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+ IteratorType>::value &&
+ is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first1, IteratorType last1,
+ ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ label, ex, first1, last1, std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value, int> =
+ 0>
+ValueType transform_reduce(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ "Kokkos::transform_reduce_custom_functors_view_api", ex, KE::cbegin(view),
+ KE::cend(view), std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
+
+template <
+ typename ExecutionSpace, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<::Kokkos::is_execution_space<ExecutionSpace>::value, int> =
+ 0>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::transform_reduce_custom_functors_exespace_impl(
+ label, ex, KE::cbegin(view), KE::cend(view),
+ std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
+
+//
+// overload set accepting a team handle
+// Note: for now we omit the overloads accepting a label,
+// since they cause issues on device because of the string allocation.
+//
+
+// ----------------------------
+// overload set1:
+// no custom functors passed, so equivalent to
+// transform_reduce(first1, last1, first2, init, plus<>(), multiplies<>());
+// ----------------------------
+template <
+ typename TeamHandleType, typename IteratorType1, typename IteratorType2,
+ typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType transform_reduce(const TeamHandleType& teamHandle,
+ IteratorType1 first1,
+ IteratorType1 last1,
+ IteratorType2 first2,
+ ValueType init_reduction_value) {
+ return Impl::transform_reduce_default_functors_team_impl(
+ teamHandle, first1, last1, first2, std::move(init_reduction_value));
+}
+
+// overload1 accepting views
+template <
+ typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType
+transform_reduce(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value) {
+ namespace KE = ::Kokkos::Experimental;
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_default_functors_team_impl(
+ teamHandle, KE::cbegin(first_view), KE::cend(first_view),
+ KE::cbegin(second_view), std::move(init_reduction_value));
+}
+
+//
+// overload set2:
+// accepts a custom transform and joiner functor
+//
+
+// Note: the C++ standard calls this argument BinaryReductionOp, but in
+// the Kokkos naming convention it corresponds to a "joiner" that knows
+// how to join two values.
+// NOTE: both the "joiner" and the "transformer" need to be commutative.
+
+// https://en.cppreference.com/w/cpp/algorithm/transform_reduce
+
+// api accepting iterators
+template <
+ typename TeamHandleType, typename IteratorType1, typename IteratorType2,
+ typename ValueType, typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType transform_reduce(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, ValueType init_reduction_value,
+ BinaryJoinerType joiner, BinaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_team_impl(
+ teamHandle, first1, last1, first2, std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <
+ typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename ValueType,
+ typename BinaryJoinerType, typename BinaryTransform,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType
+transform_reduce(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& first_view,
+ const ::Kokkos::View<DataType2, Properties2...>& second_view,
+ ValueType init_reduction_value, BinaryJoinerType joiner,
+ BinaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+ return Impl::transform_reduce_custom_functors_team_impl(
+ teamHandle, KE::cbegin(first_view), KE::cend(first_view),
+ KE::cbegin(second_view), std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
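+
+// A minimal usage sketch for this overload set (illustrative only; the
+// team handle and the rank-1 views `a` and `b` are hypothetical):
+//
+//   struct Plus {
+//     KOKKOS_FUNCTION double operator()(double l, double r) const {
+//       return l + r;
+//     }
+//   };
+//   struct Times {
+//     KOKKOS_FUNCTION double operator()(double l, double r) const {
+//       return l * r;
+//     }
+//   };
+//   // inside a team-level kernel: dot product of a and b
+//   namespace KE = Kokkos::Experimental;
+//   const double dot = KE::transform_reduce(teamHandle, KE::cbegin(a),
+//                                           KE::cend(a), KE::cbegin(b),
+//                                           0., Plus{}, Times{});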
+
+//
+// overload set3:
+// accepts a custom unary transform and joiner functor
+//
+// accepting iterators
+template <typename TeamHandleType, typename IteratorType, typename ValueType,
+ typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<Impl::are_iterators<IteratorType>::value &&
+ is_team_handle<TeamHandleType>::value,
+ int> = 0>
+KOKKOS_FUNCTION ValueType transform_reduce(const TeamHandleType& teamHandle,
+ IteratorType first1,
+ IteratorType last1,
+ ValueType init_reduction_value,
+ BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ return Impl::transform_reduce_custom_functors_team_impl(
+ teamHandle, first1, last1, std::move(init_reduction_value),
+ std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <
+ typename TeamHandleType, typename DataType, typename... Properties,
+ typename ValueType, typename BinaryJoinerType, typename UnaryTransform,
+ std::enable_if_t<::Kokkos::is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION ValueType
+transform_reduce(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ ValueType init_reduction_value, BinaryJoinerType joiner,
+ UnaryTransform transformer) {
+ namespace KE = ::Kokkos::Experimental;
+ static_assert(std::is_move_constructible_v<ValueType>,
+ "ValueType must be move constructible.");
+
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+ return Impl::transform_reduce_custom_functors_team_impl(
+ teamHandle, KE::cbegin(view), KE::cend(view),
+ std::move(init_reduction_value), std::move(joiner),
+ std::move(transformer));
+}
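+
+// e.g. a sum of squares over a single range (sketch; the team handle and
+// the rank-1 view `v` are hypothetical, Plus is as in the sketch above):
+//
+//   struct Square {
+//     KOKKOS_FUNCTION double operator()(double x) const { return x * x; }
+//   };
+//   const double ssq =
+//       KE::transform_reduce(teamHandle, v, 0., Plus{}, Square{});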
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
+
+#include "impl/Kokkos_Unique.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set1: default predicate, accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<Impl::is_iterator_v<IteratorType> &&
+ is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+IteratorType unique(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
+ return Impl::unique_exespace_impl("Kokkos::unique_iterator_api_default", ex,
+ first, last);
+}
+
+template <typename ExecutionSpace, typename IteratorType,
+ std::enable_if_t<Impl::is_iterator_v<IteratorType> &&
+ is_execution_space<ExecutionSpace>::value,
+ int> = 0>
+IteratorType unique(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ return Impl::unique_exespace_impl(label, ex, first, last);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+auto unique(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::unique_exespace_impl("Kokkos::unique_view_api_default", ex,
+ begin(view), end(view));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+auto unique(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::unique_exespace_impl(label, ex, begin(view), end(view));
+}
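+
+// Usage sketch (illustrative; `ex` is an execution space instance and `v`
+// a rank-1 view, both hypothetical). As with std::unique, the surviving
+// elements are shifted to the front and an iterator past the new logical
+// end is returned:
+//
+//   namespace KE = Kokkos::Experimental;
+//   auto it = KE::unique(ex, v);             // or KE::unique("label", ex, v)
+//   const auto new_size = it - KE::begin(v); // consecutive duplicates removed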
+
+//
+// overload set2: custom predicate, accepting execution space
+//
+template <typename ExecutionSpace, typename IteratorType,
+ typename BinaryPredicate,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+IteratorType unique(const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, BinaryPredicate pred) {
+ return Impl::unique_exespace_impl("Kokkos::unique_iterator_api_default", ex,
+ first, last, pred);
+}
+
+template <typename ExecutionSpace, typename IteratorType,
+ typename BinaryPredicate,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+IteratorType unique(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ BinaryPredicate pred) {
+ return Impl::unique_exespace_impl(label, ex, first, last, pred);
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename BinaryPredicate,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+auto unique(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ BinaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::unique_exespace_impl("Kokkos::unique_view_api_default", ex,
+ begin(view), end(view), std::move(pred));
+}
+
+template <typename ExecutionSpace, typename DataType, typename... Properties,
+ typename BinaryPredicate,
+ std::enable_if_t<is_execution_space<ExecutionSpace>::value, int> = 0>
+auto unique(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ BinaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+ return Impl::unique_exespace_impl(label, ex, begin(view), end(view),
+ std::move(pred));
+}
+
+//
+// overload set3: default predicate, accepting team handle
+// Note: for now we omit the overloads accepting a label, since the
+// string allocation they require causes issues on device.
+//
+template <typename TeamHandleType, typename IteratorType,
+ std::enable_if_t<Impl::is_iterator_v<IteratorType> &&
+ is_team_handle<TeamHandleType>::value,
+ int> = 0>
+KOKKOS_FUNCTION IteratorType unique(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last) {
+ return Impl::unique_team_impl(teamHandle, first, last);
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ std::enable_if_t<is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto unique(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view) {
+ return Impl::unique_team_impl(teamHandle, begin(view), end(view));
+}
+
+//
+// overload set4: custom predicate, accepting team handle
+// Note: for now we omit the overloads accepting a label, since the
+// string allocation they require causes issues on device.
+//
+template <typename TeamHandleType, typename IteratorType,
+ typename BinaryPredicate,
+ std::enable_if_t<is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION IteratorType unique(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ BinaryPredicate pred) {
+ return Impl::unique_team_impl(teamHandle, first, last, std::move(pred));
+}
+
+template <typename TeamHandleType, typename DataType, typename... Properties,
+ typename BinaryPredicate,
+ std::enable_if_t<is_team_handle<TeamHandleType>::value, int> = 0>
+KOKKOS_FUNCTION auto unique(const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType, Properties...>& view,
+ BinaryPredicate pred) {
+ return Impl::unique_team_impl(teamHandle, begin(view), end(view),
+ std::move(pred));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
+
+#include "impl/Kokkos_UniqueCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set1: default predicate, accepting execution space
+//
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator unique_copy(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ return Impl::unique_copy_exespace_impl(
+ "Kokkos::unique_copy_iterator_api_default", ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ is_execution_space_v<ExecutionSpace>,
+ int> = 0>
+OutputIterator unique_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first) {
+ return Impl::unique_copy_exespace_impl(label, ex, first, last, d_first);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto unique_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_exespace_impl("Kokkos::unique_copy_view_api_default",
+ ex, cbegin(source), cend(source),
+ begin(dest));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto unique_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_exespace_impl(label, ex, cbegin(source),
+ cend(source), begin(dest));
+}
+
+//
+// overload set2: custom predicate, accepting execution space
+//
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator unique_copy(const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first,
+ BinaryPredicate pred) {
+ return Impl::unique_copy_exespace_impl(
+ "Kokkos::unique_copy_iterator_api_default", ex, first, last, d_first,
+ pred);
+}
+
+template <
+ typename ExecutionSpace, typename InputIterator, typename OutputIterator,
+ typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+OutputIterator unique_copy(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first, BinaryPredicate pred) {
+ return Impl::unique_copy_exespace_impl(label, ex, first, last, d_first, pred);
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto unique_copy(const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ BinaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_exespace_impl("Kokkos::unique_copy_view_api_default",
+ ex, cbegin(source), cend(source),
+ begin(dest), std::move(pred));
+}
+
+template <
+ typename ExecutionSpace, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_execution_space_v<ExecutionSpace>, int> = 0>
+auto unique_copy(const std::string& label, const ExecutionSpace& ex,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ BinaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_exespace_impl(
+ label, ex, cbegin(source), cend(source), begin(dest), std::move(pred));
+}
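+
+// Usage sketch with a custom predicate (illustrative; `ex`, `src`, `dst`
+// are hypothetical and `dst` must be large enough to hold the result):
+//
+//   struct WithinOne {
+//     KOKKOS_FUNCTION bool operator()(int a, int b) const {
+//       return Kokkos::abs(a - b) <= 1;
+//     }
+//   };
+//   // copies src to dst, skipping elements "equal" to their predecessor;
+//   // returns an iterator past the last element written
+//   auto it = KE::unique_copy(ex, src, dst, WithinOne{});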
+
+//
+// overload set3: default predicate, accepting team handle
+// Note: for now we omit the overloads accepting a label, since the
+// string allocation they require causes issues on device.
+//
+template <
+ typename TeamHandleType, typename InputIterator, typename OutputIterator,
+ std::enable_if_t<Impl::are_iterators_v<InputIterator, OutputIterator> &&
+ Kokkos::is_team_handle_v<TeamHandleType>,
+ int> = 0>
+KOKKOS_FUNCTION OutputIterator unique_copy(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ return Impl::unique_copy_team_impl(teamHandle, first, last, d_first);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto unique_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_team_impl(teamHandle, cbegin(source), cend(source),
+ begin(dest));
+}
+
+//
+// overload set4: custom predicate, accepting team handle
+// Note: for now we omit the overloads accepting a label, since the
+// string allocation they require causes issues on device.
+//
+template <typename TeamHandleType, typename InputIterator,
+ typename OutputIterator, typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION OutputIterator unique_copy(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first,
+ BinaryPredicate pred) {
+ return Impl::unique_copy_team_impl(teamHandle, first, last, d_first, pred);
+}
+
+template <typename TeamHandleType, typename DataType1, typename... Properties1,
+ typename DataType2, typename... Properties2, typename BinaryPredicate,
+ std::enable_if_t<::Kokkos::is_team_handle_v<TeamHandleType>, int> = 0>
+KOKKOS_FUNCTION auto unique_copy(
+ const TeamHandleType& teamHandle,
+ const ::Kokkos::View<DataType1, Properties1...>& source,
+ const ::Kokkos::View<DataType2, Properties2...>& dest,
+ BinaryPredicate pred) {
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+ Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+ return Impl::unique_copy_team_impl(teamHandle, cbegin(source), cend(source),
+ begin(dest), std::move(pred));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType1, class ValueType2, class RetType = ValueType2>
+struct StdAdjacentDifferenceDefaultBinaryOpFunctor {
+ KOKKOS_FUNCTION
+ constexpr RetType operator()(const ValueType1& a, const ValueType2& b) const {
+ return a - b;
+ }
+};
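+
+// e.g. with this default op an input {1, 3, 6, 10} produces {1, 2, 3, 4}:
+// out[0] = in[0] and out[i] = in[i] - in[i-1] for i > 0, which is exactly
+// what the functor below computes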
+
+template <class InputIteratorType, class OutputIteratorType,
+ class BinaryOperator>
+struct StdAdjacentDiffFunctor {
+ using index_type = typename InputIteratorType::difference_type;
+
+ const InputIteratorType m_first_from;
+ const OutputIteratorType m_first_dest;
+ BinaryOperator m_op;
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i) const {
+ const auto& my_value = m_first_from[i];
+ if (i == 0) {
+ m_first_dest[i] = my_value;
+ } else {
+ const auto& left_value = m_first_from[i - 1];
+ m_first_dest[i] = m_op(my_value, left_value);
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdAdjacentDiffFunctor(InputIteratorType first_from,
+ OutputIteratorType first_dest, BinaryOperator op)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_op(std::move(op)) {}
+};
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class BinaryOp>
+OutputIteratorType adjacent_difference_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, BinaryOp bin_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ if (first_from == last_from) {
+ return first_dest;
+ }
+
+#ifdef KOKKOS_ENABLE_DEBUG
+ // check for overlapping iterators
+ Impl::expect_no_overlap(first_from, last_from, first_dest);
+#endif
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdAdjacentDiffFunctor(first_from, first_dest, bin_op));
+ ex.fence("Kokkos::adjacent_difference: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class BinaryOp>
+KOKKOS_FUNCTION OutputIteratorType adjacent_difference_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ BinaryOp bin_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ if (first_from == last_from) {
+ return first_dest;
+ }
+
+#ifdef KOKKOS_ENABLE_DEBUG
+ // check for overlapping iterators
+ Impl::expect_no_overlap(first_from, last_from, first_dest);
+#endif
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ StdAdjacentDiffFunctor(first_from, first_dest, bin_op));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ReducerType, class PredicateType>
+struct StdAdjacentFindFunctor {
+ using index_type = typename IteratorType::difference_type;
+ using red_value_type = typename ReducerType::value_type;
+
+ IteratorType m_first;
+ ReducerType m_reducer;
+ PredicateType m_p;
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, red_value_type& red_value) const {
+ const auto& my_value = m_first[i];
+ const auto& next_value = m_first[i + 1];
+ const bool are_equal = m_p(my_value, next_value);
+
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type value = {::Kokkos::reduction_identity<index_type>::min()};
+ if (are_equal) {
+ value.min_loc_true = i;
+ }
+
+ m_reducer.join(red_value, value);
+ }
+
+ KOKKOS_FUNCTION
+ StdAdjacentFindFunctor(IteratorType first, ReducerType reducer,
+ PredicateType p)
+ : m_first(std::move(first)),
+ m_reducer(std::move(reducer)),
+ m_p(std::move(p)) {}
+};
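+
+// e.g. for the range {1, 2, 2, 4} the reduction below runs over i = 0..2;
+// the predicate fires at i = 1, so adjacent_find returns first + 1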
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType adjacent_find_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+ if (num_elements <= 1) {
+ return last;
+ }
+
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+
+  // note that below we use num_elements-1 because
+  // each index i in the reduction checks both i and (i+1).
+ ::Kokkos::parallel_reduce(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements - 1),
+ // use CTAD
+ StdAdjacentFindFunctor(first, reducer, pred), reducer);
+
+ // fence not needed because reducing into scalar
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ return last;
+ } else {
+ return first + red_result.min_loc_true;
+ }
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType adjacent_find_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last) {
+ using value_type = typename IteratorType::value_type;
+ using default_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
+ return adjacent_find_exespace_impl(label, ex, first, last, default_pred_t());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType, class PredicateType>
+KOKKOS_FUNCTION IteratorType
+adjacent_find_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+ if (num_elements <= 1) {
+ return last;
+ }
+
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+
+  // note that below we use num_elements-1 because
+  // each index i in the reduction checks both i and (i+1).
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements - 1),
+ // use CTAD
+ StdAdjacentFindFunctor(first, reducer, pred),
+ reducer);
+
+ teamHandle.team_barrier();
+
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ return last;
+ } else {
+ return first + red_result.min_loc_true;
+ }
+}
+
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType adjacent_find_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last) {
+ using value_type = typename IteratorType::value_type;
+ using default_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
+ return adjacent_find_team_impl(teamHandle, first, last, default_pred_t());
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
+
+#include "Kokkos_FindIfOrNot.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
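+// all three algorithms reduce to the find_if_or_not primitive:
+//   all_of(p)  <=> find_if_or_not<false>(p) == last  (no element fails p)
+//   any_of(p)  <=> find_if_or_not<true>(p)  != last  (some element satisfies p)
+//   none_of(p) <=> find_if_or_not<true>(p)  == last  (no element satisfies p)
+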
+//
+// exespace impl
+//
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool all_of_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return (find_if_or_not_exespace_impl<false>(label, ex, first, last,
+ predicate) == last);
+}
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool any_of_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return (find_if_or_not_exespace_impl<true>(label, ex, first, last,
+ predicate) != last);
+}
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+bool none_of_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return (find_if_or_not_exespace_impl<true>(label, ex, first, last,
+ predicate) == last);
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class InputIterator, class Predicate>
+KOKKOS_FUNCTION bool all_of_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return (find_if_or_not_team_impl<false>(teamHandle, first, last, predicate) ==
+ last);
+}
+
+template <class TeamHandleType, class InputIterator, class Predicate>
+KOKKOS_FUNCTION bool any_of_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first, InputIterator last,
+ Predicate predicate) {
+ return (find_if_or_not_team_impl<true>(teamHandle, first, last, predicate) !=
+ last);
+}
+
+template <class TeamHandleType, class IteratorType, class Predicate>
+KOKKOS_FUNCTION bool none_of_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ Predicate predicate) {
+ return (find_if_or_not_team_impl<true>(teamHandle, first, last, predicate) ==
+ last);
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_CONSTRAINTS_HPP_
#define KOKKOS_STD_ALGORITHMS_CONSTRAINTS_HPP_
namespace Experimental {
namespace Impl {
+template <class T>
+class RandomAccessIterator;
+
template <typename T, typename enable = void>
struct is_admissible_to_kokkos_std_algorithms : std::false_type {};
template <typename T>
struct is_admissible_to_kokkos_std_algorithms<
- T, std::enable_if_t< ::Kokkos::is_view<T>::value && T::rank == 1 &&
- (std::is_same<typename T::traits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename T::traits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename T::traits::array_layout,
- Kokkos::LayoutStride>::value)> >
+ T, std::enable_if_t<::Kokkos::is_view<T>::value && T::rank() == 1 &&
+ (std::is_same_v<typename T::traits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename T::traits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename T::traits::array_layout,
+ Kokkos::LayoutStride>)>>
: std::true_type {};
template <class ViewType>
template <class T>
using is_iterator = Kokkos::is_detected<iterator_category_t, T>;
+template <class T>
+inline constexpr bool is_iterator_v = is_iterator<T>::value;
+
+template <typename ViewType>
+struct is_kokkos_iterator : std::false_type {};
+
+template <typename ViewType>
+struct is_kokkos_iterator<RandomAccessIterator<ViewType>> {
+ static constexpr bool value =
+ is_admissible_to_kokkos_std_algorithms<ViewType>::value;
+};
+
+template <class T>
+inline constexpr bool is_kokkos_iterator_v = is_kokkos_iterator<T>::value;
+
//
// are_iterators
//
template <class T>
struct are_iterators<T> {
- static constexpr bool value = is_iterator<T>::value;
+ static constexpr bool value = is_iterator_v<T>;
};
template <class Head, class... Tail>
struct are_iterators<Head, Tail...> {
static constexpr bool value =
- are_iterators<Head>::value && are_iterators<Tail...>::value;
+ are_iterators<Head>::value && (are_iterators<Tail>::value && ... && true);
};
+template <class... Ts>
+inline constexpr bool are_iterators_v = are_iterators<Ts...>::value;
+
//
// are_random_access_iterators
//
template <class T>
struct are_random_access_iterators<T> {
static constexpr bool value =
- is_iterator<T>::value &&
- std::is_base_of<std::random_access_iterator_tag,
- typename T::iterator_category>::value;
+ is_iterator_v<T> && std::is_base_of_v<std::random_access_iterator_tag,
+ typename T::iterator_category>;
};
template <class Head, class... Tail>
struct are_random_access_iterators<Head, Tail...> {
- static constexpr bool value = are_random_access_iterators<Head>::value &&
- are_random_access_iterators<Tail...>::value;
+ static constexpr bool value =
+ are_random_access_iterators<Head>::value &&
+ (are_random_access_iterators<Tail>::value && ... && true);
};
+template <class... Ts>
+inline constexpr bool are_random_access_iterators_v =
+ are_random_access_iterators<Ts...>::value;
+
//
// iterators_are_accessible_from
//
iterators_are_accessible_from<ExeSpace, Tail...>::value;
};
-template <class ExecutionSpace, class... IteratorTypes>
+template <class ExecutionSpaceOrTeamHandleType, class... IteratorTypes>
KOKKOS_INLINE_FUNCTION constexpr void
-static_assert_random_access_and_accessible(const ExecutionSpace& /* ex */,
- IteratorTypes... /* iterators */) {
+static_assert_random_access_and_accessible(
+ const ExecutionSpaceOrTeamHandleType& /* ex_or_th*/,
+ IteratorTypes... /* iterators */) {
static_assert(
are_random_access_iterators<IteratorTypes...>::value,
"Currently, Kokkos standard algorithms require random access iterators.");
- static_assert(
- iterators_are_accessible_from<ExecutionSpace, IteratorTypes...>::value,
- "Incompatible view/iterator and execution space");
+ static_assert(iterators_are_accessible_from<
+ typename ExecutionSpaceOrTeamHandleType::execution_space,
+ IteratorTypes...>::value,
+ "Incompatible view/iterator and execution space");
}
//
template <class T1, class T2>
struct iterators_have_matching_difference_type<T1, T2> {
- static constexpr bool value =
- std::is_same<typename T1::difference_type,
- typename T2::difference_type>::value;
+ static constexpr bool value = std::is_same_v<typename T1::difference_type,
+ typename T2::difference_type>;
};
template <class T1, class T2, class... Tail>
#endif
};
-template <class ExecutionSpace>
+template <class ExecutionSpaceOrTeamHandleType>
KOKKOS_INLINE_FUNCTION constexpr void static_assert_is_not_openmptarget(
- const ExecutionSpace&) {
- static_assert(not_openmptarget<ExecutionSpace>::value,
+ const ExecutionSpaceOrTeamHandleType& /*ex_or_th*/) {
+ static_assert(not_openmptarget<ExecutionSpaceOrTeamHandleType>::value,
"Currently, Kokkos standard algorithms do not support custom "
"comparators in OpenMPTarget");
}
// valid range
//
template <class IteratorType>
-void expect_valid_range(IteratorType first, IteratorType last) {
+KOKKOS_INLINE_FUNCTION void expect_valid_range(IteratorType first,
+ IteratorType last) {
// this is a no-op for release
KOKKOS_EXPECTS(last >= first);
// avoid compiler complaining when KOKKOS_EXPECTS is no-op
(void)last;
}
+//
+// Check if kokkos iterators are overlapping
+//
+template <typename IteratorType1, typename IteratorType2>
+KOKKOS_INLINE_FUNCTION void expect_no_overlap(
+ [[maybe_unused]] IteratorType1 first, [[maybe_unused]] IteratorType1 last,
+ [[maybe_unused]] IteratorType2 s_first) {
+ if constexpr (is_kokkos_iterator_v<IteratorType1> &&
+ is_kokkos_iterator_v<IteratorType2>) {
+ auto const view1 = first.view();
+ auto const view2 = s_first.view();
+
+ std::size_t stride1 = view1.stride(0);
+ std::size_t stride2 = view2.stride(0);
+ ptrdiff_t first_diff = view1.data() - view2.data();
+
+    // FIXME If the strides are not identical, the check cannot be made
+    // at O(1) cost, so currently it is performed only when the strides
+    // are identical. If first_diff == 0, the ranges trivially overlap.
+ if (stride1 == stride2 || first_diff == 0) {
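+      // with identical strides, a pointer difference that is not a
+      // multiple of the stride means the two sequences visit disjoint
+      // addresses and therefore cannot overlap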
+ [[maybe_unused]] bool is_no_overlap = (first_diff % stride1);
+ auto* first_pointer1 = view1.data();
+ auto* first_pointer2 = view2.data();
+ [[maybe_unused]] auto* last_pointer1 = first_pointer1 + (last - first);
+ [[maybe_unused]] auto* last_pointer2 = first_pointer2 + (last - first);
+ KOKKOS_EXPECTS(first_pointer1 >= last_pointer2 ||
+ last_pointer1 <= first_pointer2 || is_no_overlap);
+ }
+ }
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2>
+struct StdCopyBackwardFunctor {
+  // we can use the difference type from IteratorType1 since the calling
+  // functions below already static_assert that the iterators have
+  // matching difference types
+ using index_type = typename IteratorType1::difference_type;
+
+ IteratorType1 m_last;
+ IteratorType2 m_dest_last;
+
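+  // index i copies the i-th element counting back from the end:
+  // i == 0 copies *(last - 1) into *(d_last - 1), and so on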
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_dest_last[-i - 1] = m_last[-i - 1]; }
+
+ KOKKOS_FUNCTION
+ StdCopyBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
+ : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 copy_backward_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 d_last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_last);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ // use CTAD
+ StdCopyBackwardFunctor(last, d_last));
+ ex.fence("Kokkos::copy_backward: fence after operation");
+
+ // return
+ return d_last - num_elements;
+}
+
+//
+// team-level impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType2
+copy_backward_team_impl(const TeamHandleType& teamHandle, IteratorType1 first,
+ IteratorType1 last, IteratorType2 d_last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_last);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ // use CTAD
+ StdCopyBackwardFunctor(last, d_last));
+ teamHandle.team_barrier();
+
+ // return
+ return d_last - num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator>
+struct StdCopyFunctor {
+  // we can use the difference type from InputIterator since the calling
+  // functions below already static_assert that the iterators have
+  // matching difference types
+ using index_type = typename InputIterator::difference_type;
+
+ InputIterator m_first;
+ OutputIterator m_dest_first;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_dest_first[i] = m_first[i]; }
+
+ KOKKOS_FUNCTION
+ StdCopyFunctor(InputIterator _first, OutputIterator _dest_first)
+ : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator copy_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ // use CTAD
+ StdCopyFunctor(first, d_first));
+ ex.fence("Kokkos::copy: fence after operation");
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class ExecutionSpace, class InputIterator, class Size,
+ class OutputIterator>
+OutputIterator copy_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first_from, Size count,
+ OutputIterator first_dest) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+
+ if (count > 0) {
+ return copy_exespace_impl(label, ex, first_from, first_from + count,
+ first_dest);
+ } else {
+ return first_dest;
+ }
+}
+
+//
+// team-level impl
+//
+template <class TeamHandleType, class InputIterator, class OutputIterator>
+KOKKOS_FUNCTION OutputIterator copy_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ // use CTAD
+ StdCopyFunctor(first, d_first));
+ teamHandle.team_barrier();
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class TeamHandleType, class InputIterator, class Size,
+ class OutputIterator>
+KOKKOS_FUNCTION OutputIterator
+copy_n_team_impl(const TeamHandleType& teamHandle, InputIterator first_from,
+ Size count, OutputIterator first_dest) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+
+ if (count > 0) {
+ return copy_team_impl(teamHandle, first_from, first_from + count,
+ first_dest);
+ } else {
+ return first_dest;
+ }
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_COPY_IF_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_COPY_IF_IMPL_HPP
#include <Kokkos_Core.hpp>
#include "Kokkos_Constraints.hpp"
#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_MustUseKokkosSingleInTeam.hpp"
#include <std_algorithms/Kokkos_Distance.hpp>
#include <string>
namespace Experimental {
namespace Impl {
-template <class IndexType, class FirstFrom, class FirstDest, class PredType>
+template <class FirstFrom, class FirstDest, class PredType>
struct StdCopyIfFunctor {
+ using index_type = typename FirstFrom::difference_type;
+
FirstFrom m_first_from;
FirstDest m_first_dest;
PredType m_pred;
m_pred(std::move(pred)) {}
KOKKOS_FUNCTION
- void operator()(const IndexType i, IndexType& update,
+ void operator()(const index_type i, index_type& update,
const bool final_pass) const {
const auto& myval = m_first_from[i];
if (final_pass) {
template <class ExecutionSpace, class InputIterator, class OutputIterator,
class PredicateType>
-OutputIterator copy_if_impl(const std::string& label, const ExecutionSpace& ex,
- InputIterator first, InputIterator last,
- OutputIterator d_first, PredicateType pred) {
+OutputIterator copy_if_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first, InputIterator last,
+ OutputIterator d_first,
+ PredicateType pred) {
/*
To explain the impl, suppose that our data is:
if (first == last) {
return d_first;
} else {
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_type = StdCopyIfFunctor<index_type, InputIterator,
- OutputIterator, PredicateType>;
-
// run
const auto num_elements = Kokkos::Experimental::distance(first, last);
- index_type count = 0;
+
+ typename InputIterator::difference_type count = 0;
::Kokkos::parallel_scan(label,
RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(first, d_first, pred), count);
+ // use CTAD
+ StdCopyIfFunctor(first, d_first, pred), count);
// fence not needed because of the scan accumulating into count
return d_first + count;
}
}
+template <class TeamHandleType, class InputIterator, class OutputIterator,
+ class PredicateType>
+KOKKOS_FUNCTION OutputIterator copy_if_team_impl(
+ const TeamHandleType& teamHandle, InputIterator first, InputIterator last,
+ OutputIterator d_first, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return d_first;
+ }
+
+ const std::size_t num_elements = Kokkos::Experimental::distance(first, last);
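+
+  // some backends cannot use a team-level parallel_scan that accumulates
+  // into a result value; for those (detected via the trait below) we fall
+  // back to a serial pass run by a single team member and broadcast the
+  // count to all members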
+ if constexpr (stdalgo_must_use_kokkos_single_for_team_scan_v<
+ typename TeamHandleType::execution_space>) {
+ std::size_t count = 0;
+ Kokkos::single(
+ Kokkos::PerTeam(teamHandle),
+ [=](std::size_t& lcount) {
+ lcount = 0;
+ for (std::size_t i = 0; i < num_elements; ++i) {
+ const auto& myval = first[i];
+ if (pred(myval)) {
+ d_first[lcount++] = myval;
+ }
+ }
+ },
+ count);
+ // no barrier needed since single above broadcasts to all members
+ return d_first + count;
+
+ } else {
+ typename InputIterator::difference_type count = 0;
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, num_elements),
+ StdCopyIfFunctor(first, d_first, pred), count);
+ // no barrier needed because of the scan accumulating into count
+ return d_first + count;
+ }
+
+#if defined KOKKOS_COMPILER_INTEL || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class Predicate>
+struct StdCountIfFunctor {
+ using index_type = typename IteratorType::difference_type;
+ IteratorType m_first;
+ Predicate m_predicate;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i, index_type& lsum) const {
+ if (m_predicate(m_first[i])) {
+ lsum++;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdCountIfFunctor(IteratorType _first, Predicate _predicate)
+ : m_first(std::move(_first)), m_predicate(std::move(_predicate)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+typename IteratorType::difference_type count_if_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, Predicate predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ typename IteratorType::difference_type count = 0;
+ ::Kokkos::parallel_reduce(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ // use CTAD
+ StdCountIfFunctor(first, predicate), count);
+ ex.fence("Kokkos::count_if: fence after operation");
+
+ return count;
+}
+
+template <class ExecutionSpace, class IteratorType, class T>
+auto count_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ const T& value) {
+ return count_if_exespace_impl(
+ label, ex, first, last,
+ ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
+}
+
+//
+// team-level impl
+//
+template <class TeamHandleType, class IteratorType, class Predicate>
+KOKKOS_FUNCTION typename IteratorType::difference_type count_if_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ Predicate predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ typename IteratorType::difference_type count = 0;
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ // use CTAD
+ StdCountIfFunctor(first, predicate), count);
+ teamHandle.team_barrier();
+
+ return count;
+}
+
+template <class TeamHandleType, class IteratorType, class T>
+KOKKOS_FUNCTION auto count_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ const T& value) {
+ return count_if_team_impl(
+ teamHandle, first, last,
+ ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2, class BinaryPredicateType>
+struct StdEqualFunctor {
+ using index_type = typename IteratorType1::difference_type;
+
+ IteratorType1 m_first1;
+ IteratorType2 m_first2;
+ BinaryPredicateType m_predicate;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i, std::size_t& lsum) const {
+ if (!m_predicate(m_first1[i], m_first2[i])) {
+ lsum = 1;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdEqualFunctor(IteratorType1 _first1, IteratorType2 _first2,
+ BinaryPredicateType _predicate)
+ : m_first1(std::move(_first1)),
+ m_first2(std::move(_first2)),
+ m_predicate(std::move(_predicate)) {}
+};
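+
+// the reduction below accumulates a nonzero value iff at least one
+// position differs, so two ranges compare equal exactly when the reduced
+// result is zero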
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+bool equal_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, BinaryPredicateType predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ std::size_t different = 0;
+ ::Kokkos::parallel_reduce(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdEqualFunctor(first1, first2, predicate), different);
+ ex.fence("Kokkos::equal: fence after operation");
+
+ return !different;
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool equal_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return equal_exespace_impl(label, ex, first1, last1, first2, pred_t());
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+bool equal_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType predicate) {
+ const auto d1 = ::Kokkos::Experimental::distance(first1, last1);
+ const auto d2 = ::Kokkos::Experimental::distance(first2, last2);
+ if (d1 != d2) {
+ return false;
+ }
+
+ return equal_exespace_impl(label, ex, first1, last1, first2, predicate);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool equal_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ Impl::expect_valid_range(first1, last1);
+ Impl::expect_valid_range(first2, last2);
+
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return equal_exespace_impl(label, ex, first1, last1, first2, last2, pred_t());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION bool equal_team_impl(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2,
+ BinaryPredicateType predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ std::size_t different = 0;
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ StdEqualFunctor(first1, first2, predicate),
+ different);
+ teamHandle.team_barrier();
+
+ return !different;
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION bool equal_team_impl(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return equal_team_impl(teamHandle, first1, last1, first2, pred_t());
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION bool equal_team_impl(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType predicate) {
+ const auto d1 = ::Kokkos::Experimental::distance(first1, last1);
+ const auto d2 = ::Kokkos::Experimental::distance(first2, last2);
+ if (d1 != d2) {
+ return false;
+ }
+
+ return equal_team_impl(teamHandle, first1, last1, first2, predicate);
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION bool equal_team_impl(const TeamHandleType& teamHandle,
+ IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2,
+ IteratorType2 last2) {
+ Impl::expect_valid_range(first1, last1);
+ Impl::expect_valid_range(first2, last2);
+
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return equal_team_impl(teamHandle, first1, last1, first2, last2, pred_t());
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
+#include "Kokkos_FunctorsForExclusiveScan.hpp"
+#include <std_algorithms/Kokkos_TransformExclusiveScan.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class ValueType>
+OutputIteratorType exclusive_scan_default_op_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, ValueType init_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // does it make sense to do this static_assert too?
+ // using input_iterator_value_type = typename InputIteratorType::value_type;
+ // static_assert
+ // (std::is_convertible<std::remove_cv_t<input_iterator_value_type>,
+ // ValueType>::value,
+ // "exclusive_scan: InputIteratorType::value_type not convertible to
+ // ValueType");
+
+ // We duplicate some code here on purpose so that the default-op path
+ // also works for OpenMPTarget. Originally this was implemented as:
+ // '''
+ // using bop_type = StdExclusiveScanDefaultJoinFunctor<ValueType>;
+ // call exclusive_scan_custom_op_impl(..., bop_type());
+ // '''
+ // which avoids duplicating the functors, but OpenMPTarget cannot use a
+ // custom binary op. This is the same limitation that affects reductions.
+
+ // aliases
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = std::conditional_t<
+ ::Kokkos::is_detected<ex_scan_has_reduction_identity_sum_t,
+ ValueType>::value,
+ ExclusiveScanDefaultFunctorForKnownNeutralElement<
+ ExecutionSpace, index_type, ValueType, InputIteratorType,
+ OutputIteratorType>,
+ ExclusiveScanDefaultFunctorWithValueWrapper<ExecutionSpace, index_type,
+ ValueType, InputIteratorType,
+ OutputIteratorType>>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_type(std::move(init_value), first_from, first_dest));
+
+ ex.fence("Kokkos::exclusive_scan_default_op: fence after operation");
+
+ return first_dest + num_elements;
+}
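+
+// For reference, with init_value = 10 and input {1, 2, 3}, the default-op
+// exclusive scan writes {10, 11, 13}: each output element is init_value plus
+// the sum of all *preceding* inputs, and the last input never contributes.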
+
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class ValueType, class BinaryOpType>
+OutputIteratorType exclusive_scan_custom_op_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using index_type = typename InputIteratorType::difference_type;
+ using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
+ using func_type = TransformExclusiveScanFunctorWithValueWrapper<
+ ExecutionSpace, index_type, ValueType, InputIteratorType,
+ OutputIteratorType, BinaryOpType, unary_op_type>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_type(std::move(init_value), first_from,
+ first_dest, bop, unary_op_type()));
+ ex.fence("Kokkos::exclusive_scan_custom_op: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class ValueType>
+KOKKOS_FUNCTION OutputIteratorType exclusive_scan_default_op_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ ValueType init_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ static_assert(
+ ::Kokkos::is_detected_v<ex_scan_has_reduction_identity_sum_t, ValueType>,
+ "The team-level impl of Kokkos::Experimental::exclusive_scan currently "
+ "does not support types without reduction identity");
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = ExclusiveScanDefaultFunctorForKnownNeutralElement<
+ exe_space, index_type, ValueType, InputIteratorType, OutputIteratorType>;
+
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(std::move(init_value), first_from, first_dest));
+ teamHandle.team_barrier();
+ return first_dest + num_elements;
+}
+
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class ValueType, class BinaryOpType>
+KOKKOS_FUNCTION OutputIteratorType exclusive_scan_custom_op_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ ValueType init_value, BinaryOpType bop) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ static_assert(
+ ::Kokkos::is_detected_v<ex_scan_has_reduction_identity_sum_t, ValueType>,
+ "The team-level impl of Kokkos::Experimental::exclusive_scan currently "
+ "does not support types without reduction identity");
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = TransformExclusiveScanFunctorWithoutValueWrapper<
+ exe_space, index_type, ValueType, InputIteratorType, OutputIteratorType,
+ BinaryOpType, unary_op_type>;
+
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(std::move(init_value), first_from,
+ first_dest, bop, unary_op_type()));
+ teamHandle.team_barrier();
+
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class T>
+struct StdFillFunctor {
+ using index_type = typename InputIterator::difference_type;
+ InputIterator m_first;
+ T m_value;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_first[i] = m_value; }
+
+ KOKKOS_FUNCTION
+ StdFillFunctor(InputIterator _first, T _value)
+ : m_first(std::move(_first)), m_value(std::move(_value)) {}
+};
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class IteratorType, class T>
+void fill_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last, const T& value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdFillFunctor(first, value));
+ ex.fence("Kokkos::fill: fence after operation");
+}
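+
+// A minimal usage sketch through the public API (illustrative only; assumes
+// a device-accessible View v):
+//
+//   namespace KE = Kokkos::Experimental;
+//   KE::fill(Kokkos::DefaultExecutionSpace(), KE::begin(v), KE::end(v), 5);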
+
+template <class ExecutionSpace, class IteratorType, class SizeType, class T>
+IteratorType fill_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ SizeType n, const T& value) {
+ auto last = first + n;
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ if (n <= 0) {
+ return first;
+ }
+
+ fill_exespace_impl(label, ex, first, last, value);
+ return last;
+}
+
+//
+// team-level impl
+//
+template <class TeamHandleType, class IteratorType, class T>
+KOKKOS_FUNCTION void fill_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ const T& value) {
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdFillFunctor(first, value));
+
+ teamHandle.team_barrier();
+}
+
+template <class TeamHandleType, class IteratorType, class SizeType, class T>
+KOKKOS_FUNCTION IteratorType fill_n_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, SizeType n,
+ const T& value) {
+ auto last = first + n;
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ if (n <= 0) {
+ return first;
+ }
+
+ fill_team_impl(teamHandle, first, last, value);
+ return last;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_FIND_END_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_FIND_END_IMPL_HPP
}
}
- const auto rv =
- found ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::max()};
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::max()};
+ if (found) {
+ rv.max_loc_true = i;
+ }
m_reducer.join(red_value, rv);
}
m_p(std::move(p)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class BinaryPredicateType>
-IteratorType1 find_end_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last,
- const BinaryPredicateType& pred) {
+IteratorType1 find_end_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first, s_first);
Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
const auto num_elements = KE::distance(first, last);
const auto s_count = KE::distance(s_first, s_last);
KOKKOS_EXPECTS(num_elements >= s_count);
- (void)s_count; // needed when macro above is a no-op
if (s_first == s_last) {
return last;
// special case where the two ranges have equal size
if (num_elements == s_count) {
- const auto equal_result = equal_impl(label, ex, first, last, s_first, pred);
+ const auto equal_result =
+ equal_exespace_impl(label, ex, first, last, s_first, pred);
return (equal_result) ? first : last;
} else {
using index_type = typename IteratorType1::difference_type;
}
template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_end_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last) {
+IteratorType1 find_end_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return find_end_exespace_impl(label, ex, first, last, s_first, s_last,
+ predicate_type());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION IteratorType1
+find_end_team_impl(const TeamHandleType& teamHandle, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last, const BinaryPredicateType& pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, s_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+ Impl::expect_valid_range(first, last);
+ Impl::expect_valid_range(s_first, s_last);
+
+ // the target sequence should not be larger than the range [first, last)
+ namespace KE = ::Kokkos::Experimental;
+ const auto num_elements = KE::distance(first, last);
+ const auto s_count = KE::distance(s_first, s_last);
+ KOKKOS_EXPECTS(num_elements >= s_count);
+
+ if (s_first == s_last) {
+ return last;
+ }
+
+ if (first == last) {
+ return last;
+ }
+
+ // special case where the two ranges have equal size
+ if (num_elements == s_count) {
+ const auto equal_result =
+ equal_team_impl(teamHandle, first, last, s_first, pred);
+ return (equal_result) ? first : last;
+ } else {
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = LastLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdFindEndFunctor<index_type, IteratorType1, IteratorType2,
+ reducer_type, BinaryPredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+
+ // decide the size of the range policy of the par_red:
+ // the last feasible start index is the one whose distance from "last"
+ // equals the sequence count; the +1 includes that location as well.
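+ // e.g. num_elements = 10 and s_count = 3 give range_size = 8: feasible
+ // start indices are 0..7, and a start at 7 still covers positions 7, 8, 9.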
+ const auto range_size = num_elements - s_count + 1;
+
+ // run par reduce
+ ::Kokkos::parallel_reduce(
+ TeamThreadRange(teamHandle, 0, range_size),
+ func_t(first, last, s_first, s_last, reducer, pred), reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
+ if (red_result.max_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::max()) {
+ // if here, a subrange has not been found
+ return last;
+ } else {
+ // a location has been found
+ return first + red_result.max_loc_true;
+ }
+ }
+}
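+
+// Note: LastLoc reduces to the *maximum* matching start index, which is what
+// makes this find_end (last occurrence) rather than search (first occurrence).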
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType1 find_end_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last) {
using value_type1 = typename IteratorType1::value_type;
using value_type2 = typename IteratorType2::value_type;
using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return find_end_impl(label, ex, first, last, s_first, s_last,
- predicate_type());
+ return find_end_team_impl(teamHandle, first, last, s_first, s_last,
+ predicate_type());
}
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_IMPL_HPP
}
}
- const auto rv =
- found ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
-
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::min()};
+ if (found) {
+ rv.min_loc_true = i;
+ }
m_reducer.join(red_value, rv);
}
m_p(std::move(p)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class BinaryPredicateType>
-IteratorType1 find_first_of_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last,
- const BinaryPredicateType& pred) {
+IteratorType1 find_first_of_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first, IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first, s_first);
Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
}
template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 find_first_of_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType1 first,
- IteratorType1 last, IteratorType2 s_first,
- IteratorType2 s_last) {
+IteratorType1 find_first_of_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first, IteratorType2 s_last) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return find_first_of_exespace_impl(label, ex, first, last, s_first, s_last,
+ predicate_type());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION IteratorType1
+find_first_of_team_impl(const TeamHandleType& teamHandle, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last, const BinaryPredicateType& pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, s_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+ Impl::expect_valid_range(first, last);
+ Impl::expect_valid_range(s_first, s_last);
+
+ if ((s_first == s_last) || (first == last)) {
+ return last;
+ }
+
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdFindFirstOfFunctor<index_type, IteratorType1, IteratorType2,
+ reducer_type, BinaryPredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ func_t(first, s_first, s_last, reducer, pred),
+ reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ // if here, nothing found
+ return last;
+ } else {
+ // a location has been found
+ return first + red_result.min_loc_true;
+ }
+}
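+
+// Note: FirstLoc reduces to the *minimum* matching index, so the first
+// element of [first, last) that matches any element of [s_first, s_last)
+// wins regardless of thread execution order.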
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType1 find_first_of_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last) {
using value_type1 = typename IteratorType1::value_type;
using value_type2 = typename IteratorType2::value_type;
using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return find_first_of_impl(label, ex, first, last, s_first, s_last,
- predicate_type());
+ return find_first_of_team_impl(teamHandle, first, last, s_first, s_last,
+ predicate_type());
}
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_AND_FIND_IF_NOT_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_FIND_IF_AND_FIND_IF_NOT_IMPL_HPP
// if doing find_if_not, look for when predicate is false
const bool found_condition = is_find_if ? m_p(my_value) : !m_p(my_value);
- auto rv =
- found_condition
- ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::min()};
+ if (found_condition) {
+ rv.min_loc_true = i;
+ }
m_reducer.join(red_value, rv);
}
m_p(std::move(p)) {}
};
+//
+// exespace impl
+//
template <bool is_find_if, class ExecutionSpace, class IteratorType,
class PredicateType>
-IteratorType find_if_or_not_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last, PredicateType pred) {
+IteratorType find_if_or_not_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ PredicateType pred) {
// checks
Impl::static_assert_random_access_and_accessible(
ex, first); // only need one It per type
}
template <class ExecutionSpace, class InputIterator, class T>
-InputIterator find_impl(const std::string& label, ExecutionSpace ex,
- InputIterator first, InputIterator last,
- const T& value) {
- return find_if_or_not_impl<true>(
+InputIterator find_exespace_impl(const std::string& label, ExecutionSpace ex,
+ InputIterator first, InputIterator last,
+ const T& value) {
+ return find_if_or_not_exespace_impl<true>(
label, ex, first, last,
::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
}
+//
+// team impl
+//
+template <bool is_find_if, class TeamHandleType, class IteratorType,
+ class PredicateType>
+KOKKOS_FUNCTION IteratorType
+find_if_or_not_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(
+ teamHandle, first); // only need one It per type
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return last;
+ }
+
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdFindIfOrNotFunctor<is_find_if, index_type, IteratorType,
+ reducer_type, PredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ func_t(first, reducer, pred), reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ // if here, a valid location has not been found
+ return last;
+ } else {
+ // a location has been found
+ return first + red_result.min_loc_true;
+ }
+}
+
+template <class TeamHandleType, class InputIterator, class T>
+KOKKOS_FUNCTION InputIterator find_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ const T& value) {
+ return find_if_or_not_team_impl<true>(
+ teamHandle, first, last,
+ ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class UnaryFunctorType>
+struct StdForEachFunctor {
+ using index_type = typename IteratorType::difference_type;
+ IteratorType m_first;
+ UnaryFunctorType m_functor;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_functor(m_first[i]); }
+
+ KOKKOS_FUNCTION
+ StdForEachFunctor(IteratorType _first, UnaryFunctorType _functor)
+ : m_first(std::move(_first)), m_functor(std::move(_functor)) {}
+};
+
+template <class HandleType, class IteratorType, class UnaryFunctorType>
+void for_each_exespace_impl(const std::string& label, const HandleType& handle,
+ IteratorType first, IteratorType last,
+ UnaryFunctorType functor) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(handle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<HandleType>(handle, 0, num_elements),
+ StdForEachFunctor<IteratorType, UnaryFunctorType>(first, functor));
+ handle.fence("Kokkos::for_each: fence after operation");
+}
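+
+// A minimal usage sketch through the public API (illustrative only; assumes
+// a device-accessible View v of int):
+//
+//   namespace KE = Kokkos::Experimental;
+//   KE::for_each("double_all", Kokkos::DefaultExecutionSpace(),
+//                KE::begin(v), KE::end(v),
+//                KOKKOS_LAMBDA(int& x) { x *= 2; });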
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+ class UnaryFunctorType>
+IteratorType for_each_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, SizeType n,
+ UnaryFunctorType functor) {
+ auto last = first + n;
+ Impl::static_assert_random_access_and_accessible(ex, first, last);
+ Impl::expect_valid_range(first, last);
+
+ if (n == 0) {
+ return first;
+ }
+
+ for_each_exespace_impl(label, ex, first, last, std::move(functor));
+ // no need to fence since for_each_exespace_impl fences already
+
+ return last;
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType, class UnaryFunctorType>
+KOKKOS_FUNCTION void for_each_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ UnaryFunctorType functor) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ StdForEachFunctor<IteratorType, UnaryFunctorType>(first, functor));
+ teamHandle.team_barrier();
+}
+
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class UnaryFunctorType>
+KOKKOS_FUNCTION IteratorType
+for_each_n_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ SizeType n, UnaryFunctorType functor) {
+ auto last = first + n;
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, last);
+ Impl::expect_valid_range(first, last);
+
+ if (n == 0) {
+ return first;
+ }
+
+ for_each_team_impl(teamHandle, first, last, std::move(functor));
+ // no need to fence since for_each_team_impl fences already
+
+ return last;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_FUNCTORS_FOR_EXCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FUNCTORS_FOR_EXCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <typename ValueType>
+using ex_scan_has_reduction_identity_sum_t =
+ decltype(Kokkos::reduction_identity<ValueType>::sum());
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest>
+struct ExclusiveScanDefaultFunctorForKnownNeutralElement {
+ using execution_space = ExeSpace;
+ ValueType m_init_value;
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+
+ KOKKOS_FUNCTION
+ ExclusiveScanDefaultFunctorForKnownNeutralElement(ValueType init,
+ FirstFrom first_from,
+ FirstDest first_dest)
+ : m_init_value(std::move(init)),
+ m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, ValueType& update,
+ const bool final_pass) const {
+ const auto tmp = m_first_from[i];
+ if (final_pass) m_first_dest[i] = update + m_init_value;
+ update += tmp;
+ }
+};
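+
+// Note: the functor above relies on ValueType providing
+// Kokkos::reduction_identity<ValueType>::sum(), so the scan accumulator can
+// start from the neutral element and no value wrapper is needed.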
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest>
+struct ExclusiveScanDefaultFunctorWithValueWrapper {
+ using execution_space = ExeSpace;
+ using value_type =
+ ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
+ ValueType m_init_value;
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+
+ KOKKOS_FUNCTION
+ ExclusiveScanDefaultFunctorWithValueWrapper(ValueType init,
+ FirstFrom first_from,
+ FirstDest first_dest)
+ : m_init_value(std::move(init)),
+ m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, value_type& update,
+ const bool final_pass) const {
+ const auto tmp = value_type{m_first_from[i], false};
+ if (final_pass) {
+ if (i == 0) {
+ m_first_dest[i] = m_init_value;
+ } else {
+ m_first_dest[i] = update.val + m_init_value;
+ }
+ }
+
+ this->join(update, tmp);
+ }
+
+ KOKKOS_FUNCTION
+ void init(value_type& update) const {
+ update.val = {};
+ update.is_initial = true;
+ }
+
+ KOKKOS_FUNCTION
+ void join(value_type& update, const value_type& input) const {
+ if (input.is_initial) return;
+
+ if (update.is_initial) {
+ update.val = input.val;
+ update.is_initial = false;
+ } else {
+ update.val = update.val + input.val;
+ }
+ }
+};
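+
+// Note: ValueWrapperForNoNeutralElement pairs the running value with an
+// is_initial flag so that join() can skip not-yet-initialized partial
+// results, emulating a neutral element for types that do not provide one.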
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest, class BinaryOpType, class UnaryOpType>
+struct TransformExclusiveScanFunctorWithValueWrapper {
+ using execution_space = ExeSpace;
+ using value_type =
+ ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
+
+ ValueType m_init_value;
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+
+ KOKKOS_FUNCTION
+ TransformExclusiveScanFunctorWithValueWrapper(ValueType init,
+ FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop)
+ : m_init_value(std::move(init)),
+ m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, value_type& update,
+ const bool final_pass) const {
+ const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+ if (final_pass) {
+ if (i == 0) {
+ // for both ExclusiveScan and TransformExclusiveScan,
+ // init is unmodified
+ m_first_dest[i] = m_init_value;
+ } else {
+ m_first_dest[i] = m_binary_op(update.val, m_init_value);
+ }
+ }
+
+ this->join(update, tmp);
+ }
+
+ KOKKOS_FUNCTION void init(value_type& value) const {
+ value.val = {};
+ value.is_initial = true;
+ }
+
+ KOKKOS_FUNCTION
+ void join(value_type& update, const value_type& input) const {
+ if (input.is_initial) return;
+
+ if (update.is_initial) {
+ update.val = input.val;
+ } else {
+ update.val = m_binary_op(update.val, input.val);
+ }
+ update.is_initial = false;
+ }
+};
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest, class BinaryOpType, class UnaryOpType>
+struct TransformExclusiveScanFunctorWithoutValueWrapper {
+ using execution_space = ExeSpace;
+
+ ValueType m_init_value;
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+
+ KOKKOS_FUNCTION
+ TransformExclusiveScanFunctorWithoutValueWrapper(ValueType init,
+ FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop)
+ : m_init_value(std::move(init)),
+ m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, ValueType& update,
+ const bool final_pass) const {
+ const auto tmp = ValueType{m_unary_op(m_first_from[i])};
+ if (final_pass) {
+ if (i == 0) {
+ // for both ExclusiveScan and TransformExclusiveScan,
+ // init is unmodified
+ m_first_dest[i] = m_init_value;
+ } else {
+ m_first_dest[i] = m_binary_op(update, m_init_value);
+ }
+ }
+
+ this->join(update, tmp);
+ }
+
+ KOKKOS_FUNCTION
+ void init(ValueType& update) const { update = {}; }
+
+ KOKKOS_FUNCTION
+ void join(ValueType& update, const ValueType& input) const {
+ update = m_binary_op(update, input);
+ }
+};
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class Generator>
+struct StdGenerateFunctor {
+ using index_type = typename IteratorType::difference_type;
+ IteratorType m_first;
+ Generator m_generator;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_first[i] = m_generator(); }
+
+ KOKKOS_FUNCTION
+ StdGenerateFunctor(IteratorType _first, Generator _g)
+ : m_first(std::move(_first)), m_generator(std::move(_g)) {}
+};
+
+//
+// generate impl
+//
+template <class ExecutionSpace, class IteratorType, class Generator>
+void generate_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ Generator g) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdGenerateFunctor(first, g));
+ ex.fence("Kokkos::generate: fence after operation");
+}
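+
+// A minimal usage sketch through the public API (illustrative only; the
+// generator must be callable on the device, hence KOKKOS_FUNCTION):
+//
+//   struct FortyTwo {
+//     KOKKOS_FUNCTION int operator()() const { return 42; }
+//   };
+//   namespace KE = Kokkos::Experimental;
+//   KE::generate(Kokkos::DefaultExecutionSpace(), KE::begin(v), KE::end(v),
+//                FortyTwo{});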
+
+template <class TeamHandleType, class IteratorType, class Generator>
+KOKKOS_FUNCTION void generate_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ Generator g) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdGenerateFunctor(first, g));
+ teamHandle.team_barrier();
+}
+
+//
+// generate_n impl
+//
+template <class ExecutionSpace, class IteratorType, class Size, class Generator>
+IteratorType generate_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, Size count,
+ Generator g) {
+ if (count <= 0) {
+ return first;
+ }
+
+ generate_exespace_impl(label, ex, first, first + count, g);
+ return first + count;
+}
+
+template <class TeamHandleType, class IteratorType, class Size, class Generator>
+KOKKOS_FUNCTION IteratorType
+generate_n_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ Size count, Generator g) {
+ if (count <= 0) {
+ return first;
+ }
+
+ generate_team_impl(teamHandle, first, first + count, g);
+ return first + count;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_HELPER_PREDICATES_HPP
#define KOKKOS_STD_ALGORITHMS_HELPER_PREDICATES_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
+#define KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType>
+struct StdNumericScanIdentityReferenceUnaryFunctor {
+ KOKKOS_FUNCTION
+ constexpr const ValueType& operator()(const ValueType& a) const { return a; }
+};
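+
+// Note: this identity functor is what lets the transform_*_scan machinery
+// also implement the plain (non-transform) scans: each element is passed
+// through unchanged.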
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_IMPL_HPP
KOKKOS_FUNCTION
void join(value_type& update, const value_type& input) const {
+ if (input.is_initial) return;
+
if (update.is_initial) {
update.val = input.val;
} else {
}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class InputIteratorType,
class OutputIteratorType>
-OutputIteratorType inclusive_scan_default_op_impl(
+OutputIteratorType inclusive_scan_default_op_exespace_impl(
const std::string& label, const ExecutionSpace& ex,
InputIteratorType first_from, InputIteratorType last_from,
OutputIteratorType first_dest) {
// -------------------------------------------------------------
template <class ExecutionSpace, class InputIteratorType,
class OutputIteratorType, class BinaryOpType>
-OutputIteratorType inclusive_scan_custom_binary_op_impl(
+OutputIteratorType inclusive_scan_custom_binary_op_exespace_impl(
const std::string& label, const ExecutionSpace& ex,
InputIteratorType first_from, InputIteratorType last_from,
OutputIteratorType first_dest, BinaryOpType binary_op) {
using value_type =
std::remove_const_t<typename InputIteratorType::value_type>;
using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<value_type>;
- using func_type = TransformInclusiveScanNoInitValueFunctor<
+ using func_type = ExeSpaceTransformInclusiveScanNoInitValueFunctor<
ExecutionSpace, index_type, value_type, InputIteratorType,
OutputIteratorType, BinaryOpType, unary_op_type>;
// -------------------------------------------------------------
template <class ExecutionSpace, class InputIteratorType,
class OutputIteratorType, class BinaryOpType, class ValueType>
-OutputIteratorType inclusive_scan_custom_binary_op_impl(
+OutputIteratorType inclusive_scan_custom_binary_op_exespace_impl(
const std::string& label, const ExecutionSpace& ex,
InputIteratorType first_from, InputIteratorType last_from,
OutputIteratorType first_dest, BinaryOpType binary_op,
// aliases
using index_type = typename InputIteratorType::difference_type;
using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
- using func_type = TransformInclusiveScanWithInitValueFunctor<
+ using func_type = ExeSpaceTransformInclusiveScanWithInitValueFunctor<
ExecutionSpace, index_type, ValueType, InputIteratorType,
OutputIteratorType, BinaryOpType, unary_op_type>;
::Kokkos::parallel_scan(label,
RangePolicy<ExecutionSpace>(ex, 0, num_elements),
func_type(first_from, first_dest, binary_op,
- unary_op_type(), init_value));
+ unary_op_type(), std::move(init_value)));
ex.fence("Kokkos::inclusive_scan_custom_binary_op: fence after operation");
// return
return first_dest + num_elements;
}
+//
+// team impl
+//
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType>
+KOKKOS_FUNCTION OutputIteratorType inclusive_scan_default_op_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ using value_type =
+ std::remove_const_t<typename InputIteratorType::value_type>;
+
+ using exe_space = typename TeamHandleType::execution_space;
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = std::conditional_t<
+ ::Kokkos::is_detected<in_scan_has_reduction_identity_sum_t,
+ value_type>::value,
+ InclusiveScanDefaultFunctorForKnownIdentityElement<
+ exe_space, index_type, value_type, InputIteratorType,
+ OutputIteratorType>,
+ InclusiveScanDefaultFunctor<exe_space, index_type, value_type,
+ InputIteratorType, OutputIteratorType>>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(first_from, first_dest));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
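+
+// A minimal usage sketch (illustrative only; view_from, view_dest and the
+// policy types are assumptions): the team-level impl above backs the team
+// overload of Kokkos::Experimental::inclusive_scan, which is meant to be
+// called from inside a team-parallel region, e.g.
+//
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<>(1, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
+//         Kokkos::Experimental::inclusive_scan(
+//             team, Kokkos::Experimental::cbegin(view_from),
+//             Kokkos::Experimental::cend(view_from),
+//             Kokkos::Experimental::begin(view_dest));
+//       });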
+
+// -------------------------------------------------------------
+// inclusive_scan_custom_binary_op_team_impl
+// -------------------------------------------------------------
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType>
+KOKKOS_FUNCTION OutputIteratorType inclusive_scan_custom_binary_op_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ BinaryOpType binary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ using value_type =
+ std::remove_const_t<typename InputIteratorType::value_type>;
+
+ static_assert(
+ ::Kokkos::is_detected_v<ex_scan_has_reduction_identity_sum_t, value_type>,
+ "At the moment inclusive_scan doesn't support types without reduction "
+ "identity");
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<value_type>;
+ using func_type = TeamTransformInclusiveScanNoInitValueFunctor<
+ exe_space, value_type, InputIteratorType, OutputIteratorType,
+ BinaryOpType, unary_op_type>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+
+ ::Kokkos::parallel_scan(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(first_from, first_dest, binary_op, unary_op_type()));
+ teamHandle.team_barrier();
+
+ return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// inclusive_scan_custom_binary_op_team_impl with init_value
+// -------------------------------------------------------------
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType, class ValueType>
+KOKKOS_FUNCTION OutputIteratorType inclusive_scan_custom_binary_op_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ BinaryOpType binary_op, ValueType init_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ static_assert(
+ ::Kokkos::is_detected_v<ex_scan_has_reduction_identity_sum_t, ValueType>,
+ "At the moment inclusive_scan doesn't support types without reduction "
+ "identity");
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
+ using func_type = TeamTransformInclusiveScanWithInitValueFunctor<
+ exe_space, ValueType, InputIteratorType, OutputIteratorType, BinaryOpType,
+ unary_op_type>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(first_from, first_dest, binary_op,
+ unary_op_type(), std::move(init_value)));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
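+
+// Semantics sketch (illustrative only): with binary_op = sum, an init_value
+// of 10 and input {1, 2, 3}, the scan above would produce
+//   dest[0] = 10 + 1      = 11
+//   dest[1] = dest[0] + 2 = 13
+//   dest[2] = dest[1] + 3 = 16
+// i.e. init_value participates in every prefix, as for std::inclusive_scan.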
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_IMPL_HPP
::Kokkos::reduction_identity<index_type>::min();
constexpr index_type m_red_id_max =
::Kokkos::reduction_identity<index_type>::max();
- auto rv = predicate_value ? red_value_type{i, m_red_id_min}
- : red_value_type{m_red_id_max, i};
+
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {m_red_id_max, i};
+ if (predicate_value) {
+ rv = {i, m_red_id_min};
+ }
m_reducer.join(redValue, rv);
}
};
template <class ExecutionSpace, class IteratorType, class PredicateType>
-bool is_partitioned_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- PredicateType pred) {
+bool is_partitioned_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, PredicateType pred) {
// true if all elements in the range [first, last) that satisfy
// the predicate "pred" appear before all elements that don't.
// Also returns true if [first, last) is empty.
const auto num_elements = Kokkos::Experimental::distance(first, last);
::Kokkos::parallel_reduce(label,
RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_t(first, reducer, pred), reducer);
+
+ // fence not needed because reducing into scalar
+
+ // decide and return
+ constexpr index_type red_id_min =
+ ::Kokkos::reduction_identity<index_type>::min();
+ constexpr index_type red_id_max =
+ ::Kokkos::reduction_identity<index_type>::max();
+
+ if (red_result.max_loc_true != red_id_max &&
+ red_result.min_loc_false != red_id_min) {
+ // this occurs when the reduction yields nontrivial values
+ return red_result.max_loc_true < red_result.min_loc_false;
+ } else if (red_result.max_loc_true == red_id_max &&
+ red_result.min_loc_false == 0) {
+ // this occurs when all values do NOT satisfy
+ // the predicate, and this corner case should also be true
+ return true;
+ } else if (first + red_result.max_loc_true == --last) {
+ // this occurs when all values satisfy the predicate,
+ // this corner case should also be true
+ return true;
+ } else {
+ return false;
+ }
+}
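+
+// Worked example (illustrative only) of the decision logic above, with
+// pred = "is even" (red_id_min/red_id_max denote the reduction identities):
+//   {2, 4, 1, 3} -> max_loc_true = 1, min_loc_false = 2 -> 1 < 2 -> true
+//   {1, 3}       -> max_loc_true = red_id_max, min_loc_false = 0 -> true
+//   {2, 4}       -> min_loc_false = red_id_min, first + 1 == --last -> true
+//   {1, 2}       -> max_loc_true = 1, min_loc_false = 0 -> 1 < 0 -> false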
+
+template <class TeamHandleType, class IteratorType, class PredicateType>
+KOKKOS_FUNCTION bool is_partitioned_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last,
+ PredicateType pred) {
+ /* see exespace impl for the description of the impl */
+
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // trivial case
+ if (first == last) {
+ return true;
+ }
+
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = StdIsPartitioned<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t =
+ StdIsPartitionedFunctor<IteratorType, reducer_type, PredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
func_t(first, reducer, pred), reducer);
// fence not needed because reducing into scalar
if (red_result.max_loc_true != red_id_max &&
red_result.min_loc_false != red_id_min) {
+ // this occurs when the reduction yields nontrivial values
return red_result.max_loc_true < red_result.min_loc_false;
+ } else if (red_result.max_loc_true == red_id_max &&
+ red_result.min_loc_false == 0) {
+ // this occurs when all values do NOT satisfy
+ // the predicate, and this corner case should also be true
+ return true;
} else if (first + red_result.max_loc_true == --last) {
+ // this occurs when all values satisfy the predicate,
+ // this corner case should also be true
return true;
} else {
return false;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_IS_SORTED_IMPL_HPP
: m_first(std::move(_first1)), m_comparator(std::move(comparator)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType, class ComparatorType>
-bool is_sorted_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ComparatorType comp) {
+bool is_sorted_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
}
template <class ExecutionSpace, class IteratorType>
-bool is_sorted_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
+bool is_sorted_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last) {
+ using value_type = typename IteratorType::value_type;
+ using pred_t = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
+ return is_sorted_exespace_impl(label, ex, first, last, pred_t());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType, class ComparatorType>
+KOKKOS_FUNCTION bool is_sorted_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ ComparatorType comp) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ if (num_elements <= 1) {
+ return true;
+ }
+
+ // use num_elements-1 because each index handles i and i+1
+ const auto num_elements_minus_one = num_elements - 1;
+
+ // result is incremented by one if sorting breaks at index i
+ std::size_t result = 0;
+ ::Kokkos::parallel_reduce(
+ TeamThreadRange(teamHandle, 0, num_elements_minus_one),
+ // use CTAD here
+ StdIsSortedFunctor(first, std::move(comp)), result);
+
+ return result == 0;
+}
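+
+// Usage sketch (illustrative only; team and view are assumptions): the impl
+// above backs the team overload of Kokkos::Experimental::is_sorted, e.g.
+//
+//   const bool sorted = Kokkos::Experimental::is_sorted(
+//       team, Kokkos::Experimental::cbegin(view),
+//       Kokkos::Experimental::cend(view));
+//
+// called from inside a team-parallel region; the team-level reduction gives
+// every member the same result.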
+
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION bool is_sorted_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last) {
using value_type = typename IteratorType::value_type;
using pred_t = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
- return is_sorted_impl(label, ex, first, last, pred_t());
+ return is_sorted_team_impl(teamHandle, first, last, pred_t());
}
} // namespace Impl
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_Find.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ComparatorType, class ReducerType>
+struct StdIsSortedUntilFunctor {
+ using index_type = typename IteratorType::difference_type;
+ using value_type = typename ReducerType::value_type;
+
+ IteratorType m_first;
+ ComparatorType m_comparator;
+ ReducerType m_reducer;
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, value_type& reduction_result) const {
+ const auto& val_i = m_first[i];
+ const auto& val_ip1 = m_first[i + 1];
+ if (m_comparator(val_ip1, val_i)) {
+ m_reducer.join(reduction_result, i);
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdIsSortedUntilFunctor(IteratorType first, ComparatorType comparator,
+ ReducerType reducer)
+ : m_first(std::move(first)),
+ m_comparator(std::move(comparator)),
+ m_reducer(std::move(reducer)) {}
+};
+
+//
+// overloads accepting exespace
+//
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+IteratorType is_sorted_until_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ ComparatorType comp) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+ // trivial case
+ if (num_elements <= 1) {
+ return last;
+ }
+
+ /*
+ Do a par_reduce computing the *min* index that breaks the sorting.
+ If such an index is found, then the range is sorted until that element.
+ If no such index is found, then the range is sorted until the end.
+ */
+ using index_type = typename IteratorType::difference_type;
+ index_type reduction_result;
+ ::Kokkos::Min<index_type> reducer(reduction_result);
+ ::Kokkos::parallel_reduce(
+ label,
+ // use num_elements-1 because each index handles i and i+1
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements - 1),
+ StdIsSortedUntilFunctor(first, comp, reducer), reducer);
+
+ /* If the reduction result is equal to the initial value,
+ it means the range is sorted until the end */
+ index_type reduction_result_init;
+ reducer.init(reduction_result_init);
+ if (reduction_result == reduction_result_init) {
+ return last;
+ } else {
+ /* If such an index is found, the range is sorted up to that point, and
+ we return an iterator one past the offending element, hence the +1 */
+ return first + (reduction_result + 1);
+ }
+}
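+
+// Worked example (illustrative only): for {1, 3, 2, 4} with the default
+// comparator, only i = 1 satisfies comp(first[2], first[1]) since 2 < 3,
+// so the min-reduction yields 1 and the function returns first + 2, the
+// iterator to the element 2 that breaks the ordering.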
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType is_sorted_until_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last) {
+ using value_type = typename IteratorType::value_type;
+ using pred_t = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
+ return is_sorted_until_exespace_impl(label, ex, first, last, pred_t());
+}
+
+//
+// overloads accepting team handle
+//
+template <class TeamHandleType, class IteratorType, class ComparatorType>
+KOKKOS_FUNCTION IteratorType
+is_sorted_until_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, ComparatorType comp) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+ // trivial case
+ if (num_elements <= 1) {
+ return last;
+ }
+
+ /*
+ Do a par_reduce computing the *min* index that breaks the sorting.
+ If such an index is found, then the range is sorted until that element;
+ if no such index is found, the range is sorted until the end.
+ */
+ using index_type = typename IteratorType::difference_type;
+ index_type red_result;
+ index_type red_result_init;
+ ::Kokkos::Min<index_type> reducer(red_result);
+ reducer.init(red_result_init);
+ // use num_elements-1 because each index handles i and i+1
+ ::Kokkos::parallel_reduce(
+ TeamThreadRange(teamHandle, 0, num_elements - 1),
+ StdIsSortedUntilFunctor(first, comp, reducer), reducer);
+ teamHandle.team_barrier();
+
+ /* If the reduction result is equal to the initial value,
+ it means the range is sorted until the end */
+ if (red_result == red_result_init) {
+ return last;
+ } else {
+ /* If such an index is found, the range is sorted up to that point, and
+ we return an iterator one past the offending element, hence the +1 */
+ return first + (red_result + 1);
+ }
+}
+
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType is_sorted_until_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last) {
+ using value_type = typename IteratorType::value_type;
+ using pred_t = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
+ return is_sorted_until_team_impl(teamHandle, first, last, pred_t());
+}
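+
+// Usage sketch (illustrative only; team and view are assumptions):
+//
+//   auto it = Kokkos::Experimental::is_sorted_until(
+//       team, Kokkos::Experimental::begin(view),
+//       Kokkos::Experimental::end(view));
+//   // it points to the first element breaking the order, or to end(view)
+//   // if the whole range is sorted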
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_IMPL_HPP
const auto& my_value1 = m_first1[i];
const auto& my_value2 = m_first2[i];
- bool different = m_comparator(my_value1, my_value2) ||
- m_comparator(my_value2, my_value1);
- auto rv =
- different
- ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+ const bool different = m_comparator(my_value1, my_value2) ||
+ m_comparator(my_value2, my_value1);
+
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::min()};
+ if (different) {
+ rv.min_loc_true = i;
+ }
m_reducer.join(red_value, rv);
}
m_comparator(std::move(_comp)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class ComparatorType>
-bool lexicographical_compare_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2,
- ComparatorType comp) {
+bool lexicographical_compare_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+ ComparatorType comp) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first1, first2);
Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
}
template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-bool lexicographical_compare_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType1 first1, IteratorType1 last1,
- IteratorType2 first2, IteratorType2 last2) {
+bool lexicographical_compare_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+ using value_type_1 = typename IteratorType1::value_type;
+ using value_type_2 = typename IteratorType2::value_type;
+ using predicate_t =
+ Impl::StdAlgoLessThanBinaryPredicate<value_type_1, value_type_2>;
+ return lexicographical_compare_exespace_impl(label, ex, first1, last1, first2,
+ last2, predicate_t());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class ComparatorType>
+KOKKOS_FUNCTION bool lexicographical_compare_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2, ComparatorType comp) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+ Impl::expect_valid_range(first2, last2);
+
+ // aliases
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // run
+ const auto d1 = Kokkos::Experimental::distance(first1, last1);
+ const auto d2 = Kokkos::Experimental::distance(first2, last2);
+ const auto range = Kokkos::min(d1, d2);
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ using func1_t =
+ StdLexicographicalCompareFunctor<index_type, IteratorType1, IteratorType2,
+ reducer_type, ComparatorType>;
+
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, range),
+ func1_t(first1, first2, reducer, comp), reducer);
+
+ teamHandle.team_barrier();
+
+ // no mismatch
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ auto new_last1 = first1 + range;
+ auto new_last2 = first2 + range;
+ bool is_prefix = (new_last1 == last1) && (new_last2 != last2);
+ return is_prefix;
+ }
+
+ // check mismatched
+ int less = 0;
+ auto it1 = first1 + red_result.min_loc_true;
+ auto it2 = first2 + red_result.min_loc_true;
+ using func2_t = StdCompareFunctor<index_type, IteratorType1, IteratorType2,
+ ComparatorType>;
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, 1),
+ func2_t(it1, it2, comp), less);
+
+ teamHandle.team_barrier();
+
+ return static_cast<bool>(less);
+}
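+
+// Worked example (illustrative only) of the two-phase logic above:
+//   {'a','p','p'} vs {'a','p','p','l','e'}: no mismatch over the common
+//     prefix and range 1 is a proper prefix of range 2 -> returns true;
+//   {'a','q'} vs {'a','p','p'}: first mismatch at index 1, and
+//     comp('q','p') is false -> returns false.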
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION bool lexicographical_compare_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
using value_type_1 = typename IteratorType1::value_type;
using value_type_2 = typename IteratorType2::value_type;
using predicate_t =
Impl::StdAlgoLessThanBinaryPredicate<value_type_1, value_type_2>;
- return lexicographical_compare_impl(label, ex, first1, last1, first2, last2,
- predicate_t());
+ return lexicographical_compare_team_impl(teamHandle, first1, last1, first2,
+ last2, predicate_t());
}
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_MIN_MAX_MINMAX_ELEMENT_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_MIN_MAX_MINMAX_ELEMENT_IMPL_HPP
: m_first(std::move(first)), m_reducer(std::move(reducer)) {}
};
+//
+// exespace impl
+//
template <template <class... Args> class ReducerType, class ExecutionSpace,
class IteratorType, class... Args>
-IteratorType min_or_max_element_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- Args&&... args) {
+IteratorType min_or_max_element_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ Args&&... args) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
template <template <class... Args> class ReducerType, class ExecutionSpace,
class IteratorType, class... Args>
-::Kokkos::pair<IteratorType, IteratorType> minmax_element_impl(
+::Kokkos::pair<IteratorType, IteratorType> minmax_element_exespace_impl(
const std::string& label, const ExecutionSpace& ex, IteratorType first,
IteratorType last, Args&&... args) {
// checks
return {first + red_result.min_loc, first + red_result.max_loc};
}
+//
+// team level impl
+//
+template <template <class... Args> class ReducerType, class TeamHandleType,
+ class IteratorType, class... Args>
+KOKKOS_FUNCTION IteratorType min_or_max_element_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ Args&&... args) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return last;
+ }
+
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using value_type = typename IteratorType::value_type;
+ using reducer_type = ReducerType<value_type, index_type, Args...>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdMinOrMaxElemFunctor<IteratorType, reducer_type>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result, std::forward<Args>(args)...);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ func_t(first, reducer), reducer);
+ teamHandle.team_barrier();
+ // note: the barrier may be unnecessary since we reduce into a scalar
+
+ // return
+ return first + red_result.loc;
+}
+
+template <template <class... Args> class ReducerType, class TeamHandleType,
+ class IteratorType, class... Args>
+KOKKOS_FUNCTION ::Kokkos::pair<IteratorType, IteratorType>
+minmax_element_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, Args&&... args) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return {first, first};
+ }
+
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using value_type = typename IteratorType::value_type;
+ using reducer_type = ReducerType<value_type, index_type, Args...>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdMinMaxElemFunctor<IteratorType, reducer_type>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result, std::forward<Args>(args)...);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ func_t(first, reducer), reducer);
+ teamHandle.team_barrier();
+ // note: the barrier may be unnecessary since we reduce into a scalar
+
+ // return
+ return {first + red_result.min_loc, first + red_result.max_loc};
+}
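+
+// Usage sketch (illustrative only; team and view are assumptions): the impl
+// above is instantiated with a min-max location reducer and backs the team
+// overload of Kokkos::Experimental::minmax_element, e.g.
+//
+//   auto itPair = Kokkos::Experimental::minmax_element(
+//       team, Kokkos::Experimental::begin(view),
+//       Kokkos::Experimental::end(view));
+//   // itPair.first points to the smallest element, itPair.second to the
+//   // largest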
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2, class ReducerType,
+ class BinaryPredicateType>
+struct StdMismatchRedFunctor {
+ using index_type = typename IteratorType1::difference_type;
+ using red_value_type = typename ReducerType::value_type;
+
+ IteratorType1 m_first1;
+ IteratorType2 m_first2;
+ ReducerType m_reducer;
+ BinaryPredicateType m_predicate;
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, red_value_type& red_value) const {
+ const auto& my_value1 = m_first1[i];
+ const auto& my_value2 = m_first2[i];
+
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {i};
+ if (m_predicate(my_value1, my_value2)) {
+ rv = {::Kokkos::reduction_identity<index_type>::min()};
+ }
+
+ m_reducer.join(red_value, rv);
+ }
+
+ KOKKOS_FUNCTION
+ StdMismatchRedFunctor(IteratorType1 first1, IteratorType2 first2,
+ ReducerType reducer, BinaryPredicateType predicate)
+ : m_first1(std::move(first1)),
+ m_first2(std::move(first2)),
+ m_reducer(std::move(reducer)),
+ m_predicate(std::move(predicate)) {}
+};
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+ BinaryPredicateType predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+ Impl::expect_valid_range(first2, last2);
+
+ // aliases
+ using return_type = ::Kokkos::pair<IteratorType1, IteratorType2>;
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // trivial case: note that this is important because,
+ // for OpenMPTarget, omitting special handling of
+ // the trivial case led to incorrect results.
+ const auto num_e1 = last1 - first1;
+ const auto num_e2 = last2 - first2;
+ if (num_e1 == 0 || num_e2 == 0) {
+ return return_type(first1, first2);
+ }
+
+ // run
+ const auto num_elements_par_reduce = (num_e1 <= num_e2) ? num_e1 : num_e2;
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ ::Kokkos::parallel_reduce(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_par_reduce),
+ // use CTAD
+ StdMismatchRedFunctor(first1, first2, reducer, std::move(predicate)),
+ reducer);
+
+ // fence not needed because reducing into scalar
+
+ // decide and return
+ constexpr auto red_min = ::Kokkos::reduction_identity<index_type>::min();
+ if (red_result.min_loc_true == red_min) {
+ // in here means mismatch has not been found
+ if (num_e1 == num_e2) {
+ return return_type(last1, last2);
+ } else if (num_e1 < num_e2) {
+ return return_type(last1, first2 + num_e1);
+ } else {
+ return return_type(first1 + num_e2, last2);
+ }
+ } else {
+ // in here means mismatch has been found
+ return return_type(first1 + red_result.min_loc_true,
+ first2 + red_result.min_loc_true);
+ }
+}
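+
+// Worked example (illustrative only) of the return logic above:
+//   {1, 2, 3} vs {1, 2, 3} -> no mismatch, equal lengths -> (last1, last2)
+//   {1, 2} vs {1, 2, 3}    -> no mismatch, num_e1 < num_e2
+//                             -> (last1, first2 + 2)
+//   {1, 9, 3} vs {1, 2, 3} -> mismatch at index 1 -> (first1 + 1, first2 + 1)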
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return mismatch_exespace_impl(label, ex, first1, last1, first2, last2,
+ pred_t());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION ::Kokkos::pair<IteratorType1, IteratorType2> mismatch_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2, BinaryPredicateType predicate) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+ Impl::expect_valid_range(first2, last2);
+
+ // aliases
+ using return_type = ::Kokkos::pair<IteratorType1, IteratorType2>;
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // trivial case: note that this is important because,
+ // for OpenMPTarget, omitting special handling of
+ // the trivial case led to incorrect results.
+ const auto num_e1 = last1 - first1;
+ const auto num_e2 = last2 - first2;
+ if (num_e1 == 0 || num_e2 == 0) {
+ return return_type(first1, first2);
+ }
+
+ // run
+ const auto num_elements_par_reduce = (num_e1 <= num_e2) ? num_e1 : num_e2;
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ ::Kokkos::parallel_reduce(
+ TeamThreadRange(teamHandle, 0, num_elements_par_reduce),
+ // use CTAD
+ StdMismatchRedFunctor(first1, first2, reducer, std::move(predicate)),
+ reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
+ constexpr auto red_min = ::Kokkos::reduction_identity<index_type>::min();
+ if (red_result.min_loc_true == red_min) {
+ // in here means mismatch has not been found
+ if (num_e1 == num_e2) {
+ return return_type(last1, last2);
+ } else if (num_e1 < num_e2) {
+ return return_type(last1, first2 + num_e1);
+ } else {
+ return return_type(first1 + num_e2, last2);
+ }
+ } else {
+ // in here means mismatch has been found
+ return return_type(first1 + red_result.min_loc_true,
+ first2 + red_result.min_loc_true);
+ }
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION ::Kokkos::pair<IteratorType1, IteratorType2> mismatch_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, IteratorType2 last2) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return mismatch_team_impl(teamHandle, first1, last1, first2, last2, pred_t());
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator>
+struct StdMoveFunctor {
+ InputIterator m_first;
+ OutputIterator m_dest_first;
+
+ KOKKOS_FUNCTION
+ void operator()(IndexType i) const {
+ m_dest_first[i] = std::move(m_first[i]);
+ }
+
+ KOKKOS_FUNCTION StdMoveFunctor(InputIterator _first,
+ OutputIterator _dest_first)
+ : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator move_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // aliases
+ using index_type = typename InputIterator::difference_type;
+ using func_t = StdMoveFunctor<index_type, InputIterator, OutputIterator>;
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_t(first, d_first));
+ ex.fence("Kokkos::move: fence after operation");
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class TeamHandleType, class InputIterator, class OutputIterator>
+KOKKOS_FUNCTION OutputIterator move_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // aliases
+ using index_type = typename InputIterator::difference_type;
+ using func_t = StdMoveFunctor<index_type, InputIterator, OutputIterator>;
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ func_t(first, d_first));
+ teamHandle.team_barrier();
+
+ // return
+ return d_first + num_elements;
+}
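+
+// Usage sketch (illustrative only; team, src and dst are assumptions): as
+// with std::move, the source elements are left in a valid but unspecified
+// moved-from state, e.g.
+//
+//   Kokkos::Experimental::move(team, Kokkos::Experimental::begin(src),
+//                              Kokkos::Experimental::end(src),
+//                              Kokkos::Experimental::begin(dst));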
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2>
+struct StdMoveBackwardFunctor {
+ using index_type = typename IteratorType1::difference_type;
+ static_assert(std::is_signed_v<index_type>,
+ "Kokkos: StdMoveBackwardFunctor requires signed index type");
+
+ IteratorType1 m_last;
+ IteratorType2 m_dest_last;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ m_dest_last[-i - 1] = std::move(m_last[-i - 1]);
+ }
+
+ KOKKOS_FUNCTION
+ StdMoveBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
+ : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
+};
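+
+// Index-mapping example (illustrative only): for i = 0 the functor moves
+// m_last[-1], the last source element, into m_dest_last[-1], the slot just
+// before d_last; i = 1 handles the second-to-last element, and so on. This
+// is why the impls below return d_last - num_elements.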
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 move_backward_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 d_last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_last);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdMoveBackwardFunctor(last, d_last));
+ ex.fence("Kokkos::move_backward: fence after operation");
+
+ // return
+ return d_last - num_elements;
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType2
+move_backward_team_impl(const TeamHandleType& teamHandle, IteratorType1 first,
+ IteratorType1 last, IteratorType2 d_last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_last);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdMoveBackwardFunctor(last, d_last));
+ teamHandle.team_barrier();
+
+ // return
+ return d_last - num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_MUSTUSEKOKKOSSINGLEINTEAM_HPP
+#define KOKKOS_STD_ALGORITHMS_MUSTUSEKOKKOSSINGLEINTEAM_HPP
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <typename T>
+struct stdalgo_must_use_kokkos_single_for_team_scan : std::false_type {};
+
+// the following do not support the overload for team-level scan
+// accepting an "out" value to store the scan result
+
+// FIXME_OPENACC
+#if defined(KOKKOS_ENABLE_OPENACC)
+template <>
+struct stdalgo_must_use_kokkos_single_for_team_scan<
+ Kokkos::Experimental::OpenACC> : std::true_type {};
+#endif
+
+template <typename T>
+inline constexpr bool stdalgo_must_use_kokkos_single_for_team_scan_v =
+ stdalgo_must_use_kokkos_single_for_team_scan<T>::value;
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
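+
+// Usage sketch (illustrative only): a team-level scan impl could use the
+// trait above to fall back to a serial Kokkos::single on backends lacking
+// the needed parallel_scan overload, e.g.
+//
+//   using exe_space = typename TeamHandleType::execution_space;
+//   if constexpr (
+//       stdalgo_must_use_kokkos_single_for_team_scan_v<exe_space>) {
+//     // serial fallback inside Kokkos::single(Kokkos::PerTeam(...), ...)
+//   } else {
+//     ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, n), func);
+//   }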
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_COPY_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_PARTITION_COPY_IMPL_HPP
struct StdPartitionCopyScalar {
ValueType true_count_;
ValueType false_count_;
-
- // Here we implement the copy assignment operators explicitly for consistency
- // with how the Scalar structs are implemented inside
- // Kokkos_Parallel_Reduce.hpp.
- KOKKOS_FUNCTION
- void operator=(const StdPartitionCopyScalar& other) {
- true_count_ = other.true_count_;
- false_count_ = other.false_count_;
- }
-
- // this is needed for
- // OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp:699:21: error: no viable
- // overloaded '=' m_returnvalue = 0;
- //
- KOKKOS_FUNCTION
- void operator=(const ValueType value) {
- true_count_ = value;
- false_count_ = value;
- }
};
-template <class IndexType, class FirstFrom, class FirstDestTrue,
- class FirstDestFalse, class PredType>
+template <class FirstFrom, class FirstDestTrue, class FirstDestFalse,
+ class PredType>
struct StdPartitionCopyFunctor {
- using value_type = StdPartitionCopyScalar<IndexType>;
+ using index_type = typename FirstFrom::difference_type;
+ using value_type = StdPartitionCopyScalar<index_type>;
FirstFrom m_first_from;
FirstDestTrue m_first_dest_true;
m_pred(std::move(pred)) {}
KOKKOS_FUNCTION
- void operator()(const IndexType i, value_type& update,
+ void operator()(const index_type i, value_type& update,
const bool final_pass) const {
const auto& myval = m_first_from[i];
if (final_pass) {
class OutputIteratorTrueType, class OutputIteratorFalseType,
class PredicateType>
::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType>
-partition_copy_impl(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType from_first, InputIteratorType from_last,
- OutputIteratorTrueType to_first_true,
- OutputIteratorFalseType to_first_false,
- PredicateType pred) {
+partition_copy_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType from_first,
+ InputIteratorType from_last,
+ OutputIteratorTrueType to_first_true,
+ OutputIteratorFalseType to_first_false,
+ PredicateType pred) {
// impl uses a scan, similar to how copy_if is implemented
// checks
return {to_first_true, to_first_false};
}
- // aliases
- using index_type = typename InputIteratorType::difference_type;
using func_type =
- StdPartitionCopyFunctor<index_type, InputIteratorType,
- OutputIteratorTrueType, OutputIteratorFalseType,
- PredicateType>;
+ StdPartitionCopyFunctor<InputIteratorType, OutputIteratorTrueType,
+ OutputIteratorFalseType, PredicateType>;
// run
const auto num_elements =
to_first_false + counts.false_count_};
}
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorTrueType, class OutputIteratorFalseType,
+ class PredicateType>
+KOKKOS_FUNCTION ::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType>
+partition_copy_team_impl(const TeamHandleType& teamHandle,
+ InputIteratorType from_first,
+ InputIteratorType from_last,
+ OutputIteratorTrueType to_first_true,
+ OutputIteratorFalseType to_first_false,
+ PredicateType pred) {
+ // impl uses a scan, similar to how copy_if is implemented
+
+ // checks
+ Impl::static_assert_random_access_and_accessible(
+ teamHandle, from_first, to_first_true, to_first_false);
+ Impl::static_assert_iterators_have_matching_difference_type(
+ from_first, to_first_true, to_first_false);
+ Impl::expect_valid_range(from_first, from_last);
+
+ if (from_first == from_last) {
+ return {to_first_true, to_first_false};
+ }
+
+ const std::size_t num_elements =
+ Kokkos::Experimental::distance(from_first, from_last);
+
+ // FIXME: there is no parallel_scan overload that accepts TeamThreadRange and
+ // a return value, so a serial implementation is used here for now
+ using counts_t = ::Kokkos::pair<std::size_t, std::size_t>;
+ counts_t counts = {};
+ Kokkos::single(
+ Kokkos::PerTeam(teamHandle),
+ [=](counts_t& lcounts) {
+ lcounts = {};
+ for (std::size_t i = 0; i < num_elements; ++i) {
+ const auto& myval = from_first[i];
+ if (pred(myval)) {
+ to_first_true[lcounts.first++] = myval;
+ } else {
+ to_first_false[lcounts.second++] = myval;
+ }
+ }
+ },
+ counts);
+ // no barrier needed since single above broadcasts to all members
+
+ return {to_first_true + counts.first, to_first_false + counts.second};
+}
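+
+// Note (illustrative only): the Kokkos::single overload used above takes a
+// functor with a value argument plus a trailing result; the value computed
+// by the one executing thread is broadcast to every team member, which is
+// why counts is valid on all members without an explicit barrier.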
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_POINT_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_PARTITION_POINT_IMPL_HPP
KOKKOS_FUNCTION
void operator()(const index_type i, red_value_type& redValue) const {
const auto predicate_value = m_p(m_first[i]);
- auto rv =
- predicate_value
- ? red_value_type{::Kokkos::reduction_identity<index_type>::min()}
- : red_value_type{i};
+
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {i};
+ if (predicate_value) {
+ rv = {::Kokkos::reduction_identity<index_type>::min()};
+ }
+
m_reducer.join(redValue, rv);
}
};
template <class ExecutionSpace, class IteratorType, class PredicateType>
-IteratorType partition_point_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last, PredicateType pred) {
+IteratorType partition_point_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first,
+ IteratorType last,
+ PredicateType pred) {
// locates the end of the first partition, that is, the first
// element that does not satisfy p, or last if all elements satisfy p.
// Implementation below finds the first location where p is false.
}
}
+template <class TeamHandleType, class IteratorType, class PredicateType>
+KOKKOS_FUNCTION IteratorType
+partition_point_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return first;
+ }
+
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = StdPartitionPoint<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ StdPartitionPointFunctor(first, reducer, pred),
+ reducer);
+
+ // fence not needed because reducing into scalar
+
+ // decide and return
+ if (red_result.min_loc_false ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ // if all elements are true, return last
+ return last;
+ } else {
+ return first + red_result.min_loc_false;
+ }
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
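
Conceptually, the reduction above is just a min-reduction over the indices at which the predicate is false. A standalone sketch of that idea (a hypothetical helper, not part of the patch; pred must be device-callable):

#include <Kokkos_Core.hpp>
#include <cstddef>

// Smallest index i in [0, n) with !pred(v(i)); returns n when the predicate
// holds everywhere, mirroring how partition_point returns last.
template <class ViewType, class Pred>
std::size_t first_false_index(const ViewType& v, Pred pred) {
  const std::size_t n = v.extent(0);
  std::size_t min_loc_false;
  Kokkos::parallel_reduce(
      "first_false_index", n,
      KOKKOS_LAMBDA(const std::size_t i, std::size_t& lmin) {
        if (!pred(v(i)) && i < lmin) lmin = i;
      },
      Kokkos::Min<std::size_t>(min_loc_false));
  // the Min identity (max representable value) signals "not found"
  return (min_loc_false == Kokkos::reduction_identity<std::size_t>::min())
             ? n
             : min_loc_false;
}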
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_RANDOM_ACCESS_ITERATOR_IMPL_HPP
#define KOKKOS_RANDOM_ACCESS_ITERATOR_IMPL_HPP
using iterator_type = RandomAccessIterator<view_type>;
using iterator_category = std::random_access_iterator_tag;
- using value_type = typename view_type::value_type;
+ using value_type = typename view_type::non_const_value_type;
using difference_type = ptrdiff_t;
using pointer = typename view_type::pointer_type;
using reference = typename view_type::reference_type;
static_assert(view_type::rank == 1 &&
- (std::is_same<typename view_type::traits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename view_type::traits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename view_type::traits::array_layout,
- Kokkos::LayoutStride>::value),
+ (std::is_same_v<typename view_type::traits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename view_type::traits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename view_type::traits::array_layout,
+ Kokkos::LayoutStride>),
"RandomAccessIterator only supports 1D Views with LayoutLeft, "
"LayoutRight, LayoutStride.");
ptrdiff_t current_index)
: m_view(view), m_current_index(current_index) {}
+#ifndef KOKKOS_ENABLE_CXX17 // C++20 and beyond
+ template <class OtherViewType>
+ requires(std::is_constructible_v<view_type, OtherViewType>)
+ KOKKOS_FUNCTION explicit(!std::is_convertible_v<OtherViewType, view_type>)
+ RandomAccessIterator(const RandomAccessIterator<OtherViewType>& other)
+ : m_view(other.m_view), m_current_index(other.m_current_index) {}
+#else
+ template <
+ class OtherViewType,
+ std::enable_if_t<std::is_constructible_v<view_type, OtherViewType> &&
+ !std::is_convertible_v<OtherViewType, view_type>,
+ int> = 0>
+ KOKKOS_FUNCTION explicit RandomAccessIterator(
+ const RandomAccessIterator<OtherViewType>& other)
+ : m_view(other.m_view), m_current_index(other.m_current_index) {}
+
+ template <class OtherViewType,
+ std::enable_if_t<std::is_convertible_v<OtherViewType, view_type>,
+ int> = 0>
+ KOKKOS_FUNCTION RandomAccessIterator(
+ const RandomAccessIterator<OtherViewType>& other)
+ : m_view(other.m_view), m_current_index(other.m_current_index) {}
+#endif
+
KOKKOS_FUNCTION
iterator_type& operator++() {
++m_current_index;
KOKKOS_FUNCTION
reference operator*() const { return m_view(m_current_index); }
+ KOKKOS_FUNCTION
+ view_type view() const { return m_view; }
+
private:
view_type m_view;
ptrdiff_t m_current_index = 0;
+
+ // Needed for the converting constructor accepting another iterator
+ template <class>
+ friend class RandomAccessIterator;
};
} // namespace Impl
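
The net effect of the converting constructors added above is the familiar iterator-to-const_iterator conversion. A minimal sketch, assuming only what the patch itself adds (the view is illustrative):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void iterator_conversion_sketch() {
  namespace KE = Kokkos::Experimental;
  Kokkos::View<int*> v("v", 10);
  // View<int*> is implicitly convertible to View<const int*>, so the
  // matching iterator conversion is implicit too; the explicit overload
  // covers views that are constructible but not convertible.
  auto it = KE::begin(v);
  KE::Impl::RandomAccessIterator<Kokkos::View<const int*>> cit = it;
  (void)cit;
}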
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_REDUCE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_REDUCE_IMPL_HPP
: m_first(std::move(first)), m_reducer(std::move(reducer)) {}
};
-//------------------------------
-// reduce_custom_functors_impl
-//------------------------------
+template <typename ValueType>
+using has_reduction_identity_sum_t =
+ decltype(Kokkos::reduction_identity<ValueType>::sum());
+
+//
+// exespace impl
+//
+
+//-------------------------------------
+// reduce_custom_functors_exespace_impl
+//-------------------------------------
template <class ExecutionSpace, class IteratorType, class ValueType,
class JoinerType>
-ValueType reduce_custom_functors_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ValueType init_reduction_value,
- JoinerType joiner) {
+ValueType reduce_custom_functors_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ValueType init_reduction_value, JoinerType joiner) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::static_assert_is_not_openmptarget(ex);
// aliases
using reducer_type =
ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
- using functor_type = StdReduceFunctor<IteratorType, reducer_type>;
using reduction_value_type = typename reducer_type::value_type;
// run
const auto num_elements = Kokkos::Experimental::distance(first, last);
::Kokkos::parallel_reduce(label,
RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- functor_type(first, reducer), reducer);
+ StdReduceFunctor(first, reducer), reducer);
// fence not needed since reducing into scalar
return joiner(result.val, init_reduction_value);
}
-template <typename ValueType>
-using has_reduction_identity_sum_t =
- decltype(Kokkos::reduction_identity<ValueType>::sum());
-
template <class ExecutionSpace, class IteratorType, class ValueType>
-ValueType reduce_default_functors_impl(const std::string& label,
- const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- ValueType init_reduction_value) {
+ValueType reduce_default_functors_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, ValueType init_reduction_value) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::static_assert_is_not_openmptarget(ex);
return tmp;
} else {
using joiner_type = Impl::StdReduceDefaultJoinFunctor<value_type>;
- return reduce_custom_functors_impl(
+ return reduce_custom_functors_exespace_impl(
label, ex, first, last, std::move(init_reduction_value), joiner_type());
}
}
+//
+// team impl
+//
+
+//---------------------------------
+// reduce_custom_functors_team_impl
+//---------------------------------
+template <class TeamHandleType, class IteratorType, class ValueType,
+ class JoinerType>
+KOKKOS_FUNCTION ValueType reduce_custom_functors_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ ValueType init_reduction_value, JoinerType joiner) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ // init is returned, unmodified
+ return init_reduction_value;
+ }
+
+ // aliases
+ using reducer_type =
+ ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // run
+ reduction_value_type result;
+ reducer_type reducer(result, joiner);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ StdReduceFunctor(first, reducer), reducer);
+
+ teamHandle.team_barrier();
+
+ return joiner(result.val, init_reduction_value);
+}
+
+template <class TeamHandleType, class IteratorType, class ValueType>
+KOKKOS_FUNCTION ValueType reduce_default_functors_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ ValueType init_reduction_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::expect_valid_range(first, last);
+
+ using value_type = Kokkos::Impl::remove_cvref_t<ValueType>;
+
+ // note: this must be "if constexpr": the default-functor branch only
+ // compiles for types that provide reduction_identity<T>::sum()
+ if constexpr (::Kokkos::is_detected<has_reduction_identity_sum_t,
+ value_type>::value) {
+ if (first == last) {
+ // init is returned, unmodified
+ return init_reduction_value;
+ }
+
+ using functor_type =
+ Impl::StdReduceDefaultFunctor<IteratorType, value_type>;
+
+ // run
+ value_type tmp;
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ functor_type{first}, tmp);
+
+ teamHandle.team_barrier();
+
+ tmp += init_reduction_value;
+ return tmp;
+ } else {
+ using joiner_type = Impl::StdReduceDefaultJoinFunctor<value_type>;
+ return reduce_custom_functors_team_impl(teamHandle, first, last,
+ std::move(init_reduction_value),
+ joiner_type());
+ }
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
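
A usage sketch for the team-level paths above: each team reduces its own row with a custom joiner. The names are illustrative, and it assumes the public Kokkos::Experimental::reduce team-level overload that dispatches to reduce_custom_functors_team_impl.

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void row_maxima(int nrows, int ncols) {
  namespace KE = Kokkos::Experimental;
  Kokkos::View<double**> data("data", nrows, ncols);
  Kokkos::View<double*> row_max("row_max", nrows);

  Kokkos::parallel_for(
      Kokkos::TeamPolicy<>(nrows, Kokkos::AUTO),
      KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
        const int r = team.league_rank();
        auto row    = Kokkos::subview(data, r, Kokkos::ALL);
        // custom joiner (max); every team member gets the same result back
        const double m =
            KE::reduce(team, KE::begin(row), KE::end(row), 0.0,
                       [](double a, double b) { return a > b ? a : b; });
        Kokkos::single(Kokkos::PerTeam(team), [&]() { row_max(r) = m; });
      });
}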
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_REDUCER_WITH_ARBITRARY_JOINER_NONEUTRAL_ELEMENT_HPP
#define KOKKOS_STD_ALGORITHMS_REDUCER_WITH_ARBITRARY_JOINER_NONEUTRAL_ELEMENT_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_REMOVE_IMPL_HPP
void operator()(const IndexType i, IndexType& update,
const bool final_pass) const {
auto& myval = m_first_from[i];
- if (final_pass) {
- if (!m_must_remove(myval)) {
+
+ if (!m_must_remove(myval)) {
+ if (final_pass) {
// calling move here is ok because we are inside final pass
// we are calling move assign as specified by the std
m_first_dest[update] = std::move(myval);
}
- }
- if (!m_must_remove(myval)) {
update += 1;
}
}
}
};
+//
+// remove if
+//
template <class ExecutionSpace, class IteratorType, class UnaryPredicateType>
-IteratorType remove_if_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- UnaryPredicateType pred) {
+IteratorType remove_if_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ UnaryPredicateType pred) {
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
// create helper tmp view
using value_type = typename IteratorType::value_type;
using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
- tmp_view_type tmp_view("std_remove_if_tmp_view", keep_count);
+ tmp_view_type tmp_view(Kokkos::view_alloc(Kokkos::WithoutInitializing, ex,
+ "std_remove_if_tmp_view"),
+ keep_count);
using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
// in stage 1, *move* all elements to keep from original range to tmp
}
}
+template <class TeamHandleType, class IteratorType, class UnaryPredicateType>
+KOKKOS_FUNCTION IteratorType
+remove_if_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, UnaryPredicateType pred) {
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ return last;
+ } else {
+ const auto remove_count =
+ ::Kokkos::Experimental::count_if(teamHandle, first, last, pred);
+ const std::size_t num_elements =
+ ::Kokkos::Experimental::distance(first, last);
+
+ if (remove_count > 0) {
+ std::size_t count = 0;
+ Kokkos::single(
+ Kokkos::PerTeam(teamHandle),
+ [=](std::size_t& lcount) {
+ lcount = 0;
+ for (std::size_t i = 0; i < num_elements; ++i) {
+ if (!pred(first[i])) {
+ first[lcount++] = std::move(first[i]);
+ }
+ }
+ },
+ count);
+ }
+ // no barrier needed since single above broadcasts to all members
+
+ return first + num_elements - remove_count;
+ }
+}
+
+//
+// remove
+//
template <class ExecutionSpace, class IteratorType, class ValueType>
-auto remove_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- const ValueType& value) {
+auto remove_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ const ValueType& value) {
+ using predicate_type = StdAlgoEqualsValUnaryPredicate<ValueType>;
+ return remove_if_exespace_impl(label, ex, first, last, predicate_type(value));
+}
+
+template <class TeamHandleType, class IteratorType, class ValueType>
+KOKKOS_FUNCTION auto remove_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ const ValueType& value) {
using predicate_type = StdAlgoEqualsValUnaryPredicate<ValueType>;
- return remove_if_impl(label, ex, first, last, predicate_type(value));
+ return remove_if_team_impl(teamHandle, first, last, predicate_type(value));
}
+//
+// remove_copy
+//
template <class ExecutionSpace, class InputIteratorType,
class OutputIteratorType, class ValueType>
-auto remove_copy_impl(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from, InputIteratorType last_from,
- OutputIteratorType first_dest, const ValueType& value) {
+auto remove_copy_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ const ValueType& value) {
// this is like copy_if except that we need to *ignore* the elements
// that match the value, so we can solve this as follows:
first_dest, predicate_type(value));
}
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class ValueType>
+KOKKOS_FUNCTION auto remove_copy_team_impl(const TeamHandleType& teamHandle,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ const ValueType& value) {
+ // this is like copy_if except that we need to *ignore* the elements
+ // that match the value, so we can solve this as follows:
+
+ using predicate_type = StdAlgoNotEqualsValUnaryPredicate<ValueType>;
+ return ::Kokkos::Experimental::copy_if(teamHandle, first_from, last_from,
+ first_dest, predicate_type(value));
+}
+
+//
+// remove_copy_if
+//
template <class ExecutionSpace, class InputIteratorType,
class OutputIteratorType, class UnaryPredicate>
-auto remove_copy_if_impl(const std::string& label, const ExecutionSpace& ex,
- InputIteratorType first_from,
- InputIteratorType last_from,
- OutputIteratorType first_dest,
- const UnaryPredicate& pred) {
+auto remove_copy_if_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ const UnaryPredicate& pred) {
// this is like copy_if except that we need to *ignore* the elements
// satisfying the pred, so we can solve this as follows:
first_dest, pred_wrapper_type(pred));
}
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class UnaryPredicate>
+KOKKOS_FUNCTION auto remove_copy_if_team_impl(const TeamHandleType& teamHandle,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ const UnaryPredicate& pred) {
+ using value_type = typename InputIteratorType::value_type;
+ using pred_wrapper_type =
+ StdAlgoNegateUnaryPredicateWrapper<value_type, UnaryPredicate>;
+ return ::Kokkos::Experimental::copy_if(teamHandle, first_from, last_from,
+ first_dest, pred_wrapper_type(pred));
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
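
For reference, a small sketch of the semantics implemented above, via the public exespace remove_if overload (names illustrative): as with std::remove_if, the kept elements are compacted to the front and an iterator to the new logical end is returned.

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <cstddef>

std::size_t drop_negatives(Kokkos::View<int*> v) {
  namespace KE = Kokkos::Experimental;
  auto new_last =
      KE::remove_if(Kokkos::DefaultExecutionSpace(), KE::begin(v), KE::end(v),
                    KOKKOS_LAMBDA(int x) { return x < 0; });
  // elements in [new_last, end) are left in a valid but unspecified state
  return static_cast<std::size_t>(KE::distance(KE::begin(v), new_last));
}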
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class ValueType>
+struct StdReplaceFunctor {
+ using index_type = typename InputIterator::difference_type;
+ InputIterator m_first;
+ ValueType m_old_value;
+ ValueType m_new_value;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ if (m_first[i] == m_old_value) {
+ m_first[i] = m_new_value;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdReplaceFunctor(InputIterator first, ValueType old_value,
+ ValueType new_value)
+ : m_first(std::move(first)),
+ m_old_value(std::move(old_value)),
+ m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class ValueType>
+void replace_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdReplaceFunctor(first, old_value, new_value));
+ ex.fence("Kokkos::replace: fence after operation");
+}
+
+template <class TeamHandleType, class IteratorType, class ValueType>
+KOKKOS_FUNCTION void replace_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdReplaceFunctor(first, old_value, new_value));
+ teamHandle.team_barrier();
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
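
A brief usage sketch for the entry points above (illustrative values; the public Kokkos::Experimental::replace overloads forward here). Note that the exespace form fences before returning, while the team form issues a team_barrier instead.

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void replace_sketch(Kokkos::View<int*> v) {
  namespace KE = Kokkos::Experimental;
  KE::replace(Kokkos::DefaultExecutionSpace(), KE::begin(v), KE::end(v),
              /*old_value=*/0, /*new_value=*/-1);
}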
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator, class ValueType>
+struct StdReplaceCopyFunctor {
+ using index_type = typename InputIterator::difference_type;
+
+ InputIterator m_first_from;
+ OutputIterator m_first_dest;
+ ValueType m_old_value;
+ ValueType m_new_value;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ const auto& myvalue_from = m_first_from[i];
+
+ if (myvalue_from == m_old_value) {
+ m_first_dest[i] = m_new_value;
+ } else {
+ m_first_dest[i] = myvalue_from;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdReplaceCopyFunctor(InputIterator first_from, OutputIterator first_dest,
+ ValueType old_value, ValueType new_value)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_old_value(std::move(old_value)),
+ m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class ValueType>
+OutputIteratorType replace_copy_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ const ValueType& old_value,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdReplaceCopyFunctor(first_from, first_dest, old_value, new_value));
+ ex.fence("Kokkos::replace_copy: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class ValueType>
+KOKKOS_FUNCTION OutputIteratorType replace_copy_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ const ValueType& old_value, const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ StdReplaceCopyFunctor(first_from, first_dest, old_value, new_value));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator, class PredicateType,
+ class ValueType>
+struct StdReplaceIfCopyFunctor {
+ using index_type = typename InputIterator::difference_type;
+
+ InputIterator m_first_from;
+ OutputIterator m_first_dest;
+ PredicateType m_pred;
+ ValueType m_new_value;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ const auto& myvalue_from = m_first_from[i];
+
+ if (m_pred(myvalue_from)) {
+ m_first_dest[i] = m_new_value;
+ } else {
+ m_first_dest[i] = myvalue_from;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdReplaceIfCopyFunctor(InputIterator first_from, OutputIterator first_dest,
+ PredicateType pred, ValueType new_value)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_pred(std::move(pred)),
+ m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class PredicateType, class ValueType>
+OutputIteratorType replace_copy_if_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIteratorType first_from,
+ InputIteratorType last_from,
+ OutputIteratorType first_dest,
+ PredicateType pred,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ // use CTAD
+ StdReplaceIfCopyFunctor(first_from, first_dest,
+ std::move(pred), new_value));
+ ex.fence("Kokkos::replace_copy_if: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+//
+// team-level impl
+//
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class PredicateType, class ValueType>
+KOKKOS_FUNCTION OutputIteratorType replace_copy_if_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ PredicateType pred, const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ // use CTAD
+ StdReplaceIfCopyFunctor(first_from, first_dest,
+ std::move(pred), new_value));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class PredicateType, class NewValueType>
+struct StdReplaceIfFunctor {
+ using index_type = typename InputIterator::difference_type;
+
+ InputIterator m_first;
+ PredicateType m_predicate;
+ NewValueType m_new_value;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ if (m_predicate(m_first[i])) {
+ m_first[i] = m_new_value;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ StdReplaceIfFunctor(InputIterator first, PredicateType pred,
+ NewValueType new_value)
+ : m_first(std::move(first)),
+ m_predicate(std::move(pred)),
+ m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType,
+ class ValueType>
+void replace_if_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, PredicateType pred,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdReplaceIfFunctor(first, std::move(pred), new_value));
+ ex.fence("Kokkos::replace_if: fence after operation");
+}
+
+template <class TeamHandleType, class IteratorType, class PredicateType,
+ class ValueType>
+KOKKOS_FUNCTION void replace_if_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first, IteratorType last,
+ PredicateType pred,
+ const ValueType& new_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ StdReplaceIfFunctor(first, std::move(pred), new_value));
+ teamHandle.team_barrier();
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
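
A combined sketch for the predicate-driven variants (hypothetical predicate; assumes the public replace_if and replace_copy_if overloads that forward to the impls above):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void clamp_negatives(Kokkos::View<double*> v, Kokkos::View<double*> out) {
  namespace KE = Kokkos::Experimental;
  auto is_negative = KOKKOS_LAMBDA(double x) { return x < 0.0; };
  // in place
  KE::replace_if(Kokkos::DefaultExecutionSpace(), KE::begin(v), KE::end(v),
                 is_negative, 0.0);
  // copying variant: writes to out, leaves the source unchanged
  KE::replace_copy_if(Kokkos::DefaultExecutionSpace(), KE::begin(v),
                      KE::end(v), KE::begin(out), is_negative, 0.0);
}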
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator>
+struct StdReverseFunctor {
+ using index_type = typename InputIterator::difference_type;
+ static_assert(std::is_signed_v<index_type>,
+ "Kokkos: StdReverseFunctor requires signed index type");
+
+ InputIterator m_first;
+ InputIterator m_last;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ ::Kokkos::kokkos_swap(m_first[i], m_last[-i - 1]);
+ }
+
+ KOKKOS_FUNCTION
+ StdReverseFunctor(InputIterator first, InputIterator last)
+ : m_first(std::move(first)), m_last(std::move(last)) {}
+};
+
+template <class ExecutionSpace, class InputIterator>
+void reverse_exespace_impl(const std::string& label, const ExecutionSpace& ex,
+ InputIterator first, InputIterator last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ if (last >= first + 2) {
+ // only need half
+ const auto num_elements = Kokkos::Experimental::distance(first, last) / 2;
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdReverseFunctor(first, last));
+ ex.fence("Kokkos::reverse: fence after operation");
+ }
+}
+
+template <class TeamHandleType, class InputIterator>
+KOKKOS_FUNCTION void reverse_team_impl(const TeamHandleType& teamHandle,
+ InputIterator first,
+ InputIterator last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ if (last >= first + 2) {
+ // only need half
+ const auto num_elements = Kokkos::Experimental::distance(first, last) / 2;
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdReverseFunctor(first, last));
+ teamHandle.team_barrier();
+ }
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
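
The index arithmetic in StdReverseFunctor pairs element i with element n-1-i: m_last[-i - 1] is *(last - 1 - i). Iterating over only the first half therefore performs the full reversal. An equivalent plain-index sketch (view name hypothetical):

#include <Kokkos_Core.hpp>

void reverse_sketch(Kokkos::View<double*> v) {
  const int n = static_cast<int>(v.extent(0));
  // swap element i with element n-1-i; only the first half is needed
  Kokkos::parallel_for(
      "reverse_sketch", n / 2, KOKKOS_LAMBDA(const int i) {
        const double tmp = v(i);
        v(i)             = v(n - 1 - i);
        v(n - 1 - i)     = tmp;
      });
}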
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator>
+struct StdReverseCopyFunctor {
+ using index_type = typename InputIterator::difference_type;
+ static_assert(std::is_signed_v<index_type>,
+ "Kokkos: StdReverseCopyFunctor requires signed index type");
+
+ InputIterator m_last;
+ OutputIterator m_dest_first;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_dest_first[i] = m_last[-1 - i]; }
+
+ KOKKOS_FUNCTION
+ StdReverseCopyFunctor(InputIterator _last, OutputIterator _dest_first)
+ : m_last(std::move(_last)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator reverse_copy_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdReverseCopyFunctor(last, d_first));
+ ex.fence("Kokkos::reverse_copy: fence after operation");
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class TeamHandleType, class InputIterator, class OutputIterator>
+KOKKOS_FUNCTION OutputIterator
+reverse_copy_team_impl(const TeamHandleType& teamHandle, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdReverseCopyFunctor(last, d_first));
+ teamHandle.team_barrier();
+
+ // return
+ return d_first + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_ROTATE_IMPL_HPP
#include <Kokkos_Core.hpp>
#include "Kokkos_Constraints.hpp"
#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_Reverse.hpp"
#include <std_algorithms/Kokkos_Move.hpp>
#include <std_algorithms/Kokkos_Distance.hpp>
#include <string>
}
template <class ExecutionSpace, class IteratorType>
-IteratorType rotate_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType n_first,
- IteratorType last) {
+IteratorType rotate_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ IteratorType n_first, IteratorType last) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
Impl::expect_valid_range(first, n_first);
Impl::expect_valid_range(n_first, last);
+ // it might be worth checking whether the exespace impl should do something
+ // similar to the team impl below, since that approach avoids a new allocation
namespace KE = ::Kokkos::Experimental;
const auto num_elements = KE::distance(first, last);
const auto n_distance_from_first = KE::distance(first, n_first);
}
}
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType rotate_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType n_first,
+ IteratorType last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+ Impl::expect_valid_range(first, n_first);
+ Impl::expect_valid_range(n_first, last);
+
+ namespace KE = ::Kokkos::Experimental;
+
+ auto result = first + (last - n_first);
+ // first reverse the whole range
+ KE::Impl::reverse_team_impl(teamHandle, first, last);
+ // re-reverse each piece
+ KE::Impl::reverse_team_impl(teamHandle, first, result);
+ KE::Impl::reverse_team_impl(teamHandle, result, last);
+
+ // no need for barrier here since reverse already calls it
+
+ return result;
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
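
For intuition, a worked trace of the three-reversal identity that rotate_team_impl relies on:

// rotating {1,2,3,4,5,6,7} with n_first = first + 3 (pointing at 4):
//   reverse [first, last)   -> 7 6 5 4 3 2 1
//   result = first + (last - n_first) = first + 4
//   reverse [first, result) -> 4 5 6 7 3 2 1
//   reverse [result, last)  -> 4 5 6 7 1 2 3
// result now points at the old *first (the value 1), matching what
// std::rotate returns.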
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_COPY_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_ROTATE_COPY_IMPL_HPP
namespace Experimental {
namespace Impl {
-template <class IndexType, class InputIterator, class OutputIterator>
+template <class InputIterator, class OutputIterator>
struct StdRotateCopyFunctor {
+ using index_type = typename InputIterator::difference_type;
+
InputIterator m_first;
InputIterator m_last;
InputIterator m_first_n;
OutputIterator m_dest_first;
KOKKOS_FUNCTION
- void operator()(IndexType i) const {
- const IndexType shift = m_last - m_first_n;
+ void operator()(index_type i) const {
+ const index_type shift = m_last - m_first_n;
if (i < shift) {
m_dest_first[i] = m_first_n[i];
}
}
+ KOKKOS_FUNCTION
StdRotateCopyFunctor(InputIterator first, InputIterator last,
InputIterator first_n, OutputIterator dest_first)
: m_first(std::move(first)),
};
template <class ExecutionSpace, class InputIterator, class OutputIterator>
-OutputIterator rotate_copy_impl(const std::string& label,
- const ExecutionSpace& ex, InputIterator first,
- InputIterator n_first, InputIterator last,
- OutputIterator d_first) {
+OutputIterator rotate_copy_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, InputIterator first,
+ InputIterator n_first, InputIterator last, OutputIterator d_first) {
/*
algorithm is implemented as follows:
return d_first;
}
- // aliases
- using index_type = typename InputIterator::difference_type;
- using func_type =
- StdRotateCopyFunctor<index_type, InputIterator, OutputIterator>;
-
// run
const auto num_elements = Kokkos::Experimental::distance(first, last);
::Kokkos::parallel_for(label,
RangePolicy<ExecutionSpace>(ex, 0, num_elements),
- func_type(first, last, n_first, d_first));
+ StdRotateCopyFunctor(first, last, n_first, d_first));
ex.fence("Kokkos::rotate_copy: fence after operation");
return d_first + num_elements;
}
+template <class TeamHandleType, class InputIterator, class OutputIterator>
+KOKKOS_FUNCTION OutputIterator rotate_copy_team_impl(
+ const TeamHandleType& teamHandle, InputIterator first,
+ InputIterator n_first, InputIterator last, OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+ Impl::expect_valid_range(first, n_first);
+ Impl::expect_valid_range(n_first, last);
+
+ if (first == last) {
+ return d_first;
+ }
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdRotateCopyFunctor(first, last, n_first, d_first));
+
+ teamHandle.team_barrier();
+
+ // return
+ return d_first + num_elements;
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
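
For intuition, the index mapping rotate_copy realizes, with shift = last - n_first. The branch shown in the functor above covers i < shift; the complementary branch is elided from this hunk but, by the algorithm's definition, must copy the head:

// rotate_copy mapping for a range of length n, shift = last - n_first:
//   i <  shift : dest[i] = n_first[i]        (tail of the input comes first)
//   i >= shift : dest[i] = first[i - shift]  (then the head follows)
// e.g. copying {1,2,3,4,5} with n_first pointing at 4 (shift = 2)
// produces {4,5,1,2,3}.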
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_SEARCH_IMPL_HPP
}
}
- const auto rv =
- found ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+ // FIXME_NVHPC using a ternary operator causes problems
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::min()};
+ if (found) {
+ rv = {i};
+ }
m_reducer.join(red_value, rv);
}
m_p(std::move(p)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class BinaryPredicateType>
-IteratorType1 search_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last,
- const BinaryPredicateType& pred) {
+IteratorType1 search_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first, IteratorType2 s_last,
+ const BinaryPredicateType& pred) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first, s_first);
Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
const auto num_elements = KE::distance(first, last);
const auto s_count = KE::distance(s_first, s_last);
KOKKOS_EXPECTS(num_elements >= s_count);
- (void)s_count; // needed when macro above is a no-op
if (s_first == s_last) {
    return first;
  }

  // special case where the two ranges have equal size
if (num_elements == s_count) {
- const auto equal_result = equal_impl(label, ex, first, last, s_first, pred);
+ const auto equal_result =
+ equal_exespace_impl(label, ex, first, last, s_first, pred);
return (equal_result) ? first : last;
} else {
using index_type = typename IteratorType1::difference_type;
}
template <class ExecutionSpace, class IteratorType1, class IteratorType2>
-IteratorType1 search_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType1 first, IteratorType1 last,
- IteratorType2 s_first, IteratorType2 s_last) {
+IteratorType1 search_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first, IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
+ using value_type1 = typename IteratorType1::value_type;
+ using value_type2 = typename IteratorType2::value_type;
+ using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+ return search_exespace_impl(label, ex, first, last, s_first, s_last,
+ predicate_type());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class BinaryPredicateType>
+KOKKOS_FUNCTION IteratorType1
+search_team_impl(const TeamHandleType& teamHandle, IteratorType1 first,
+ IteratorType1 last, IteratorType2 s_first,
+ IteratorType2 s_last, const BinaryPredicateType& pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, s_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+ Impl::expect_valid_range(first, last);
+ Impl::expect_valid_range(s_first, s_last);
+
+ // the target sequence should not be larger than the range [first, last)
+ namespace KE = ::Kokkos::Experimental;
+ const auto num_elements = KE::distance(first, last);
+ const auto s_count = KE::distance(s_first, s_last);
+ KOKKOS_EXPECTS(num_elements >= s_count);
+
+ if (s_first == s_last) {
+ return first;
+ }
+
+ if (first == last) {
+ return last;
+ }
+
+ // special case where the two ranges have equal size
+ if (num_elements == s_count) {
+ const auto equal_result =
+ equal_team_impl(teamHandle, first, last, s_first, pred);
+ return (equal_result) ? first : last;
+ } else {
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t = StdSearchFunctor<index_type, IteratorType1, IteratorType2,
+ reducer_type, BinaryPredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+
+    // decide the size of the range policy for the parallel reduce:
+    // the last feasible index at which the sequence can start is the one
+    // whose distance from "last" equals the sequence count;
+    // the +1 includes that location in the search
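+    // e.g., with num_elements = 5 and s_count = 2, the sequence can start
+    // at offsets 0 through 3, so range_size = 5 - 2 + 1 = 4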
+ const auto range_size = num_elements - s_count + 1;
+
+ // run par reduce
+ ::Kokkos::parallel_reduce(
+ TeamThreadRange(teamHandle, 0, range_size),
+ func_t(first, last, s_first, s_last, reducer, pred), reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
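+    // reduction_identity<index_type>::min() is the identity element of a
+    // min-reduction (the largest representable index), so it survives the
+    // reduction only if no thread recorded a matching start position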
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ // location has not been found
+ return last;
+ } else {
+ // location has been found
+ return first + red_result.min_loc_true;
+ }
+ }
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType1 search_team_impl(const TeamHandleType& teamHandle,
+ IteratorType1 first,
+ IteratorType1 last,
+ IteratorType2 s_first,
+ IteratorType2 s_last) {
using value_type1 = typename IteratorType1::value_type;
using value_type2 = typename IteratorType2::value_type;
using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
- return search_impl(label, ex, first, last, s_first, s_last, predicate_type());
+ return search_team_impl(teamHandle, first, last, s_first, s_last,
+ predicate_type());
}
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_N_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_SEARCH_N_IMPL_HPP
}
}
- const auto rv =
- found ? red_value_type{i}
- : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+  // FIXME_NVHPC: using a ternary operator here causes problems
+  // with the NVHPC compiler
+ red_value_type rv = {::Kokkos::reduction_identity<IndexType>::min()};
+ if (found) {
+ rv.min_loc_true = i;
+ }
m_reducer.join(red_value, rv);
}
m_p(std::move(p)) {}
};
+//
+// exespace impl
+//
template <class ExecutionSpace, class IteratorType, class SizeType,
class ValueType, class BinaryPredicateType>
-IteratorType search_n_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- SizeType count, const ValueType& value,
- const BinaryPredicateType& pred) {
+IteratorType search_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ SizeType count, const ValueType& value,
+ const BinaryPredicateType& pred) {
// checks
static_assert_random_access_and_accessible(ex, first);
expect_valid_range(first, last);
if ((std::size_t)num_elements == (std::size_t)count) {
using equal_to_value = StdAlgoEqualsValUnaryPredicate<ValueType>;
const auto satisfies =
- all_of_impl(label, ex, first, last, equal_to_value(value));
+ all_of_exespace_impl(label, ex, first, last, equal_to_value(value));
return (satisfies) ? first : last;
} else {
// aliases
template <class ExecutionSpace, class IteratorType, class SizeType,
class ValueType>
-IteratorType search_n_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- SizeType count, const ValueType& value) {
+IteratorType search_n_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType first, IteratorType last,
+ SizeType count, const ValueType& value) {
+ using iter_value_type = typename IteratorType::value_type;
+ using predicate_type =
+ StdAlgoEqualBinaryPredicate<iter_value_type, ValueType>;
+
+ /* above we use <iter_value_type, ValueType> for the predicate_type
+ to be consistent with the standard, which says:
+
+ "
+ The signature of the predicate function should be equivalent to:
+
+ bool pred(const Type1 &a, const Type2 &b);
+
+ The type Type1 must be such that an object of type ForwardIt can be
+ dereferenced and then implicitly converted to Type1. The type Type2 must be
+ such that an object of type T can be implicitly converted to Type2.
+ "
+
+ In our case, IteratorType = ForwardIt, and ValueType = T.
+ */
+
+ return search_n_exespace_impl(label, ex, first, last, count, value,
+ predicate_type());
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class ValueType, class BinaryPredicateType>
+KOKKOS_FUNCTION IteratorType search_n_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ SizeType count, const ValueType& value, const BinaryPredicateType& pred) {
+ // checks
+ static_assert_random_access_and_accessible(teamHandle, first);
+ expect_valid_range(first, last);
+ KOKKOS_EXPECTS((std::ptrdiff_t)count >= 0);
+
+ // count should not be larger than the range [first, last)
+ namespace KE = ::Kokkos::Experimental;
+ const auto num_elements = KE::distance(first, last);
+  // cast both sides to avoid a signed/unsigned comparison warning
+ KOKKOS_EXPECTS((std::size_t)num_elements >= (std::size_t)count);
+
+ if (first == last) {
+ return first;
+ }
+
+ // special case where num elements in [first, last) == count
+ if ((std::size_t)num_elements == (std::size_t)count) {
+ using equal_to_value = StdAlgoEqualsValUnaryPredicate<ValueType>;
+ const auto satisfies =
+ all_of_team_impl(teamHandle, first, last, equal_to_value(value));
+ return (satisfies) ? first : last;
+ } else {
+ // aliases
+ using index_type = typename IteratorType::difference_type;
+ using reducer_type = FirstLoc<index_type>;
+ using reduction_value_type = typename reducer_type::value_type;
+ using func_t =
+ StdSearchNFunctor<index_type, IteratorType, SizeType, ValueType,
+ reducer_type, BinaryPredicateType>;
+
+ // run
+ reduction_value_type red_result;
+ reducer_type reducer(red_result);
+
+    // decide the size of the range policy for the parallel reduce:
+    // the last feasible index at which the run can start is the one
+    // whose distance from "last" equals count;
+    // the +1 includes that location in the search
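+    // e.g., with num_elements = 6 and count = 4, a run of 4 equal values
+    // can begin at offsets 0, 1, or 2, so range_size = 6 - 4 + 1 = 3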
+ const auto range_size = num_elements - count + 1;
+
+ // run par reduce
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, range_size),
+ func_t(first, last, count, value, reducer, pred),
+ reducer);
+
+ teamHandle.team_barrier();
+
+ // decide and return
+ if (red_result.min_loc_true ==
+ ::Kokkos::reduction_identity<index_type>::min()) {
+ // location has not been found
+ return last;
+ } else {
+ // location has been found
+ return first + red_result.min_loc_true;
+ }
+ }
+}
+
+template <class TeamHandleType, class IteratorType, class SizeType,
+ class ValueType>
+KOKKOS_FUNCTION IteratorType
+search_n_team_impl(const TeamHandleType& teamHandle, IteratorType first,
+ IteratorType last, SizeType count, const ValueType& value) {
using iter_value_type = typename IteratorType::value_type;
using predicate_type =
StdAlgoEqualBinaryPredicate<iter_value_type, ValueType>;
In our case, IteratorType = ForwardIt, and ValueType = T.
*/
- return search_n_impl(label, ex, first, last, count, value, predicate_type());
+ return search_n_team_impl(teamHandle, first, last, count, value,
+ predicate_type());
}
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_IMPL_HPP
namespace Impl {
template <class ExecutionSpace, class IteratorType>
-IteratorType shift_left_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- typename IteratorType::difference_type n) {
+IteratorType shift_left_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, typename IteratorType::difference_type n) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
return last - n;
}
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType shift_left_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ typename IteratorType::difference_type n) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+ KOKKOS_EXPECTS(n >= 0);
+
+ // handle trivial cases
+ if (n == 0) {
+ return last;
+ }
+
+ if (n >= Kokkos::Experimental::distance(first, last)) {
+ return first;
+ }
+
+  // we cannot make a new allocation here as the execution-space impl
+  // does, because this team-level impl runs inside a parallel region;
+  // for now we therefore perform the element moves serially
+
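+  // moving front-to-back is safe for a left shift: the source element at
+  // index i + n is always read before iteration i + n can overwrite it.
+  // E.g. shifting {a,b,c,d,e} left by n = 2 yields {c,d,e,..} with the
+  // trailing n elements moved-from, and returns the new logical end, last - n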
+ using difference_type = typename IteratorType::difference_type;
+ const difference_type numElementsToMove =
+ ::Kokkos::Experimental::distance(first + n, last);
+ Kokkos::single(Kokkos::PerTeam(teamHandle), [=]() {
+ for (difference_type i = 0; i < numElementsToMove; ++i) {
+ first[i] = std::move(first[i + n]);
+ }
+ });
+ teamHandle.team_barrier();
+
+ return last - n;
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_IMPL_HPP
namespace Impl {
template <class ExecutionSpace, class IteratorType>
-IteratorType shift_right_impl(const std::string& label,
- const ExecutionSpace& ex, IteratorType first,
- IteratorType last,
- typename IteratorType::difference_type n) {
+IteratorType shift_right_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, typename IteratorType::difference_type n) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
return first + n;
}
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType shift_right_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ typename IteratorType::difference_type n) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+ KOKKOS_EXPECTS(n >= 0);
+
+ // handle trivial cases
+ if (n == 0) {
+ return first;
+ }
+
+ if (n >= Kokkos::Experimental::distance(first, last)) {
+ return last;
+ }
+
+  // we cannot make a new allocation here as the execution-space impl
+  // does, because this team-level impl runs inside a parallel region;
+  // for now we therefore perform the element moves serially
+
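+  // moving back-to-front is safe for a right shift: the source element at
+  // last[-n - i - 1] is always read before iteration i + n can overwrite it.
+  // E.g. shifting {a,b,c,d,e} right by n = 2 yields {..,a,b,c} with the
+  // leading n elements moved-from, and returns first + n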
+ using difference_type = typename IteratorType::difference_type;
+ const difference_type numElementsToMove =
+ ::Kokkos::Experimental::distance(first, last - n);
+ Kokkos::single(Kokkos::PerTeam(teamHandle), [=]() {
+ for (difference_type i = 0; i < numElementsToMove; ++i) {
+ last[-i - 1] = std::move(last[-n - i - 1]);
+ }
+ });
+ teamHandle.team_barrier();
+
+ return first + n;
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2>
+struct StdSwapRangesFunctor {
+ using index_type = typename IteratorType1::difference_type;
+ IteratorType1 m_first1;
+ IteratorType2 m_first2;
+
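+  // note: ::Kokkos::kokkos_swap is the device-callable counterpart of
+  // std::swap, so this functor can run inside any backend's parallel_for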
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ ::Kokkos::kokkos_swap(m_first1[i], m_first2[i]);
+ }
+
+ KOKKOS_FUNCTION
+ StdSwapRangesFunctor(IteratorType1 _first1, IteratorType2 _first2)
+ : m_first1(std::move(_first1)), m_first2(std::move(_first2)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 swap_ranges_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ IteratorType1 first1,
+ IteratorType1 last1,
+ IteratorType2 first2) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements_to_swap =
+ Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_to_swap),
+ StdSwapRangesFunctor(first1, first2));
+ ex.fence("Kokkos::swap_ranges: fence after operation");
+
+ // return
+ return first2 + num_elements_to_swap;
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2>
+KOKKOS_FUNCTION IteratorType2
+swap_ranges_team_impl(const TeamHandleType& teamHandle, IteratorType1 first1,
+ IteratorType1 last1, IteratorType2 first2) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements_to_swap =
+ Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements_to_swap),
+ StdSwapRangesFunctor(first1, first2));
+ teamHandle.team_barrier();
+
+ // return
+ return first2 + num_elements_to_swap;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator, class UnaryFunctorType>
+struct StdTransformFunctor {
+  // we can use the difference type from InputIterator since the impl
+  // functions calling this functor already static_assert that the
+  // iterators have matching difference types
+ using index_type = typename InputIterator::difference_type;
+
+ InputIterator m_first;
+ OutputIterator m_d_first;
+ UnaryFunctorType m_unary_op;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const { m_d_first[i] = m_unary_op(m_first[i]); }
+
+ KOKKOS_FUNCTION
+ StdTransformFunctor(InputIterator _first, OutputIterator _m_d_first,
+ UnaryFunctorType _functor)
+ : m_first(std::move(_first)),
+ m_d_first(std::move(_m_d_first)),
+ m_unary_op(std::move(_functor)) {}
+};
+
+template <class InputIterator1, class InputIterator2, class OutputIterator,
+ class BinaryFunctorType>
+struct StdTransformBinaryFunctor {
+  // we can use the difference type from InputIterator1 since the impl
+  // functions calling this functor already static_assert that the
+  // iterators have matching difference types
+ using index_type = typename InputIterator1::difference_type;
+
+ InputIterator1 m_first1;
+ InputIterator2 m_first2;
+ OutputIterator m_d_first;
+ BinaryFunctorType m_binary_op;
+
+ KOKKOS_FUNCTION
+ void operator()(index_type i) const {
+ m_d_first[i] = m_binary_op(m_first1[i], m_first2[i]);
+ }
+
+ KOKKOS_FUNCTION
+ StdTransformBinaryFunctor(InputIterator1 _first1, InputIterator2 _first2,
+ OutputIterator _m_d_first,
+ BinaryFunctorType _functor)
+ : m_first1(std::move(_first1)),
+ m_first2(std::move(_first2)),
+ m_d_first(std::move(_m_d_first)),
+ m_binary_op(std::move(_functor)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+ class UnaryOperation>
+OutputIterator transform_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, InputIterator first1,
+ InputIterator last1, OutputIterator d_first, UnaryOperation unary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first1, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, d_first);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdTransformFunctor(first1, d_first, unary_op));
+ ex.fence("Kokkos::transform: fence after operation");
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class ExecutionSpace, class InputIterator1, class InputIterator2,
+ class OutputIterator, class BinaryOperation>
+OutputIterator transform_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, InputIterator1 first1,
+ InputIterator1 last1, InputIterator2 first2, OutputIterator d_first,
+ BinaryOperation binary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first1, first2, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2,
+ d_first);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ StdTransformBinaryFunctor(first1, first2, d_first, binary_op));
+ ex.fence("Kokkos::transform: fence after operation");
+ return d_first + num_elements;
+}
+
+//
+// team-level impl
+//
+
+template <class TeamHandleType, class InputIterator, class OutputIterator,
+ class UnaryOperation>
+KOKKOS_FUNCTION OutputIterator transform_team_impl(
+ const TeamHandleType& teamHandle, InputIterator first1, InputIterator last1,
+ OutputIterator d_first, UnaryOperation unary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, d_first);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(TeamThreadRange(teamHandle, 0, num_elements),
+ StdTransformFunctor(first1, d_first, unary_op));
+ teamHandle.team_barrier();
+
+ // return
+ return d_first + num_elements;
+}
+
+template <class TeamHandleType, class InputIterator1, class InputIterator2,
+ class OutputIterator, class BinaryOperation>
+KOKKOS_FUNCTION OutputIterator
+transform_team_impl(const TeamHandleType& teamHandle, InputIterator1 first1,
+ InputIterator1 last1, InputIterator2 first2,
+ OutputIterator d_first, BinaryOperation binary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2,
+ d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2,
+ d_first);
+ Impl::expect_valid_range(first1, last1);
+
+ // run
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_for(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ StdTransformBinaryFunctor(first1, first2, d_first, binary_op));
+ teamHandle.team_barrier();
+
+ return d_first + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include "Kokkos_FunctorsForExclusiveScan.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+//
+// exespace impl
+//
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class ValueType, class BinaryOpType,
+ class UnaryOpType>
+OutputIteratorType transform_exclusive_scan_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop,
+ UnaryOpType uop) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using index_type = typename InputIteratorType::difference_type;
+
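+  // if ValueType has a reduction identity for sum (detected below), the
+  // scan can use the value type directly; otherwise fall back to the
+  // functor that wraps each value to track whether a partial result has
+  // been initialized yet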
+ using func_type = std::conditional_t<
+ ::Kokkos::is_detected<ex_scan_has_reduction_identity_sum_t,
+ ValueType>::value,
+ TransformExclusiveScanFunctorWithoutValueWrapper<
+ ExecutionSpace, index_type, ValueType, InputIteratorType,
+ OutputIteratorType, BinaryOpType, UnaryOpType>,
+ TransformExclusiveScanFunctorWithValueWrapper<
+ ExecutionSpace, index_type, ValueType, InputIteratorType,
+ OutputIteratorType, BinaryOpType, UnaryOpType> >;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_type(std::move(init_value), first_from, first_dest, bop, uop));
+ ex.fence("Kokkos::transform_exclusive_scan: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+//
+// team impl
+//
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class ValueType, class BinaryOpType,
+ class UnaryOpType>
+KOKKOS_FUNCTION OutputIteratorType transform_exclusive_scan_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ ValueType init_value, BinaryOpType bop, UnaryOpType uop) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ static_assert(
+ ::Kokkos::is_detected_v<ex_scan_has_reduction_identity_sum_t, ValueType>,
+ "The team-level impl of Kokkos::Experimental::transform_exclusive_scan "
+ "currently does not support types without reduction identity");
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = TransformExclusiveScanFunctorWithoutValueWrapper<
+ exe_space, index_type, ValueType, InputIteratorType, OutputIteratorType,
+ BinaryOpType, UnaryOpType>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(std::move(init_value), first_from, first_dest, bop, uop));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest, class BinaryOpType, class UnaryOpType>
+struct ExeSpaceTransformInclusiveScanNoInitValueFunctor {
+ using execution_space = ExeSpace;
+ using value_type = ValueWrapperForNoNeutralElement<ValueType>;
+
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+
+ KOKKOS_FUNCTION
+ ExeSpaceTransformInclusiveScanNoInitValueFunctor(FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, value_type& update,
+ const bool final_pass) const {
+ const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+ this->join(update, tmp);
+ if (final_pass) {
+ m_first_dest[i] = update.val;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ void init(value_type& update) const {
+ update.val = {};
+ update.is_initial = true;
+ }
+
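+  // is_initial marks a partial result that has not yet absorbed any
+  // element; it stands in for the neutral element that ValueType may lack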
+ KOKKOS_FUNCTION
+ void join(value_type& update, const value_type& input) const {
+ if (input.is_initial) return;
+
+ if (update.is_initial) {
+ update.val = input.val;
+ } else {
+ update.val = m_binary_op(update.val, input.val);
+ }
+ update.is_initial = false;
+ }
+};
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+ class FirstDest, class BinaryOpType, class UnaryOpType>
+struct ExeSpaceTransformInclusiveScanWithInitValueFunctor {
+ using execution_space = ExeSpace;
+ using value_type = ValueWrapperForNoNeutralElement<ValueType>;
+
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+ ValueType m_init;
+
+ KOKKOS_FUNCTION
+ ExeSpaceTransformInclusiveScanWithInitValueFunctor(FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop,
+ ValueType init)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)),
+ m_init(std::move(init)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const IndexType i, value_type& update,
+ const bool final_pass) const {
+ const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+ this->join(update, tmp);
+
+ if (final_pass) {
+ m_first_dest[i] = m_binary_op(update.val, m_init);
+ }
+ }
+
+ KOKKOS_FUNCTION
+ void init(value_type& update) const {
+ update.val = {};
+ update.is_initial = true;
+ }
+
+ KOKKOS_FUNCTION
+ void join(value_type& update, const value_type& input) const {
+ if (input.is_initial) return;
+
+ if (update.is_initial) {
+ update.val = input.val;
+ } else {
+ update.val = m_binary_op(update.val, input.val);
+ }
+ update.is_initial = false;
+ }
+};
+
+//
+// exespace impl
+//
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_exespace_impl without init_value
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType, class UnaryOpType>
+OutputIteratorType transform_inclusive_scan_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, BinaryOpType binary_op,
+ UnaryOpType unary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using index_type = typename InputIteratorType::difference_type;
+ using value_type =
+ std::remove_const_t<typename InputIteratorType::value_type>;
+ using func_type = ExeSpaceTransformInclusiveScanNoInitValueFunctor<
+ ExecutionSpace, index_type, value_type, InputIteratorType,
+ OutputIteratorType, BinaryOpType, UnaryOpType>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_type(first_from, first_dest, binary_op, unary_op));
+ ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_exespace_impl with init_value
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType, class UnaryOpType,
+ class ValueType>
+OutputIteratorType transform_inclusive_scan_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex,
+ InputIteratorType first_from, InputIteratorType last_from,
+ OutputIteratorType first_dest, BinaryOpType binary_op, UnaryOpType unary_op,
+ ValueType init_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using index_type = typename InputIteratorType::difference_type;
+ using func_type = ExeSpaceTransformInclusiveScanWithInitValueFunctor<
+ ExecutionSpace, index_type, ValueType, InputIteratorType,
+ OutputIteratorType, BinaryOpType, UnaryOpType>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(label,
+ RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+ func_type(first_from, first_dest, binary_op, unary_op,
+ std::move(init_value)));
+ ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
+
+ // return
+ return first_dest + num_elements;
+}
+
+//
+// team impl
+//
+
+template <class ExeSpace, class ValueType, class FirstFrom, class FirstDest,
+ class BinaryOpType, class UnaryOpType>
+struct TeamTransformInclusiveScanNoInitValueFunctor {
+ using execution_space = ExeSpace;
+ using index_type = typename FirstFrom::difference_type;
+
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+
+ KOKKOS_FUNCTION
+ TeamTransformInclusiveScanNoInitValueFunctor(FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, ValueType& update,
+ const bool final_pass) const {
+ const auto tmp = ValueType{m_unary_op(m_first_from[i])};
+ this->join(update, tmp);
+ if (final_pass) {
+ m_first_dest[i] = update;
+ }
+ }
+
+ KOKKOS_FUNCTION
+ void init(ValueType& update) const { update = {}; }
+
+ KOKKOS_FUNCTION
+ void join(ValueType& update, const ValueType& input) const {
+ update = m_binary_op(update, input);
+ }
+};
+
+template <class ExeSpace, class ValueType, class FirstFrom, class FirstDest,
+ class BinaryOpType, class UnaryOpType>
+struct TeamTransformInclusiveScanWithInitValueFunctor {
+ using execution_space = ExeSpace;
+ using index_type = typename FirstFrom::difference_type;
+
+ FirstFrom m_first_from;
+ FirstDest m_first_dest;
+ BinaryOpType m_binary_op;
+ UnaryOpType m_unary_op;
+ ValueType m_init;
+
+ KOKKOS_FUNCTION
+ TeamTransformInclusiveScanWithInitValueFunctor(FirstFrom first_from,
+ FirstDest first_dest,
+ BinaryOpType bop,
+ UnaryOpType uop,
+ ValueType init)
+ : m_first_from(std::move(first_from)),
+ m_first_dest(std::move(first_dest)),
+ m_binary_op(std::move(bop)),
+ m_unary_op(std::move(uop)),
+ m_init(std::move(init)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, ValueType& update,
+ const bool final_pass) const {
+ const auto tmp = ValueType{m_unary_op(m_first_from[i])};
+ this->join(update, tmp);
+
+ if (final_pass) {
+ m_first_dest[i] = m_binary_op(update, m_init);
+ }
+ }
+
+ KOKKOS_FUNCTION
+ void init(ValueType& update) const { update = {}; }
+
+ KOKKOS_FUNCTION
+ void join(ValueType& update, const ValueType& input) const {
+ update = m_binary_op(update, input);
+ }
+};
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_team_impl without init_value
+// -------------------------------------------------------------
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType, class UnaryOpType>
+KOKKOS_FUNCTION OutputIteratorType transform_inclusive_scan_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using value_type =
+ std::remove_const_t<typename InputIteratorType::value_type>;
+ using func_type = TeamTransformInclusiveScanNoInitValueFunctor<
+ exe_space, value_type, InputIteratorType, OutputIteratorType,
+ BinaryOpType, UnaryOpType>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(
+ TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(first_from, first_dest, binary_op, unary_op));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_team_impl with init_value
+// -------------------------------------------------------------
+template <class TeamHandleType, class InputIteratorType,
+ class OutputIteratorType, class BinaryOpType, class UnaryOpType,
+ class ValueType>
+KOKKOS_FUNCTION OutputIteratorType transform_inclusive_scan_team_impl(
+ const TeamHandleType& teamHandle, InputIteratorType first_from,
+ InputIteratorType last_from, OutputIteratorType first_dest,
+ BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first_from,
+ first_dest);
+ Impl::static_assert_iterators_have_matching_difference_type(first_from,
+ first_dest);
+ Impl::expect_valid_range(first_from, last_from);
+
+ // aliases
+ using exe_space = typename TeamHandleType::execution_space;
+ using func_type = TeamTransformInclusiveScanWithInitValueFunctor<
+ exe_space, ValueType, InputIteratorType, OutputIteratorType, BinaryOpType,
+ UnaryOpType>;
+
+ // run
+ const auto num_elements =
+ Kokkos::Experimental::distance(first_from, last_from);
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, num_elements),
+ func_type(first_from, first_dest, binary_op, unary_op,
+ std::move(init_value)));
+ teamHandle.team_barrier();
+
+ // return
+ return first_dest + num_elements;
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_IMPL_HPP
//
//------------------------------
+//
+// exespace impl
+//
+
template <class ExecutionSpace, class IteratorType, class ValueType,
class JoinerType, class UnaryTransformerType>
-ValueType transform_reduce_custom_functors_impl(
+ValueType transform_reduce_custom_functors_exespace_impl(
const std::string& label, const ExecutionSpace& ex, IteratorType first,
IteratorType last, ValueType init_reduction_value, JoinerType joiner,
UnaryTransformerType transformer) {
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class ValueType, class JoinerType, class BinaryTransformerType>
-ValueType transform_reduce_custom_functors_impl(
+ValueType transform_reduce_custom_functors_exespace_impl(
const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
IteratorType1 last1, IteratorType2 first2, ValueType init_reduction_value,
JoinerType joiner, BinaryTransformerType transformer) {
template <class ExecutionSpace, class IteratorType1, class IteratorType2,
class ValueType>
-ValueType transform_reduce_default_functors_impl(
+ValueType transform_reduce_default_functors_exespace_impl(
const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
IteratorType1 last1, IteratorType2 first2, ValueType init_reduction_value) {
// checks
Impl::StdTranformReduceDefaultBinaryTransformFunctor<ValueType>;
using joiner_type = Impl::StdTranformReduceDefaultJoinFunctor<ValueType>;
- return transform_reduce_custom_functors_impl(
+ return transform_reduce_custom_functors_exespace_impl(
label, ex, first1, last1, first2, std::move(init_reduction_value),
joiner_type(), transformer_type());
}
+//
+// team impl
+//
+
+template <class TeamHandleType, class IteratorType, class ValueType,
+ class JoinerType, class UnaryTransformerType>
+KOKKOS_FUNCTION ValueType transform_reduce_custom_functors_team_impl(
+ const TeamHandleType& teamHandle, IteratorType first, IteratorType last,
+ ValueType init_reduction_value, JoinerType joiner,
+ UnaryTransformerType transformer) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::expect_valid_range(first, last);
+
+ if (first == last) {
+ // init is returned, unmodified
+ return init_reduction_value;
+ }
+
+ // aliases
+ using reducer_type =
+ ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+ using functor_type =
+ StdTransformReduceSingleIntervalFunctor<IteratorType, reducer_type,
+ UnaryTransformerType>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // run
+ reduction_value_type result;
+ reducer_type reducer(result, joiner);
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ functor_type(first, reducer, transformer), reducer);
+
+ teamHandle.team_barrier();
+
+  // as per the standard, the transformer is not applied to the init value
+ // https://en.cppreference.com/w/cpp/algorithm/transform_reduce
+ return joiner(result.val, init_reduction_value);
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class ValueType, class JoinerType, class BinaryTransformerType>
+KOKKOS_FUNCTION ValueType transform_reduce_custom_functors_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, ValueType init_reduction_value, JoinerType joiner,
+ BinaryTransformerType transformer) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ if (first1 == last1) {
+ // init is returned, unmodified
+ return init_reduction_value;
+ }
+
+ // aliases
+ using index_type = typename IteratorType1::difference_type;
+ using reducer_type =
+ ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+ using functor_type =
+ StdTransformReduceTwoIntervalsFunctor<index_type, IteratorType1,
+ IteratorType2, reducer_type,
+ BinaryTransformerType>;
+ using reduction_value_type = typename reducer_type::value_type;
+
+ // run
+ reduction_value_type result;
+ reducer_type reducer(result, joiner);
+
+ const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+ ::Kokkos::parallel_reduce(TeamThreadRange(teamHandle, 0, num_elements),
+ functor_type(first1, first2, reducer, transformer),
+ reducer);
+
+ teamHandle.team_barrier();
+
+ return joiner(result.val, init_reduction_value);
+}
+
+template <class TeamHandleType, class IteratorType1, class IteratorType2,
+ class ValueType>
+KOKKOS_FUNCTION ValueType transform_reduce_default_functors_team_impl(
+ const TeamHandleType& teamHandle, IteratorType1 first1, IteratorType1 last1,
+ IteratorType2 first2, ValueType init_reduction_value) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first1, first2);
+ Impl::static_assert_is_not_openmptarget(teamHandle);
+ Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+ Impl::expect_valid_range(first1, last1);
+
+ // aliases
+ using transformer_type =
+ Impl::StdTranformReduceDefaultBinaryTransformFunctor<ValueType>;
+ using joiner_type = Impl::StdTranformReduceDefaultJoinFunctor<ValueType>;
+
+ return transform_reduce_custom_functors_team_impl(
+ teamHandle, first1, last1, first2, std::move(init_reduction_value),
+ joiner_type(), transformer_type());
+}
+
} // namespace Impl
} // namespace Experimental
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_IMPL_HPP
#define KOKKOS_STD_ALGORITHMS_UNIQUE_IMPL_HPP
namespace Experimental {
namespace Impl {
-template <class IndexType, class InputIt, class OutputIt,
- class BinaryPredicateType>
+template <class InputIt, class OutputIt, class BinaryPredicateType>
struct StdUniqueFunctor {
+ using index_type = typename InputIt::difference_type;
+
InputIt m_first_from;
InputIt m_last_from;
OutputIt m_first_dest;
m_pred(std::move(pred)) {}
KOKKOS_FUNCTION
- void operator()(const IndexType i, IndexType& update,
+ void operator()(const index_type i, index_type& update,
const bool final_pass) const {
auto& val_i = m_first_from[i];
const auto& val_ip1 = m_first_from[i + 1];
};
template <class ExecutionSpace, class IteratorType, class PredicateType>
-IteratorType unique_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last,
- PredicateType pred) {
+IteratorType unique_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ IteratorType last, PredicateType pred) {
// checks
Impl::static_assert_random_access_and_accessible(ex, first);
Impl::expect_valid_range(first, last);
  // we use the same algorithm as in unique_copy, but here we move elements instead of copying them
using value_type = typename IteratorType::value_type;
using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
- tmp_view_type tmp_view("std_unique_tmp_view", num_elements_to_explore);
+ tmp_view_type tmp_view(Kokkos::view_alloc(ex, Kokkos::WithoutInitializing,
+ "std_unique_tmp_view"),
+ num_elements_to_explore);
// scan extent is: num_elements_to_explore - 1
  // for the same reason as explained in unique_copy
const auto scan_size = num_elements_to_explore - 1;
auto tmp_first = ::Kokkos::Experimental::begin(tmp_view);
- using output_it = decltype(tmp_first);
using index_type = typename IteratorType::difference_type;
- using func_type =
- StdUniqueFunctor<index_type, IteratorType, output_it, PredicateType>;
index_type count = 0;
::Kokkos::parallel_scan(
label, RangePolicy<ExecutionSpace>(ex, 0, scan_size),
- func_type(it_found, last, tmp_first, pred), count);
+ StdUniqueFunctor(it_found, last, tmp_first, pred), count);
  // move the last element too, for the same reason as in unique_copy
- auto unused_r =
- Impl::move_impl("Kokkos::move_from_unique", ex, it_found + scan_size,
- last, tmp_first + count);
- (void)unused_r; // r1 not used
+ [[maybe_unused]] auto unused_r = Impl::move_exespace_impl(
+ "Kokkos::move_from_unique", ex, it_found + scan_size, last,
+ tmp_first + count);
// ----------
// step 3
}
template <class ExecutionSpace, class IteratorType>
-IteratorType unique_impl(const std::string& label, const ExecutionSpace& ex,
- IteratorType first, IteratorType last) {
+IteratorType unique_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex, IteratorType first,
+ IteratorType last) {
using value_type = typename IteratorType::value_type;
using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
- return unique_impl(label, ex, first, last, binary_pred_t());
+ return unique_exespace_impl(label, ex, first, last, binary_pred_t());
+}
+
+//
+// team level
+//
+template <class TeamHandleType, class IteratorType, class PredicateType>
+KOKKOS_FUNCTION IteratorType unique_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last,
+ PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+  // branch for trivial vs non-trivial case
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ if (num_elements == 0) {
+ return first;
+ } else if (num_elements == 1) {
+ return last;
+ } else {
+ // FIXME: for the execution-space-based impl we used an auxiliary
+ // allocation, but for the team level we cannot do the same, so do this
+ // serially for now and later figure out if this can be done in parallel
+
+ std::size_t count = 0;
+ Kokkos::single(
+ Kokkos::PerTeam(teamHandle),
+ [=](std::size_t& lcount) {
+ IteratorType result = first;
+ IteratorType lfirst = first;
+ while (++lfirst != last) {
+ if (!pred(*result, *lfirst) && ++result != lfirst) {
+ *result = std::move(*lfirst);
+ }
+ }
+ lcount = Kokkos::Experimental::distance(first, result);
+ },
+ count);
+ // no barrier needed since single above broadcasts to all members
+
+    // +1 is needed because the new logical end is one past the last unique
+    // element
+ return first + count + 1;
+ }
+}
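+
+// Editor's note (illustrative, not part of the sources): the single-lane loop
+// above is the classic std::unique idiom. For {1, 1, 2, 3, 3} with the
+// default equality predicate it compacts the range to {1, 2, 3, ...},
+// count == distance(first, result) == 2, and first + count + 1 is the new
+// logical end, i.e. one past the last unique element.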
+
+template <class TeamHandleType, class IteratorType>
+KOKKOS_FUNCTION IteratorType unique_team_impl(const TeamHandleType& teamHandle,
+ IteratorType first,
+ IteratorType last) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first);
+ Impl::expect_valid_range(first, last);
+
+ using binary_pred_t =
+ StdAlgoEqualBinaryPredicate<typename IteratorType::value_type>;
+ return unique_team_impl(teamHandle, first, last, binary_pred_t());
}
} // namespace Impl
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_MustUseKokkosSingleInTeam.hpp"
+#include "Kokkos_CopyCopyN.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIt, class OutputIt, class BinaryPredicateType>
+struct StdUniqueCopyFunctor {
+ using index_type = typename InputIt::difference_type;
+ InputIt m_first_from;
+ InputIt m_last_from;
+ OutputIt m_first_dest;
+ BinaryPredicateType m_pred;
+
+ KOKKOS_FUNCTION
+ StdUniqueCopyFunctor(InputIt first_from, InputIt last_from,
+ OutputIt first_dest, BinaryPredicateType pred)
+ : m_first_from(std::move(first_from)),
+ m_last_from(std::move(last_from)),
+ m_first_dest(std::move(first_dest)),
+ m_pred(std::move(pred)) {}
+
+ KOKKOS_FUNCTION
+ void operator()(const index_type i, std::size_t& update,
+ const bool final_pass) const {
+ const auto& val_i = m_first_from[i];
+ const auto& val_ip1 = m_first_from[i + 1];
+
+ if (final_pass) {
+ if (!m_pred(val_i, val_ip1)) {
+ m_first_dest[update] = val_i;
+ }
+ }
+
+ if (!m_pred(val_i, val_ip1)) {
+ update += 1;
+ }
+ }
+};
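+
+// Editor's note (illustrative, not part of the sources): during the scan,
+// `update` counts the positions i in [0, scan_size) where
+// first[i] != first[i+1]; on the final pass each such element is written to
+// the destination at its exclusive-scan offset. For {1, 1, 2, 3, 3} the
+// functor writes 1 and 2 and yields count == 2; the caller then copies the
+// last element (3) separately.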
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+ class PredicateType>
+OutputIterator unique_copy_exespace_impl(
+ const std::string& label, const ExecutionSpace& ex, InputIterator first,
+ InputIterator last, OutputIterator d_first, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+  // branch for trivial vs non-trivial case
+ const auto num_elements = Kokkos::Experimental::distance(first, last);
+ if (num_elements == 0) {
+ return d_first;
+ } else if (num_elements == 1) {
+ return Impl::copy_exespace_impl("Kokkos::copy_from_unique_copy", ex, first,
+ last, d_first);
+ } else {
+    // Note that we run the scan over num_elements - 1 entries:
+    // because of the way this is implemented, the last element always has
+    // to be copied. Rather than bounds-checking inside the functor, we run
+    // a "safe" scan and then copy the last element separately.
+ const auto scan_size = num_elements - 1;
+ std::size_t count = 0;
+ ::Kokkos::parallel_scan(
+ label, RangePolicy<ExecutionSpace>(ex, 0, scan_size),
+ // use CTAD
+ StdUniqueCopyFunctor(first, last, d_first, pred), count);
+
+ return Impl::copy_exespace_impl("Kokkos::copy_from_unique_copy", ex,
+ first + scan_size, last, d_first + count);
+ }
+}
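+
+// Editor's sketch (illustrative, assuming the public
+// Kokkos::Experimental::unique_copy API that forwards to this impl):
+//
+//   Kokkos::View<int*> in("in", n), out("out", n);
+//   // ... fill `in` ...
+//   auto res = Kokkos::Experimental::unique_copy(
+//       Kokkos::DefaultExecutionSpace{}, Kokkos::Experimental::begin(in),
+//       Kokkos::Experimental::end(in), Kokkos::Experimental::begin(out));
+//   // res points one past the last element written to `out`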
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator unique_copy_exespace_impl(const std::string& label,
+ const ExecutionSpace& ex,
+ InputIterator first,
+ InputIterator last,
+ OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // aliases
+ using value_type1 = typename InputIterator::value_type;
+ using value_type2 = typename OutputIterator::value_type;
+ using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+
+ // run
+ return unique_copy_exespace_impl(label, ex, first, last, d_first,
+ binary_pred_t());
+}
+
+//
+// team level
+//
+
+template <class TeamHandleType, class InputIterator, class OutputIterator,
+ class PredicateType>
+KOKKOS_FUNCTION OutputIterator unique_copy_team_impl(
+ const TeamHandleType& teamHandle, InputIterator first, InputIterator last,
+ OutputIterator d_first, PredicateType pred) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+  // branch for trivial vs non-trivial case
+ const std::size_t num_elements = Kokkos::Experimental::distance(first, last);
+ if (num_elements == 0) {
+ return d_first;
+ } else if (num_elements == 1) {
+ d_first[0] = first[0];
+ return d_first + 1;
+  } else {
+ if constexpr (stdalgo_must_use_kokkos_single_for_team_scan_v<
+ typename TeamHandleType::execution_space>) {
+ std::size_t count = 0;
+ Kokkos::single(
+ Kokkos::PerTeam(teamHandle),
+ [=](std::size_t& lcount) {
+ lcount = 0;
+ for (std::size_t i = 0; i < num_elements - 1; ++i) {
+ const auto& val_i = first[i];
+ const auto& val_ip1 = first[i + 1];
+ if (!pred(val_i, val_ip1)) {
+ d_first[lcount++] = val_i;
+ }
+ }
+            // the last element always needs to be copied
+ d_first[lcount++] = first[num_elements - 1];
+ },
+ count);
+ // no barrier needed since single above broadcasts to all members
+
+ return d_first + count;
+ } else {
+ const auto scan_size = num_elements - 1;
+ std::size_t count = 0;
+ ::Kokkos::parallel_scan(TeamThreadRange(teamHandle, 0, scan_size),
+ StdUniqueCopyFunctor(first, last, d_first, pred),
+ count);
+ // no barrier needed since reducing into count
+
+ return Impl::copy_team_impl(teamHandle, first + scan_size, last,
+ d_first + count);
+ }
+
+#if defined KOKKOS_COMPILER_INTEL || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+ }
+}
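+
+// Editor's sketch (illustrative, assuming the public team-level unique_copy
+// overload that forwards to this impl): at team level the algorithm is
+// typically called from inside a TeamPolicy kernel, e.g. with per-team views
+// `in` and `out`:
+//
+//   using member_type = Kokkos::TeamPolicy<>::member_type;
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<>(league_size, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const member_type& team) {
+//         Kokkos::Experimental::unique_copy(
+//             team, Kokkos::Experimental::begin(in),
+//             Kokkos::Experimental::end(in),
+//             Kokkos::Experimental::begin(out));
+//       });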
+
+template <class TeamHandleType, class InputIterator, class OutputIterator>
+KOKKOS_FUNCTION OutputIterator
+unique_copy_team_impl(const TeamHandleType& teamHandle, InputIterator first,
+ InputIterator last, OutputIterator d_first) {
+ // checks
+ Impl::static_assert_random_access_and_accessible(teamHandle, first, d_first);
+ Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+ Impl::expect_valid_range(first, last);
+
+ // aliases
+ using value_type1 = typename InputIterator::value_type;
+ using value_type2 = typename OutputIterator::value_type;
+
+ // default binary predicate uses ==
+ using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+
+ // run
+ return unique_copy_team_impl(teamHandle, first, last, d_first,
+ binary_pred_t());
+}
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+//
+// scalar wrapper used for reductions and scans
+// when we don't have a neutral element
+//
+template <class Scalar>
+struct ValueWrapperForNoNeutralElement {
+ Scalar val;
+ bool is_initial = true;
+};
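+
+// Editor's sketch (illustrative, not part of the sources): a reducer's join
+// can use `is_initial` to detect a lane that has not yet absorbed a real
+// value (`op` below is a placeholder binary operation):
+//
+//   KOKKOS_FUNCTION void join(wrapper_type& dest, const wrapper_type& src) {
+//     if (dest.is_initial)       dest = src;
+//     else if (!src.is_initial)  dest.val = op(dest.val, src.val);
+//   }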
+
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+if(NOT Kokkos_INSTALL_TESTING)
+ add_subdirectory(src)
+endif()
+
+# FIXME_OPENACC: temporarily disabled due to unimplemented features
+if(NOT KOKKOS_ENABLE_OPENACC)
+ kokkos_add_test_directories(unit_tests)
+ kokkos_add_test_directories(performance_tests)
+endif()
--- /dev/null
+# Needed here for now
+kokkos_include_directories(${CMAKE_CURRENT_BINARY_DIR})
+kokkos_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+#-----------------------------------------------------------------------------
+
+set(KOKKOS_CONTAINERS_SRCS)
+append_glob(KOKKOS_CONTAINERS_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/impl/*.cpp)
+set(KOKKOS_CONTAINERS_HEADERS)
+append_glob(KOKKOS_CONTAINERS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/impl/*.hpp)
+append_glob(KOKKOS_CONTAINERS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+
+install(
+ DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "*.hpp"
+)
+
+kokkos_add_library(kokkoscontainers SOURCES ${KOKKOS_CONTAINERS_SRCS} HEADERS ${KOKKOS_CONTAINERS_HEADERS})
+
+kokkos_lib_include_directories(
+ kokkoscontainers ${KOKKOS_TOP_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+)
+kokkos_link_internal_library(kokkoscontainers kokkoscore)
+
+#-----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_BITSET_HPP
#define KOKKOS_BITSET_HPP
block_shift = Kokkos::Impl::integral_power_of_two(block_size)
};
+ //! Type of @ref m_blocks.
+ using block_view_type = View<unsigned*, Device, MemoryTraits<RandomAccess>>;
+
public:
- /// constructor
+ Bitset() = default;
+
  /// arg_size := number of bits in the set
- Bitset(unsigned arg_size = 0u)
- : m_size(arg_size),
- m_last_block_mask(0u),
- m_blocks("Bitset", ((m_size + block_mask) >> block_shift)) {
+ Bitset(unsigned arg_size) : Bitset(Kokkos::view_alloc(), arg_size) {}
+
+ template <class... P>
+ Bitset(const Impl::ViewCtorProp<P...>& arg_prop, unsigned arg_size)
+ : m_size(arg_size), m_last_block_mask(0u) {
+ //! Ensure that allocation properties are consistent.
+ using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
+ static_assert(alloc_prop_t::initialize,
+ "Allocation property 'initialize' should be true.");
+ static_assert(
+ !alloc_prop_t::has_pointer,
+ "Allocation properties should not contain the 'pointer' property.");
+
+ //! Update 'label' property and allocate.
+ const auto prop_copy =
+ Impl::with_properties_if_unset(arg_prop, std::string("Bitset"));
+ m_blocks =
+ block_view_type(prop_copy, ((m_size + block_mask) >> block_shift));
+
for (int i = 0, end = static_cast<int>(m_size & block_mask); i < end; ++i) {
m_last_block_mask |= 1u << i;
}
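
// Editor's sketch (illustrative, not part of the sources): the ViewCtorProp
// constructor above lets callers label or customize the underlying
// allocation, e.g.
//
//   Kokkos::Bitset<Kokkos::DefaultExecutionSpace> b(
//       Kokkos::view_alloc("my_bitset"), 1024);
//
// When no label is given, the allocation defaults to the label "Bitset".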
/// number of bits which are set to 1
/// can only be called from the host
unsigned count() const {
- Impl::BitsetCount<Bitset<Device> > f(*this);
+ Impl::BitsetCount<Bitset<Device>> f(*this);
return f.apply();
}
offset = !(scan_direction & BIT_SCAN_REVERSE)
? offset
: (offset + block_mask) & block_mask;
- block = Impl::rotate_right(block, offset);
+ block = Impl::rotate_right(block, offset);
return (((!(scan_direction & BIT_SCAN_REVERSE)
? Impl::bit_scan_forward(block)
: Impl::int_log2(block)) +
}
private:
- unsigned m_size;
- unsigned m_last_block_mask;
- View<unsigned*, Device, MemoryTraits<RandomAccess> > m_blocks;
+ unsigned m_size = 0;
+ unsigned m_last_block_mask = 0;
+ block_view_type m_blocks;
private:
template <typename DDevice>
public:
using execution_space = typename Device::execution_space;
using size_type = unsigned int;
+ using block_view_type = typename Bitset<Device>::block_view_type::const_type;
private:
enum { block_size = static_cast<unsigned>(sizeof(unsigned) * CHAR_BIT) };
unsigned size() const { return m_size; }
unsigned count() const {
- Impl::BitsetCount<ConstBitset<Device> > f(*this);
+ Impl::BitsetCount<ConstBitset<Device>> f(*this);
return f.apply();
}
private:
unsigned m_size;
- View<const unsigned*, Device, MemoryTraits<RandomAccess> > m_blocks;
+ block_view_type m_blocks;
private:
template <typename DDevice>
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_DualView.hpp
/// \brief Declaration and definition of Kokkos::DualView.
#endif // KOKKOS_ENABLE_CUDA
} // namespace Impl
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
template <class DataType, class Arg1Type = void, class Arg2Type = void,
class Arg3Type = void>
+class DualView;
+#else
+template <class DataType, class... Properties>
+class DualView;
+#endif
+
+template <class>
+struct is_dual_view : public std::false_type {};
+
+template <class DT, class... DP>
+struct is_dual_view<DualView<DT, DP...>> : public std::true_type {};
+
+template <class DT, class... DP>
+struct is_dual_view<const DualView<DT, DP...>> : public std::true_type {};
+
+template <class T>
+inline constexpr bool is_dual_view_v = is_dual_view<T>::value;
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type>
class DualView : public ViewTraits<DataType, Arg1Type, Arg2Type, Arg3Type> {
template <class, class, class, class>
+#else
+template <class DataType, class... Properties>
+class DualView : public ViewTraits<DataType, Properties...> {
+ template <class, class...>
+#endif
friend class DualView;
public:
//! \name Typedefs for device types and various Kokkos::View specializations.
//@{
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
using traits = ViewTraits<DataType, Arg1Type, Arg2Type, Arg3Type>;
+#else
+ using traits = ViewTraits<DataType, Properties...>;
+#endif
  //! The Kokkos Host Device type.
using host_mirror_space = typename traits::host_mirror_space;
//! The type of a Kokkos::View on the device.
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
using t_dev = View<typename traits::data_type, Arg1Type, Arg2Type, Arg3Type>;
+#else
+ using t_dev = View<typename traits::data_type, Properties...>;
+#endif
/// \typedef t_host
/// \brief The type of a Kokkos::View host mirror of \c t_dev.
//! The type of a const View on the device.
//! The type of a Kokkos::View on the device.
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
using t_dev_const =
View<typename traits::const_data_type, Arg1Type, Arg2Type, Arg3Type>;
+#else
+ using t_dev_const = View<typename traits::const_data_type, Properties...>;
+#endif
/// \typedef t_host_const
/// \brief The type of a const View host mirror of \c t_dev_const.
const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
- : modified_flags(t_modified_flags("DualView::modified_flags")),
- d_view(arg_prop, n0, n1, n2, n3, n4, n5, n6, n7) {
- // without UVM, host View mirrors
- if (Kokkos::Impl::has_type<Impl::WithoutInitializing_t, P...>::value)
- h_view = Kokkos::create_mirror_view(Kokkos::WithoutInitializing, d_view);
- else
- h_view = Kokkos::create_mirror_view(d_view);
+ : modified_flags(t_modified_flags("DualView::modified_flags")) {
+ if constexpr (Impl::ViewCtorProp<P...>::sequential_host_init) {
+ h_view = t_host(arg_prop, n0, n1, n2, n3, n4, n5, n6, n7);
+ static_assert(Impl::ViewCtorProp<P...>::initialize,
+ "DualView: SequentialHostInit isn't compatible with "
+ "WithoutInitializing!");
+ static_assert(!Impl::ViewCtorProp<P...>::has_execution_space,
+ "DualView: SequentialHostInit isn't compatible with "
+ "providing an execution space instance!");
+
+ d_view = Kokkos::create_mirror_view_and_copy(
+ typename traits::memory_space{}, h_view);
+ } else {
+ d_view = t_dev(arg_prop, n0, n1, n2, n3, n4, n5, n6, n7);
+
+ // without UVM, host View mirrors
+ if constexpr (Kokkos::Impl::has_type<Impl::WithoutInitializing_t,
+ P...>::value)
+ h_view =
+ Kokkos::create_mirror_view(Kokkos::WithoutInitializing, d_view);
+ else
+ h_view = Kokkos::create_mirror_view(d_view);
+ }
}
//! Copy constructor (shallow copy)
- template <class SS, class LS, class DS, class MS>
- DualView(const DualView<SS, LS, DS, MS>& src)
+ template <typename DT, typename... DP>
+ DualView(const DualView<DT, DP...>& src)
: modified_flags(src.modified_flags),
d_view(src.d_view),
h_view(src.h_view) {}
//! Subview constructor
- template <class SD, class S1, class S2, class S3, class Arg0, class... Args>
- DualView(const DualView<SD, S1, S2, S3>& src, const Arg0& arg0, Args... args)
+ template <class DT, class... DP, class Arg0, class... Args>
+ DualView(const DualView<DT, DP...>& src, const Arg0& arg0, Args... args)
: modified_flags(src.modified_flags),
d_view(Kokkos::subview(src.d_view, arg0, args...)),
h_view(Kokkos::subview(src.h_view, arg0, args...)) {}
// does the DualView have only one device
struct impl_dualview_is_single_device {
enum : bool {
- value = std::is_same<typename t_dev::device_type,
- typename t_host::device_type>::value
+ value = std::is_same_v<typename t_dev::device_type,
+ typename t_host::device_type>
};
};
// does the given device match the device of t_dev?
template <typename Device>
struct impl_device_matches_tdev_device {
- enum : bool {
- value = std::is_same<typename t_dev::device_type, Device>::value
- };
+ enum : bool { value = std::is_same_v<typename t_dev::device_type, Device> };
};
// does the given device match the device of t_host?
template <typename Device>
struct impl_device_matches_thost_device {
enum : bool {
- value = std::is_same<typename t_host::device_type, Device>::value
+ value = std::is_same_v<typename t_host::device_type, Device>
};
};
template <typename Device>
struct impl_device_matches_thost_exec {
enum : bool {
- value = std::is_same<typename t_host::execution_space, Device>::value
+ value = std::is_same_v<typename t_host::execution_space, Device>
};
};
template <typename Device>
struct impl_device_matches_tdev_exec {
enum : bool {
- value = std::is_same<typename t_dev::execution_space, Device>::value
+ value = std::is_same_v<typename t_dev::execution_space, Device>
};
};
template <typename Device>
struct impl_device_matches_tdev_memory_space {
enum : bool {
- value = std::is_same<typename t_dev::memory_space,
- typename Device::memory_space>::value
+ value = std::is_same_v<typename t_dev::memory_space,
+ typename Device::memory_space>
};
};
/// \brief Return a View on a specific device \c Device.
///
- /// Please don't be afraid of the nested if_c expressions in the return
- /// value's type. That just tells the method what the return type
- /// should be: t_dev if the \c Device template parameter matches
- /// this DualView's device type, else t_host.
- ///
/// For example, suppose you create a DualView on Cuda, like this:
/// \code
/// using dual_view_type =
/// typename dual_view_type::t_host hostView = DV.view<host_device_type> ();
/// \endcode
template <class Device>
- KOKKOS_INLINE_FUNCTION const typename std::conditional_t<
- impl_device_matches_tdev_device<Device>::value, t_dev,
- typename std::conditional_t<
- impl_device_matches_thost_device<Device>::value, t_host,
- typename std::conditional_t<
- impl_device_matches_thost_exec<Device>::value, t_host,
- typename std::conditional_t<
- impl_device_matches_tdev_exec<Device>::value, t_dev,
- typename std::conditional_t<
- impl_device_matches_tdev_memory_space<Device>::value,
- t_dev, t_host>>>>>
- view() const {
- constexpr bool device_is_memspace =
- std::is_same<Device, typename Device::memory_space>::value;
- constexpr bool device_is_execspace =
- std::is_same<Device, typename Device::execution_space>::value;
- constexpr bool device_exec_is_t_dev_exec =
- std::is_same<typename Device::execution_space,
- typename t_dev::execution_space>::value;
- constexpr bool device_mem_is_t_dev_mem =
- std::is_same<typename Device::memory_space,
- typename t_dev::memory_space>::value;
- constexpr bool device_exec_is_t_host_exec =
- std::is_same<typename Device::execution_space,
- typename t_host::execution_space>::value;
- constexpr bool device_mem_is_t_host_mem =
- std::is_same<typename Device::memory_space,
- typename t_host::memory_space>::value;
- constexpr bool device_is_t_host_device =
- std::is_same<typename Device::execution_space,
- typename t_host::device_type>::value;
- constexpr bool device_is_t_dev_device =
- std::is_same<typename Device::memory_space,
- typename t_host::device_type>::value;
-
- static_assert(
- device_is_t_dev_device || device_is_t_host_device ||
- (device_is_memspace &&
- (device_mem_is_t_dev_mem || device_mem_is_t_host_mem)) ||
- (device_is_execspace &&
- (device_exec_is_t_dev_exec || device_exec_is_t_host_exec)) ||
- ((!device_is_execspace && !device_is_memspace) &&
- ((device_mem_is_t_dev_mem || device_mem_is_t_host_mem) ||
- (device_exec_is_t_dev_exec || device_exec_is_t_host_exec))),
- "Template parameter to .view() must exactly match one of the "
- "DualView's device types or one of the execution or memory spaces");
-
- return Impl::if_c<std::is_same<typename t_dev::memory_space,
- typename Device::memory_space>::value,
- t_dev, t_host>::select(d_view, h_view);
+ KOKKOS_FUNCTION auto view() const {
+ if constexpr (std::is_same_v<Device, typename Device::memory_space>) {
+ if constexpr (std::is_same_v<typename Device::memory_space,
+ typename t_dev::memory_space>) {
+ return d_view;
+ } else {
+ static_assert(std::is_same_v<typename Device::memory_space,
+ typename t_host::memory_space>,
+ "The template argument is a memory space but doesn't "
+ "match either of DualView's memory spaces!");
+ return h_view;
+ }
+ } else {
+ if constexpr (std::is_same_v<Device, typename Device::execution_space>) {
+ if constexpr (std::is_same_v<typename Device::execution_space,
+ typename t_dev::execution_space>) {
+ return d_view;
+ } else {
+ static_assert(std::is_same_v<typename Device::execution_space,
+ typename t_host::execution_space>,
+ "The template argument is an execution space but "
+ "doesn't match either of DualView's execution spaces!");
+ return h_view;
+ }
+ } else {
+ static_assert(std::is_same_v<Device, typename Device::device_type>,
+ "The template argument is neither a memory space, "
+ "execution space, or device!");
+ if constexpr (std::is_same_v<Device, typename t_dev::device_type>)
+ return d_view;
+ else {
+ static_assert(std::is_same_v<Device, typename t_host::device_type>,
+ "The template argument is a device but "
+ "doesn't match either of DualView's devices!");
+ return h_view;
+ }
+ }
+ }
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
}
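
// Editor's sketch (illustrative): the constexpr dispatch above accepts a
// memory space, an execution space, or a full device type, e.g. assuming a
// DualView `dv` whose host mirror lives in HostSpace:
//
//   auto d = dv.view<typename decltype(dv)::t_dev::memory_space>();  // d_view
//   auto h = dv.view<Kokkos::HostSpace>();                           // h_view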
KOKKOS_INLINE_FUNCTION
template <class Device>
static int get_device_side() {
constexpr bool device_is_memspace =
- std::is_same<Device, typename Device::memory_space>::value;
+ std::is_same_v<Device, typename Device::memory_space>;
constexpr bool device_is_execspace =
- std::is_same<Device, typename Device::execution_space>::value;
+ std::is_same_v<Device, typename Device::execution_space>;
constexpr bool device_exec_is_t_dev_exec =
- std::is_same<typename Device::execution_space,
- typename t_dev::execution_space>::value;
+ std::is_same_v<typename Device::execution_space,
+ typename t_dev::execution_space>;
constexpr bool device_mem_is_t_dev_mem =
- std::is_same<typename Device::memory_space,
- typename t_dev::memory_space>::value;
+ std::is_same_v<typename Device::memory_space,
+ typename t_dev::memory_space>;
constexpr bool device_exec_is_t_host_exec =
- std::is_same<typename Device::execution_space,
- typename t_host::execution_space>::value;
+ std::is_same_v<typename Device::execution_space,
+ typename t_host::execution_space>;
constexpr bool device_mem_is_t_host_mem =
- std::is_same<typename Device::memory_space,
- typename t_host::memory_space>::value;
+ std::is_same_v<typename Device::memory_space,
+ typename t_host::memory_space>;
constexpr bool device_is_t_host_device =
- std::is_same<typename Device::execution_space,
- typename t_host::device_type>::value;
+ std::is_same_v<typename Device::execution_space,
+ typename t_host::device_type>;
constexpr bool device_is_t_dev_device =
- std::is_same<typename Device::memory_space,
- typename t_host::device_type>::value;
+ std::is_same_v<typename Device::memory_space,
+ typename t_host::device_type>;
static_assert(
device_is_t_dev_device || device_is_t_host_device ||
impl_report_host_sync();
}
}
- if (std::is_same<typename t_host::memory_space,
- typename t_dev::memory_space>::value) {
+ if constexpr (std::is_same<typename t_host::memory_space,
+ typename t_dev::memory_space>::value) {
typename t_dev::execution_space().fence(
"Kokkos::DualView<>::sync: fence after syncing DualView");
typename t_host::execution_space().fence(
template <class Device>
void sync(const std::enable_if_t<
- (std::is_same<typename traits::data_type,
- typename traits::non_const_data_type>::value) ||
- (std::is_same<Device, int>::value),
+ (std::is_same_v<typename traits::data_type,
+ typename traits::non_const_data_type>) ||
+ (std::is_same_v<Device, int>),
int>& = 0) {
sync_impl<Device>(std::true_type{});
}
template <class Device, class ExecutionSpace>
void sync(const ExecutionSpace& exec,
const std::enable_if_t<
- (std::is_same<typename traits::data_type,
- typename traits::non_const_data_type>::value) ||
- (std::is_same<Device, int>::value),
+ (std::is_same_v<typename traits::data_type,
+ typename traits::non_const_data_type>) ||
+ (std::is_same_v<Device, int>),
int>& = 0) {
sync_impl<Device>(std::true_type{}, exec);
}
template <class Device>
void sync(const std::enable_if_t<
- (!std::is_same<typename traits::data_type,
- typename traits::non_const_data_type>::value) ||
- (std::is_same<Device, int>::value),
+ (!std::is_same_v<typename traits::data_type,
+ typename traits::non_const_data_type>) ||
+ (std::is_same_v<Device, int>),
int>& = 0) {
sync_impl<Device>(std::false_type{});
}
template <class Device, class ExecutionSpace>
void sync(const ExecutionSpace& exec,
const std::enable_if_t<
- (!std::is_same<typename traits::data_type,
- typename traits::non_const_data_type>::value) ||
- (std::is_same<Device, int>::value),
+ (!std::is_same_v<typename traits::data_type,
+ typename traits::non_const_data_type>) ||
+ (std::is_same_v<Device, int>),
int>& = 0) {
sync_impl<Device>(std::false_type{}, exec);
}
Impl::size_mismatch(h_view, h_view.rank_dynamic, new_extents);
if (sizeMismatch) {
- ::Kokkos::realloc(arg_prop, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
- if (alloc_prop_input::initialize) {
- h_view = create_mirror_view(typename t_host::memory_space(), d_view);
+ if constexpr (alloc_prop_input::sequential_host_init) {
+ static_assert(alloc_prop_input::initialize,
+ "DualView: SequentialHostInit isn't compatible with "
+ "WithoutInitializing!");
+ ::Kokkos::realloc(arg_prop, h_view, n0, n1, n2, n3, n4, n5, n6, n7);
+ d_view =
+ create_mirror_view_and_copy(typename t_dev::memory_space(), h_view);
} else {
- h_view = create_mirror_view(Kokkos::WithoutInitializing,
- typename t_host::memory_space(), d_view);
+ ::Kokkos::realloc(arg_prop, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
+ if constexpr (alloc_prop_input::initialize) {
+ h_view = create_mirror_view(typename t_host::memory_space(), d_view);
+ } else {
+ h_view = create_mirror_view(Kokkos::WithoutInitializing,
+ typename t_host::memory_space(), d_view);
+ }
}
- } else if (alloc_prop_input::initialize) {
- if (alloc_prop_input::has_execution_space) {
- // Add execution_space if not provided to avoid need for if constexpr
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 2>,
- typename t_dev::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
- using execution_space_type = typename alloc_prop::execution_space;
- const execution_space_type& exec_space =
- static_cast<
- Kokkos::Impl::ViewCtorProp<void, execution_space_type> const&>(
- arg_prop_copy)
- .value;
+ } else if constexpr (alloc_prop_input::initialize) {
+ if constexpr (alloc_prop_input::has_execution_space) {
+ const auto& exec_space =
+ Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
::Kokkos::deep_copy(exec_space, d_view, typename t_dev::value_type{});
} else
::Kokkos::deep_copy(d_view, typename t_dev::value_type{});
if (modified_flags.data() == nullptr) {
modified_flags = t_modified_flags("DualView::modified_flags");
}
- if (modified_flags(1) >= modified_flags(0)) {
+
+ [[maybe_unused]] auto resize_on_device = [&](const auto& properties) {
/* Resize on Device */
if (sizeMismatch) {
- ::Kokkos::resize(arg_prop, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
- if (alloc_prop_input::initialize) {
- h_view = create_mirror_view(typename t_host::memory_space(), d_view);
- } else {
- h_view = create_mirror_view(Kokkos::WithoutInitializing,
- typename t_host::memory_space(), d_view);
- }
+ ::Kokkos::resize(properties, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
+      // this part of the lambda was relocated into a method because it
+      // contains an `if constexpr`; in some cases both branches were
+      // evaluated, leading to a compile error
+ resync_host(properties);
/* Mark Device copy as modified */
++modified_flags(1);
}
- } else {
+ };
+
+ [[maybe_unused]] auto resize_on_host = [&](const auto& properties) {
/* Resize on Host */
if (sizeMismatch) {
- ::Kokkos::resize(arg_prop, h_view, n0, n1, n2, n3, n4, n5, n6, n7);
- if (alloc_prop_input::initialize) {
- d_view = create_mirror_view(typename t_dev::memory_space(), h_view);
-
- } else {
- d_view = create_mirror_view(Kokkos::WithoutInitializing,
- typename t_dev::memory_space(), h_view);
- }
+ ::Kokkos::resize(properties, h_view, n0, n1, n2, n3, n4, n5, n6, n7);
+      // this part of the lambda was relocated into a method because it
+      // contains an `if constexpr`; in some cases both branches were
+      // evaluated, leading to a compile error
+ resync_device(properties);
/* Mark Host copy as modified */
++modified_flags(0);
}
+ };
+
+ if constexpr (alloc_prop_input::sequential_host_init) {
+ static_assert(alloc_prop_input::initialize,
+ "DualView: SequentialHostInit isn't compatible with "
+ "WithoutInitializing!");
+ static_assert(!alloc_prop_input::has_execution_space,
+ "DualView: SequentialHostInit isn't compatible with "
+ "providing an execution space instance!");
+
+ if (sizeMismatch) {
+ sync<typename t_host::memory_space>();
+ ::Kokkos::resize(arg_prop, h_view, n0, n1, n2, n3, n4, n5, n6, n7);
+ d_view =
+ create_mirror_view_and_copy(typename t_dev::memory_space(), h_view);
+ }
+ return;
+ } else if constexpr (alloc_prop_input::has_execution_space) {
+ using ExecSpace = typename alloc_prop_input::execution_space;
+ const auto& exec_space =
+ Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
+ constexpr bool exec_space_can_access_device =
+ SpaceAccessibility<ExecSpace,
+ typename t_dev::memory_space>::accessible;
+ constexpr bool exec_space_can_access_host =
+ SpaceAccessibility<ExecSpace,
+ typename t_host::memory_space>::accessible;
+ static_assert(exec_space_can_access_device || exec_space_can_access_host);
+ if constexpr (exec_space_can_access_device) {
+ sync<typename t_dev::memory_space>(exec_space);
+ resize_on_device(arg_prop);
+ return;
+ }
+ if constexpr (exec_space_can_access_host) {
+ sync<typename t_host::memory_space>(exec_space);
+ resize_on_host(arg_prop);
+ return;
+ }
+ } else {
+ if (modified_flags(1) >= modified_flags(0)) {
+ resize_on_device(arg_prop);
+ } else {
+ resize_on_host(arg_prop);
+ }
+ }
+ }
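+
+  // Editor's sketch (illustrative, assuming the property-taking resize
+  // overload): with SequentialHostInit the host view is resized first and
+  // the device view is then recreated as a mirror copy, e.g.
+  //
+  //   dv.resize(Kokkos::view_alloc(Kokkos::SequentialHostInit), new_n0);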
+
+ private:
+ // resync host mirror from device
+  // this code was relocated from a lambda because it contains an
+  // `if constexpr`; in some cases both branches were evaluated, leading
+  // to a compile error
+ template <class... ViewCtorArgs>
+ inline void resync_host(Impl::ViewCtorProp<ViewCtorArgs...> const&) {
+ using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ if constexpr (alloc_prop_input::initialize) {
+ h_view = create_mirror_view(typename t_host::memory_space(), d_view);
+ } else {
+ h_view = create_mirror_view(Kokkos::WithoutInitializing,
+ typename t_host::memory_space(), d_view);
+ }
+ }
+
+ // resync device mirror from host
+  // this code was relocated from a lambda because it contains an
+  // `if constexpr`; in some cases both branches were evaluated, leading
+  // to a compile error
+ template <class... ViewCtorArgs>
+ inline void resync_device(Impl::ViewCtorProp<ViewCtorArgs...> const&) {
+ using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ if constexpr (alloc_prop_input::initialize) {
+ d_view = create_mirror_view(typename t_dev::memory_space(), h_view);
+ } else {
+ d_view = create_mirror_view(Kokkos::WithoutInitializing,
+ typename t_dev::memory_space(), h_view);
}
}
+ public:
void resize(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
}
template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<std::is_integral_v<iType>,
+ size_t>
extent(const iType& r) const {
return d_view.extent(r);
}
template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, int>
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<std::is_integral_v<iType>,
+ int>
extent_int(const iType& r) const {
return static_cast<int>(d_view.extent(r));
}
namespace Kokkos {
namespace Impl {
-template <class D, class A1, class A2, class A3, class... Args>
-struct DualViewSubview {
- using dst_traits = typename Kokkos::Impl::ViewMapping<
- void, Kokkos::ViewTraits<D, A1, A2, A3>, Args...>::traits_type;
+template <class V>
+struct V2DV;
- using type = Kokkos::DualView<
- typename dst_traits::data_type, typename dst_traits::array_layout,
- typename dst_traits::device_type, typename dst_traits::memory_traits>;
+template <class D, class... P>
+struct V2DV<View<D, P...>> {
+ using type = DualView<D, P...>;
};
-
} /* namespace Impl */
-template <class D, class A1, class A2, class A3, class... Args>
-typename Impl::DualViewSubview<D, A1, A2, A3, Args...>::type subview(
- const DualView<D, A1, A2, A3>& src, Args... args) {
- return typename Impl::DualViewSubview<D, A1, A2, A3, Args...>::type(src,
- args...);
+template <class DataType, class... Properties, class... Args>
+auto subview(const DualView<DataType, Properties...>& src, Args&&... args) {
+ // leverage Kokkos::View facilities to deduce the properties of the subview
+ using deduce_subview_type =
+ decltype(subview(std::declval<View<DataType, Properties...>>(),
+ std::forward<Args>(args)...));
+ // map it back to dual view
+ return typename Impl::V2DV<deduce_subview_type>::type(
+ src, std::forward<Args>(args)...);
}
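
// Editor's sketch (illustrative): the deduction above makes subview of a
// DualView mirror the corresponding View subview, e.g.
//
//   Kokkos::DualView<double**> dv("dv", 10, 10);
//   auto row = Kokkos::subview(dv, 3, Kokkos::ALL);  // rank-1 DualView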
} /* namespace Kokkos */
// Partial specialization of Kokkos::deep_copy() for DualView objects.
//
-template <class DT, class DL, class DD, class DM, class ST, class SL, class SD,
- class SM>
-void deep_copy(
- DualView<DT, DL, DD, DM> dst, // trust me, this must not be a reference
- const DualView<ST, SL, SD, SM>& src) {
+template <class DT, class... DP, class ST, class... SP>
+void deep_copy(DualView<DT, DP...>& dst, const DualView<ST, SP...>& src) {
if (src.need_sync_device()) {
deep_copy(dst.h_view, src.h_view);
dst.modify_host();
}
}
-template <class ExecutionSpace, class DT, class DL, class DD, class DM,
- class ST, class SL, class SD, class SM>
-void deep_copy(
- const ExecutionSpace& exec,
- DualView<DT, DL, DD, DM> dst, // trust me, this must not be a reference
- const DualView<ST, SL, SD, SM>& src) {
+template <class ExecutionSpace, class DT, class... DP, class ST, class... SP>
+void deep_copy(const ExecutionSpace& exec, DualView<DT, DP...>& dst,
+ const DualView<ST, SP...>& src) {
if (src.need_sync_device()) {
deep_copy(exec, dst.h_view, src.h_view);
dst.modify_host();
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/// \file Kokkos_DynRankView.hpp
+/// \brief Declaration and definition of Kokkos::DynRankView.
+///
+/// This header file declares and defines Kokkos::DynRankView and its
+/// related nonmember functions.
+
+#ifndef KOKKOS_DYNRANKVIEW_HPP
+#define KOKKOS_DYNRANKVIEW_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+
+template <typename DataType, class... Properties>
+class DynRankView; // forward declare
+
+namespace Impl {
+
+template <class T, size_t Rank>
+struct ViewDataTypeFromRank {
+ using type = typename ViewDataTypeFromRank<T, Rank - 1>::type*;
+};
+
+template <class T>
+struct ViewDataTypeFromRank<T, 0> {
+ using type = T;
+};
+
+template <unsigned N, typename T, typename... Args>
+KOKKOS_FUNCTION View<typename ViewDataTypeFromRank<T, N>::type, Args...>
+as_view_of_rank_n(
+ DynRankView<T, Args...> v,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<T, Args...>::specialize,
+ void>>* = nullptr);
+
+template <typename Specialize>
+struct DynRankDimTraits {
+ enum : size_t { unspecified = KOKKOS_INVALID_INDEX };
+
+  // Compute the rank of the view from how many dimension arguments are
+  // specified.
+ KOKKOS_INLINE_FUNCTION
+ static size_t computeRank(const size_t N0, const size_t N1, const size_t N2,
+ const size_t N3, const size_t N4, const size_t N5,
+ const size_t N6, const size_t /* N7 */) {
+ return (
+ (N6 == unspecified && N5 == unspecified && N4 == unspecified &&
+ N3 == unspecified && N2 == unspecified && N1 == unspecified &&
+ N0 == unspecified)
+ ? 0
+ : ((N6 == unspecified && N5 == unspecified && N4 == unspecified &&
+ N3 == unspecified && N2 == unspecified && N1 == unspecified)
+ ? 1
+ : ((N6 == unspecified && N5 == unspecified &&
+ N4 == unspecified && N3 == unspecified &&
+ N2 == unspecified)
+ ? 2
+ : ((N6 == unspecified && N5 == unspecified &&
+ N4 == unspecified && N3 == unspecified)
+ ? 3
+ : ((N6 == unspecified && N5 == unspecified &&
+ N4 == unspecified)
+ ? 4
+ : ((N6 == unspecified &&
+ N5 == unspecified)
+ ? 5
+ : ((N6 == unspecified)
+ ? 6
+ : 7)))))));
+ }
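+
+  // Editor's note (illustrative): e.g. computeRank(10, 20, unspecified,
+  // unspecified, unspecified, unspecified, unspecified, unspecified) == 2,
+  // since only the first two extents are specified.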
+
+  // Compute the rank of the view from the specified layout dimensions.
+ template <typename Layout>
+ KOKKOS_INLINE_FUNCTION static size_t computeRank(const Layout& layout) {
+ return computeRank(layout.dimension[0], layout.dimension[1],
+ layout.dimension[2], layout.dimension[3],
+ layout.dimension[4], layout.dimension[5],
+ layout.dimension[6], layout.dimension[7]);
+ }
+
+ // Extra overload to match that for specialize types v2
+ template <typename Layout, typename... P>
+ KOKKOS_INLINE_FUNCTION static size_t computeRank(
+ const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
+ const Layout& layout) {
+ return computeRank(layout);
+ }
+
+ // Create the layout for the rank-7 view.
+ // Because the underlying View is rank-7, preserve "unspecified" for
+ // dimension 8.
+
+ // Non-strided Layout
+ template <typename Layout>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ (std::is_same_v<Layout, Kokkos::LayoutRight> ||
+ std::is_same_v<Layout, Kokkos::LayoutLeft>),
+ Layout>
+ createLayout(const Layout& layout) {
+ Layout new_layout(
+ layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
+ layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
+ layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
+ layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
+ layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
+ layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
+ layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
+ layout.dimension[7] != unspecified ? layout.dimension[7] : unspecified);
+ new_layout.stride = layout.stride;
+ return new_layout;
+ }
+
+ // LayoutStride
+ template <typename Layout>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ (std::is_same_v<Layout, Kokkos::LayoutStride>), Layout>
+ createLayout(const Layout& layout) {
+ return Layout(
+ layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
+ layout.stride[0],
+ layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
+ layout.stride[1],
+ layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
+ layout.stride[2],
+ layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
+ layout.stride[3],
+ layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
+ layout.stride[4],
+ layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
+ layout.stride[5],
+ layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
+ layout.stride[6],
+ layout.dimension[7] != unspecified ? layout.dimension[7] : unspecified,
+ layout.stride[7]);
+ }
+
+ // Extra overload to match that for specialize types
+ template <typename Traits, typename... P>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ (std::is_same_v<typename Traits::array_layout, Kokkos::LayoutRight> ||
+ std::is_same_v<typename Traits::array_layout, Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Traits::array_layout, Kokkos::LayoutStride>),
+ typename Traits::array_layout>
+ createLayout(const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
+ const typename Traits::array_layout& layout) {
+ return createLayout(layout);
+ }
+
+ // Create a view from the given dimension arguments.
+ // This is only necessary because the shmem constructor doesn't take a layout.
+  // NDE: shmem Views are not compatible with the added view_alloc value_type
+  // / fad_dim deduction functionality
+ template <typename ViewType, typename ViewArg>
+ static ViewType createView(const ViewArg& arg, const size_t N0,
+ const size_t N1, const size_t N2, const size_t N3,
+ const size_t N4, const size_t N5, const size_t N6,
+ const size_t N7) {
+ return ViewType(arg, N0 != unspecified ? N0 : 1, N1 != unspecified ? N1 : 1,
+ N2 != unspecified ? N2 : 1, N3 != unspecified ? N3 : 1,
+ N4 != unspecified ? N4 : 1, N5 != unspecified ? N5 : 1,
+ N6 != unspecified ? N6 : 1, N7 != unspecified ? N7 : 1);
+ }
+};
+
+// Non-strided Layout
+template <typename Layout, typename iType>
+KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ (std::is_same_v<Layout, Kokkos::LayoutRight> ||
+     std::is_same_v<Layout, Kokkos::LayoutLeft>) && std::is_integral_v<iType>,
+ Layout>
+reconstructLayout(const Layout& layout, iType dynrank) {
+ return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
+ dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
+ dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
+ dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
+ dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
+ dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
+ dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
+ dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX);
+}
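+
+// Editor's note (illustrative): reconstructLayout trims the rank-8 layout of
+// the underlying View back to the runtime rank, e.g. for dynrank == 2 only
+// dimension[0] and dimension[1] are kept and the remaining extents become
+// KOKKOS_INVALID_INDEX.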
+
+// LayoutStride
+template <typename Layout, typename iType>
+KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+    (std::is_same_v<Layout, Kokkos::LayoutStride>) && std::is_integral_v<iType>,
+ Layout>
+reconstructLayout(const Layout& layout, iType dynrank) {
+ return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
+ dynrank > 0 ? layout.stride[0] : (0),
+ dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
+ dynrank > 1 ? layout.stride[1] : (0),
+ dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
+ dynrank > 2 ? layout.stride[2] : (0),
+ dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
+ dynrank > 3 ? layout.stride[3] : (0),
+ dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
+ dynrank > 4 ? layout.stride[4] : (0),
+ dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
+ dynrank > 5 ? layout.stride[5] : (0),
+ dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
+ dynrank > 6 ? layout.stride[6] : (0),
+ dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX,
+ dynrank > 7 ? layout.stride[7] : (0));
+}
+
+/** \brief Debug bounds-checking routines */
+// Enhanced debug checking - most infrastructure matches that of the functions
+// in Kokkos_ViewMapping; additionally checks that arguments beyond the rank
+// are zero
+template <unsigned, typename iType0, class MapType>
+KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
+ const iType0&, const MapType&) {
+ return true;
+}
+
+template <unsigned R, typename iType0, class MapType, typename iType1,
+ class... Args>
+KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
+ const iType0& rank, const MapType& map, const iType1& i, Args... args) {
+ if (static_cast<iType0>(R) < rank) {
+ return (size_t(i) < map.extent(R)) &&
+ dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+ } else if (i != 0) {
+ Kokkos::printf(
+ "DynRankView Debug Bounds Checking Error: at rank %u\n Extra "
+ "arguments beyond the rank must be zero \n",
+ R);
+ return (false) &&
+ dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+ } else {
+ return (true) &&
+ dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+ }
+}
+
+template <unsigned, class MapType>
+inline void dyn_rank_view_error_operator_bounds(char*, int, const MapType&) {}
+
+template <unsigned R, class MapType, class iType, class... Args>
+inline void dyn_rank_view_error_operator_bounds(char* buf, int len,
+ const MapType& map,
+ const iType& i, Args... args) {
+ const int n = snprintf(
+      buf, len, " %lu < %lu %c", static_cast<unsigned long>(i),
+ static_cast<unsigned long>(map.extent(R)), (sizeof...(Args) ? ',' : ')'));
+ dyn_rank_view_error_operator_bounds<R + 1>(buf + n, len - n, map, args...);
+}
+
+// op_rank = rank of the operator version that was called
+template <typename MemorySpace, typename iType0, typename iType1, class MapType,
+ class... Args>
+KOKKOS_INLINE_FUNCTION void dyn_rank_view_verify_operator_bounds(
+ const iType0& op_rank, const iType1& rank,
+ const Kokkos::Impl::SharedAllocationTracker& tracker, const MapType& map,
+ Args... args) {
+ if (static_cast<iType0>(rank) > op_rank) {
+ Kokkos::abort(
+ "DynRankView Bounds Checking Error: Need at least rank arguments to "
+ "the operator()");
+ }
+
+ if (!dyn_rank_view_verify_operator_bounds<0>(rank, map, args...)) {
+ KOKKOS_IF_ON_HOST(
+ (enum {LEN = 1024}; char buffer[LEN];
+ const std::string label = tracker.template get_label<MemorySpace>();
+ int n = snprintf(buffer, LEN, "DynRankView bounds error of view %s (",
+ label.c_str());
+ dyn_rank_view_error_operator_bounds<0>(buffer + n, LEN - n, map,
+ args...);
+ Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+
+ KOKKOS_IF_ON_DEVICE(
+ ((void)tracker; Kokkos::abort("DynRankView bounds error");))
+ }
+}
+
+/** \brief Assign compatible default mappings */
+struct ViewToDynRankViewTag {};
+
+} // namespace Impl
+
+namespace Impl {
+
+template <class DstTraits, class SrcTraits>
+class ViewMapping<
+ DstTraits, SrcTraits,
+ std::enable_if_t<
+ (std::is_same_v<typename DstTraits::memory_space,
+ typename SrcTraits::memory_space> &&
+ std::is_void_v<typename DstTraits::specialize> &&
+ std::is_void_v<typename SrcTraits::specialize> &&
+         (std::is_same_v<typename DstTraits::array_layout,
+                         typename SrcTraits::array_layout> ||
+          ((std::is_same_v<typename DstTraits::array_layout,
+                           Kokkos::LayoutLeft> ||
+            std::is_same_v<typename DstTraits::array_layout,
+                           Kokkos::LayoutRight> ||
+            std::is_same_v<typename DstTraits::array_layout,
+                           Kokkos::LayoutStride>) &&
+           (std::is_same_v<typename SrcTraits::array_layout,
+                           Kokkos::LayoutLeft> ||
+            std::is_same_v<typename SrcTraits::array_layout,
+                           Kokkos::LayoutRight> ||
+            std::is_same_v<typename SrcTraits::array_layout,
+                           Kokkos::LayoutStride>)))),
+ Kokkos::Impl::ViewToDynRankViewTag>> {
+ private:
+ enum {
+ is_assignable_value_type =
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::value_type> ||
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::const_value_type>
+ };
+
+ enum {
+ is_assignable_layout =
+ std::is_same_v<typename DstTraits::array_layout,
+ typename SrcTraits::array_layout> ||
+ std::is_same_v<typename DstTraits::array_layout, Kokkos::LayoutStride>
+ };
+
+ public:
+ enum { is_assignable = is_assignable_value_type && is_assignable_layout };
+
+ using DstType = ViewMapping<DstTraits, typename DstTraits::specialize>;
+ using SrcType = ViewMapping<SrcTraits, typename SrcTraits::specialize>;
+
+ template <typename DT, typename... DP, typename ST, typename... SP>
+ KOKKOS_INLINE_FUNCTION static void assign(
+ Kokkos::DynRankView<DT, DP...>& dst, const Kokkos::View<ST, SP...>& src) {
+ static_assert(
+ is_assignable_value_type,
+ "View assignment must have same value type or const = non-const");
+
+ static_assert(
+ is_assignable_layout,
+ "View assignment must have compatible layout or have rank <= 1");
+
+ // Removed dimension checks...
+
+ using dst_offset_type = typename DstType::offset_type;
+ dst.m_map.m_impl_offset = dst_offset_type(
+ std::integral_constant<unsigned, 0>(),
+        src.layout());  // Check this for integer input for padding, etc.
+ dst.m_map.m_impl_handle = Kokkos::Impl::ViewDataHandle<DstTraits>::assign(
+ src.m_map.m_impl_handle, src.m_track.m_tracker);
+ dst.m_track.m_tracker.assign(src.m_track.m_tracker, DstTraits::is_managed);
+ dst.m_rank = Kokkos::View<ST, SP...>::rank();
+ }
+};
+
+} // namespace Impl
+
+/* \class DynRankView
+ * \brief Container that creates a Kokkos view with rank determined at runtime.
+ * Essentially this is a rank-7 view that additionally tracks its runtime rank
+ *
+ * Changes from View
+ * 1. The rank of the DynRankView is returned by the method rank()
+ * 2. Max rank of a DynRankView is 7
+ * 3. subview called with 'subview(...)' or 'subdynrankview(...)' (backward
+ * compatibility)
+ * 4. Every subview is returned with LayoutStride
+ * 5. Copy and Copy-Assign View to DynRankView
+ * 6. deep_copy between Views and DynRankViews
+ * 7. rank(view) returns the rank of a View or DynRankView
+ *
+ */
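+
+/* Example usage (illustrative sketch; names are arbitrary):
+ *
+ *   Kokkos::DynRankView<double> a("A", 10, 20);    // runtime rank deduced: 2
+ *   a(3, 4) = 1.0;                                 // rank-2 element access
+ *   auto s = Kokkos::subview(a, Kokkos::ALL(), 4); // rank-1, LayoutStride
+ */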
+
+template <class>
+struct is_dyn_rank_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_dyn_rank_view<Kokkos::DynRankView<D, P...>> : public std::true_type {
+};
+
+template <class T>
+inline constexpr bool is_dyn_rank_view_v = is_dyn_rank_view<T>::value;
+
+// Inherit privately from View so that we don't import anything funky, for
+// example the rank member of View vs. the rank() function of DynRankView
+template <typename DataType, class... Properties>
+class DynRankView : private View<DataType*******, Properties...> {
+ static_assert(!std::is_array_v<DataType> && !std::is_pointer_v<DataType>,
+ "Cannot template DynRankView with array or pointer datatype - "
+ "must be pod");
+
+ private:
+ template <class, class...>
+ friend class DynRankView;
+ template <class, class...>
+ friend class Kokkos::Impl::ViewMapping;
+
+ size_t m_rank{};
+
+ public:
+ using drvtraits = ViewTraits<DataType, Properties...>;
+
+ using view_type = View<DataType*******, Properties...>;
+
+ private:
+ using drdtraits = Impl::DynRankDimTraits<typename view_type::specialize>;
+
+ public:
+  // typedefs from ViewTraits, overridden
+ using data_type = typename drvtraits::data_type;
+ using const_data_type = typename drvtraits::const_data_type;
+ using non_const_data_type = typename drvtraits::non_const_data_type;
+
+  // typedefs from ViewTraits not overridden
+ using value_type = typename view_type::value_type;
+ using const_value_type = typename view_type::const_value_type;
+ using non_const_value_type = typename view_type::non_const_value_type;
+ using traits = typename view_type::traits;
+ using array_layout = typename view_type::array_layout;
+
+ using execution_space = typename view_type::execution_space;
+ using memory_space = typename view_type::memory_space;
+ using device_type = typename view_type::device_type;
+
+ using memory_traits = typename view_type::memory_traits;
+ using host_mirror_space = typename view_type::host_mirror_space;
+ using size_type = typename view_type::size_type;
+
+ using reference_type = typename view_type::reference_type;
+ using pointer_type = typename view_type::pointer_type;
+
+ using scalar_array_type = value_type;
+ using const_scalar_array_type = const_value_type;
+ using non_const_scalar_array_type = non_const_value_type;
+ using specialize = typename view_type::specialize;
+
+  // typedefs in View for mdspan compatibility; layout_type is commented out
+  // below because it causes issues with MSVC+CUDA
+ // using layout_type = typename view_type::layout_type;
+ using index_type = typename view_type::index_type;
+ using element_type = typename view_type::element_type;
+ using rank_type = typename view_type::rank_type;
+ using reference = reference_type;
+ using data_handle_type = pointer_type;
+
+ KOKKOS_FUNCTION
+ view_type& DownCast() const { return (view_type&)(*this); }
+
+  // FIXME: this function makes NO sense, the one above is already marked
+  // const. Maybe one would want to get back a view of const instead?
+ KOKKOS_FUNCTION
+ const view_type& ConstDownCast() const { return (const view_type&)(*this); }
+
+ // FIXME: deprecate DownCast in favor of to_view
+ // KOKKOS_FUNCTION
+ // view_type to_view() const { return *this; }
+
+ // Types below - at least the HostMirror requires the value_type, NOT the rank
+ // 7 data_type of the traits
+
+ /** \brief Compatible view of array of scalar types */
+ using array_type = DynRankView<
+ typename drvtraits::scalar_array_type, typename drvtraits::array_layout,
+ typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+ /** \brief Compatible view of const data type */
+ using const_type = DynRankView<
+ typename drvtraits::const_data_type, typename drvtraits::array_layout,
+ typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+ /** \brief Compatible view of non-const data type */
+ using non_const_type = DynRankView<
+ typename drvtraits::non_const_data_type, typename drvtraits::array_layout,
+ typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+ /** \brief Compatible HostMirror view */
+ using HostMirror = DynRankView<typename drvtraits::non_const_data_type,
+ typename drvtraits::array_layout,
+ typename drvtraits::host_mirror_space>;
+
+ using host_mirror_type = HostMirror;
+ //----------------------------------------
+ // Domain rank and extents
+
+ // enum { Rank = map_type::Rank }; //Will be dyn rank of 7 always, keep the
+ // enum?
+
+ //----------------------------------------
+ /* Deprecate all 'dimension' functions in favor of
+ * ISO/C++ vocabulary 'extent'.
+ */
+
+ //----------------------------------------
+
+ private:
+ enum {
+ is_layout_left =
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutLeft>,
+
+ is_layout_right =
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutRight>,
+
+ is_layout_stride =
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutStride>,
+
+ is_default_map = std::is_void_v<typename traits::specialize> &&
+ (is_layout_left || is_layout_right || is_layout_stride),
+
+ is_default_access =
+ is_default_map && std::is_same_v<reference_type, element_type&>
+ };
+
+// Bounds checking macros
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+
+// rank of the calling operator - included as first argument in ARG
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG) \
+ Kokkos::Impl::runtime_check_memory_access_violation< \
+ typename traits::memory_space>( \
+ "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
+ "space"); \
+ Kokkos::Impl::dyn_rank_view_verify_operator_bounds< \
+ typename traits::memory_space> \
+ ARG;
+
+#else
+
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG) \
+ Kokkos::Impl::runtime_check_memory_access_violation< \
+ typename traits::memory_space>( \
+ "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
+ "space");
+
+#endif
+
+ public:
+ KOKKOS_FUNCTION
+ constexpr unsigned rank() const { return m_rank; }
+
+ using view_type::data;
+ using view_type::extent;
+ using view_type::extent_int; // FIXME: not tested
+ using view_type::impl_map; // FIXME: not tested
+ using view_type::is_allocated;
+ using view_type::label;
+ using view_type::size;
+ using view_type::span;
+ using view_type::span_is_contiguous; // FIXME: not tested
+ using view_type::stride; // FIXME: not tested
+ using view_type::stride_0; // FIXME: not tested
+ using view_type::stride_1; // FIXME: not tested
+ using view_type::stride_2; // FIXME: not tested
+ using view_type::stride_3; // FIXME: not tested
+ using view_type::stride_4; // FIXME: not tested
+ using view_type::stride_5; // FIXME: not tested
+ using view_type::stride_6; // FIXME: not tested
+ using view_type::stride_7; // FIXME: not tested
+ using view_type::use_count;
+
+#ifdef KOKKOS_ENABLE_CUDA
+ KOKKOS_FUNCTION reference_type
+ operator()(index_type i0 = 0, index_type i1 = 0, index_type i2 = 0,
+ index_type i3 = 0, index_type i4 = 0, index_type i5 = 0,
+ index_type i6 = 0) const {
+ return view_type::operator()(i0, i1, i2, i3, i4, i5, i6);
+ }
+#else
+  // Adding shortcut operators for rank-0 to rank-3 for default layouts
+  // and access modalities.
+  // This removes the performance overhead of always using the rank-7
+  // mapping. See https://github.com/kokkos/kokkos/issues/7604
+  // When bounds checking is enabled we still go through the underlying
+  // rank-7 View to leverage the error checks there.
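+  // For example, with LayoutRight and runtime rank 2, the rank-2 shortcut
+  // below computes data()[i0 * extent(1) + i1] directly instead of routing
+  // (i0, i1, 0, 0, 0, 0, 0) through the full rank-7 offset computation.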
+
+ KOKKOS_FUNCTION reference_type operator()() const {
+#ifdef KOKKOS_ENABLE_DEBUG
+ if (rank() != 0u)
+ Kokkos::abort(
+ "DynRankView rank 0 operator() called with invalid number of "
+ "arguments.");
+#endif
+#ifndef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (is_default_access) {
+ return view_type::data()[0];
+ } else
+#endif
+ return view_type::operator()(0, 0, 0, 0, 0, 0, 0);
+ }
+
+ KOKKOS_FUNCTION reference_type operator()(index_type i0) const {
+#ifdef KOKKOS_ENABLE_DEBUG
+ // FIXME: Should be equal, only access(...) allows mismatch of rank and
+ // index args
+ if (rank() > 1u)
+ Kokkos::abort(
+ "DynRankView rank 1 operator() called with invalid number of "
+ "arguments.");
+#endif
+#ifndef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (is_default_access) {
+ if constexpr (is_layout_stride) {
+ return view_type::data()[i0 * view_type::stride(0)];
+ } else {
+ return view_type::data()[i0];
+ }
+ } else
+#endif
+ return view_type::operator()(i0, 0, 0, 0, 0, 0, 0);
+#if defined KOKKOS_COMPILER_INTEL || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+ }
+
+ KOKKOS_FUNCTION reference_type operator()(index_type i0,
+ index_type i1) const {
+#ifdef KOKKOS_ENABLE_DEBUG
+ // FIXME: Should be equal, only access(...) allows mismatch of rank and
+ // index args
+ if (rank() > 2u)
+ Kokkos::abort(
+ "DynRankView rank 2 operator() called with invalid number of "
+ "arguments.");
+#endif
+#ifndef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (is_default_access) {
+ if constexpr (is_layout_left) {
+ return view_type::data()[i0 + i1 * view_type::stride(1)];
+ } else if constexpr (is_layout_right) {
+ return view_type::data()[i0 * view_type::extent(1) + i1];
+ } else {
+ return view_type::data()[i0 * view_type::stride(0) +
+ i1 * view_type::stride(1)];
+ }
+ } else
+#endif
+ return view_type::operator()(i0, i1, 0, 0, 0, 0, 0);
+#if defined KOKKOS_COMPILER_INTEL || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+ }
+
+ KOKKOS_FUNCTION reference_type operator()(index_type i0, index_type i1,
+ index_type i2) const {
+#ifdef KOKKOS_ENABLE_DEBUG
+ // FIXME: Should be equal, only access(...) allows mismatch of rank and
+ // index args
+ if (rank() > 3u)
+ Kokkos::abort(
+ "DynRankView rank 3 operator() called with invalid number of "
+ "arguments.");
+#endif
+#ifndef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (is_default_access) {
+ if constexpr (is_layout_left) {
+ return view_type::data()[i0 + view_type::stride(1) *
+ (i1 + i2 * view_type::extent(1))];
+ } else if constexpr (is_layout_right) {
+ return view_type::data()[(i0 * view_type::extent(1) + i1) *
+ view_type::extent(2) +
+ i2];
+ } else {
+ return view_type::data()[i0 * view_type::stride(0) +
+ i1 * view_type::stride(1) +
+ i2 * view_type::stride(2)];
+ }
+ } else
+#endif
+ return view_type::operator()(i0, i1, i2, 0, 0, 0, 0);
+#if defined KOKKOS_COMPILER_INTEL || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+ }
+
+ KOKKOS_FUNCTION reference_type operator()(index_type i0, index_type i1,
+ index_type i2, index_type i3,
+ index_type i4 = 0,
+ index_type i5 = 0,
+ index_type i6 = 0) const {
+ return view_type::operator()(i0, i1, i2, i3, i4, i5, i6);
+ }
+#endif
+
+// This is an accommodation for Phalanx, which uses operator[] to access
+// all elements in a linear fashion even when the rank is not 1 (see the
+// sketch after this block)
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_FUNCTION reference_type operator[](index_type i0) const {
+ if constexpr (std::is_same_v<typename drvtraits::value_type,
+ typename drvtraits::scalar_array_type>) {
+ return view_type::data()[i0];
+ } else {
+ const size_t dim_scalar = view_type::impl_map().dimension_scalar();
+ const size_t bytes = view_type::span() / dim_scalar;
+
+ using tmp_view_type =
+ Kokkos::View<DataType*, typename traits::array_layout,
+ typename traits::device_type,
+ Kokkos::MemoryTraits<traits::memory_traits::impl_value |
+ unsigned(Kokkos::Unmanaged)>>;
+ tmp_view_type rankone_view(view_type::data(), bytes, dim_scalar);
+ return rankone_view(i0);
+ }
+ }
+#else
+ KOKKOS_FUNCTION reference_type operator[](index_type i0) const {
+#ifdef KOKKOS_ENABLE_DEBUG
+ if (rank() != 1u)
+ Kokkos::abort("DynRankView operator[] can only be used for rank-1");
+#endif
+ return view_type::operator()(i0, 0, 0, 0, 0, 0, 0);
+ }
+#endif
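+
+  // Example (sketch) of the operator[] variants above: with
+  // KOKKOS_ENABLE_DEPRECATED_CODE_4, a contiguous rank-2
+  // DynRankView<double> a("A", 2, 3) permits the linear access a[5];
+  // without it, operator[] is only valid when rank() == 1 (checked in
+  // debug builds).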
+
+ KOKKOS_FUNCTION reference_type access(index_type i0 = 0, index_type i1 = 0,
+ index_type i2 = 0, index_type i3 = 0,
+ index_type i4 = 0, index_type i5 = 0,
+ index_type i6 = 0) const {
+ return view_type::operator()(i0, i1, i2, i3, i4, i5, i6);
+ }
+
+ //----------------------------------------
+ // Standard constructor, destructor, and assignment operators...
+
+ KOKKOS_DEFAULTED_FUNCTION
+ ~DynRankView() = default;
+
+ KOKKOS_DEFAULTED_FUNCTION DynRankView() = default;
+
+ //----------------------------------------
+ // Compatible view copy constructor and assignment
+ // may assign unmanaged from managed.
+ // Make this conditionally explicit?
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION DynRankView(const DynRankView<RT, RP...>& rhs)
+ : view_type(rhs), m_rank(rhs.m_rank) {}
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION DynRankView& operator=(const DynRankView<RT, RP...>& rhs) {
+ view_type::operator=(rhs);
+ m_rank = rhs.m_rank;
+ return *this;
+ }
+
+#if 0 // TODO: this will later be swapped in depending on whether the new View
+ // impl is active
+ private:
+ template <class Ext>
+ KOKKOS_FUNCTION typename view_type::extents_type create_rank7_extents(
+ const Ext& ext) {
+ return typename view_type::extents_type(
+ ext.rank() > 0 ? ext.extent(0) : 1, ext.rank() > 1 ? ext.extent(1) : 1,
+ ext.rank() > 2 ? ext.extent(2) : 1, ext.rank() > 3 ? ext.extent(3) : 1,
+ ext.rank() > 4 ? ext.extent(4) : 1, ext.rank() > 5 ? ext.extent(5) : 1,
+ ext.rank() > 6 ? ext.extent(6) : 1);
+ }
+
+ public:
+ // Copy/Assign View to DynRankView
+ template <class RT, class... RP>
+ KOKKOS_INLINE_FUNCTION DynRankView(const View<RT, RP...>& rhs,
+ size_t new_rank)
+ : view_type(rhs.data_handle(), drdtraits::createLayout(rhs.layout())),
+ m_rank(new_rank) {
+ if (new_rank > rhs.rank())
+ Kokkos::abort(
+ "Attempting to construct DynRankView from View and new rank, with "
+ "the new rank being too large.");
+ }
+
+ template <class RT, class... RP>
+ KOKKOS_INLINE_FUNCTION DynRankView& operator=(const View<RT, RP...>& rhs) {
+ view_type::operator=(view_type(
+ rhs.data_handle(),
+ typename view_type::mapping_type(create_rank7_extents(rhs.extents())),
+ rhs.accessor()));
+ m_rank = rhs.rank();
+ return *this;
+ }
+#else
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION DynRankView(const View<RT, RP...>& rhs, size_t new_rank) {
+ using SrcTraits = typename View<RT, RP...>::traits;
+ using Mapping =
+ Kokkos::Impl::ViewMapping<traits, SrcTraits,
+ Kokkos::Impl::ViewToDynRankViewTag>;
+ static_assert(Mapping::is_assignable,
+ "Incompatible View to DynRankView copy assignment");
+ if (new_rank > View<RT, RP...>::rank())
+ Kokkos::abort(
+ "Attempting to construct DynRankView from View and new rank, with "
+ "the new rank being too large.");
+ Mapping::assign(*this, rhs);
+ m_rank = new_rank;
+ }
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION DynRankView& operator=(const View<RT, RP...>& rhs) {
+ using SrcTraits = typename View<RT, RP...>::traits;
+ using Mapping =
+ Kokkos::Impl::ViewMapping<traits, SrcTraits,
+ Kokkos::Impl::ViewToDynRankViewTag>;
+ static_assert(Mapping::is_assignable,
+ "Incompatible View to DynRankView copy assignment");
+ Mapping::assign(*this, rhs);
+ m_rank = View<RT, RP...>::rank();
+ return *this;
+ }
+#endif
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION DynRankView(const View<RT, RP...>& rhs)
+ : DynRankView(rhs, View<RT, RP...>::rank()) {}
+
+ //----------------------------------------
+ // Allocation tracking properties
+
+ //----------------------------------------
+  // Allocation according to allocation properties and array layout.
+  // Unused arg_layout dimensions must be set to KOKKOS_INVALID_INDEX so
+  // that rank deduction can properly take place.
+  // We need two variants to avoid warnings about calling a host function
+  // from a host-device function.
+ template <class... P>
+ explicit KOKKOS_FUNCTION DynRankView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ typename traits::array_layout const&>
+ arg_layout)
+ : view_type(arg_prop, drdtraits::template createLayout<traits, P...>(
+ arg_prop, arg_layout)),
+ m_rank(drdtraits::computeRank(arg_prop, arg_layout)) {}
+
+ template <class... P>
+ explicit DynRankView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ typename traits::array_layout const&>
+ arg_layout)
+ : view_type(arg_prop, drdtraits::template createLayout<traits, P...>(
+ arg_prop, arg_layout)),
+ m_rank(drdtraits::computeRank(arg_prop, arg_layout)) {}
+
+ //----------------------------------------
+ // Constructor(s)
+
+  // Simple dimension-only layout.
+  // We need two variants to avoid warnings about calling a host function
+  // from a host-device function.
+ template <class... P>
+ explicit KOKKOS_FUNCTION DynRankView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ const size_t>
+ arg_N0 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+ : DynRankView(arg_prop, typename traits::array_layout(
+ arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
+ arg_N5, arg_N6, arg_N7)) {}
+
+ template <class... P>
+ explicit DynRankView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ const size_t>
+ arg_N0 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+ : DynRankView(arg_prop, typename traits::array_layout(
+ arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
+ arg_N5, arg_N6, arg_N7)) {}
+
+ // Allocate with label and layout
+ template <typename Label>
+ explicit inline DynRankView(
+ const Label& arg_label,
+ std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+ typename traits::array_layout> const& arg_layout)
+ : DynRankView(Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+ arg_layout) {}
+
+ // Allocate label and layout, must disambiguate from subview constructor
+ template <typename Label>
+ explicit inline DynRankView(
+ const Label& arg_label,
+ std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value, const size_t>
+ arg_N0 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+ : DynRankView(
+ Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+ typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+ arg_N4, arg_N5, arg_N6, arg_N7)) {}
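+
+  // Example (sketch) of rank deduction: extents left at KOKKOS_INVALID_INDEX
+  // do not count toward the rank, so
+  //   Kokkos::DynRankView<int> b("B", 5, 7);  // runtime rank == 2
+  //   Kokkos::DynRankView<int> c("C");        // runtime rank == 0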
+
+ //----------------------------------------
+ // Memory span required to wrap these dimensions.
+ // FIXME: this function needs to be tested
+ static constexpr size_t required_allocation_size(
+ const size_t arg_N0 = 1, const size_t arg_N1 = 1, const size_t arg_N2 = 1,
+ const size_t arg_N3 = 1, const size_t arg_N4 = 1, const size_t arg_N5 = 1,
+ const size_t arg_N6 = 1,
+ [[maybe_unused]] const size_t arg_N7 = KOKKOS_INVALID_INDEX) {
+ // FIXME: check that arg_N7 is not set by user (in debug mode)
+ return view_type::required_allocation_size(arg_N0, arg_N1, arg_N2, arg_N3,
+ arg_N4, arg_N5, arg_N6);
+ }
+
+ explicit KOKKOS_FUNCTION DynRankView(
+ typename view_type::pointer_type arg_ptr,
+ const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+ : DynRankView(
+ Kokkos::Impl::ViewCtorProp<typename view_type::pointer_type>(
+ arg_ptr),
+ arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7) {}
+
+ explicit KOKKOS_FUNCTION DynRankView(
+ typename view_type::pointer_type arg_ptr,
+ typename traits::array_layout& arg_layout)
+ : DynRankView(
+ Kokkos::Impl::ViewCtorProp<typename view_type::pointer_type>(
+ arg_ptr),
+ arg_layout) {}
+
+ //----------------------------------------
+ // Shared scratch memory constructor
+
+ // Note: We must pass 7 valid args since view_type is rank 7
+ static inline size_t shmem_size(
+ const size_t arg_N0 = 1, const size_t arg_N1 = 1, const size_t arg_N2 = 1,
+ const size_t arg_N3 = 1, const size_t arg_N4 = 1, const size_t arg_N5 = 1,
+ const size_t arg_N6 = 1, const size_t arg_N7 = KOKKOS_INVALID_INDEX) {
+ return view_type::shmem_size(arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5,
+ arg_N6, arg_N7);
+ }
+
+ explicit KOKKOS_FUNCTION DynRankView(
+ const typename traits::execution_space::scratch_memory_space& arg_space,
+ const typename traits::array_layout& arg_layout)
+ : view_type(arg_space, drdtraits::createLayout(arg_layout)),
+ m_rank(drdtraits::computeRank(arg_layout)) {}
+
+ explicit KOKKOS_FUNCTION DynRankView(
+ const typename traits::execution_space::scratch_memory_space& arg_space,
+ const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+ const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+
+ : DynRankView(arg_space, typename traits::array_layout(
+ arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
+ arg_N5, arg_N6, arg_N7)) {}
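+
+  // Example (sketch, assuming a TeamPolicy "policy" and a team member
+  // "team"): placing a DynRankView in team scratch memory.
+  //   size_t bytes = DynRankView<double>::shmem_size(n0, n1);
+  //   auto policy  = Kokkos::TeamPolicy<>(nteams, Kokkos::AUTO)
+  //                      .set_scratch_size(0, Kokkos::PerTeam(bytes));
+  //   // inside the team kernel:
+  //   DynRankView<double> tmp(team.team_scratch(0), n0, n1);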
+
+ KOKKOS_FUNCTION constexpr auto layout() const {
+ switch (rank()) {
+ case 0: return Impl::as_view_of_rank_n<0>(*this).layout();
+ case 1: return Impl::as_view_of_rank_n<1>(*this).layout();
+ case 2: return Impl::as_view_of_rank_n<2>(*this).layout();
+ case 3: return Impl::as_view_of_rank_n<3>(*this).layout();
+ case 4: return Impl::as_view_of_rank_n<4>(*this).layout();
+ case 5: return Impl::as_view_of_rank_n<5>(*this).layout();
+ case 6: return Impl::as_view_of_rank_n<6>(*this).layout();
+ case 7: return Impl::as_view_of_rank_n<7>(*this).layout();
+ default:
+ KOKKOS_IF_ON_HOST(
+ Kokkos::abort(
+ std::string(
+ "Calling DynRankView::layout on DRV of unexpected rank " +
+ std::to_string(rank()))
+ .c_str());)
+ KOKKOS_IF_ON_DEVICE(
+ Kokkos::abort(
+ "Calling DynRankView::layout on DRV of unexpected rank");)
+ }
+ // control flow should never reach here
+ return view_type::layout();
+ }
+};
+
+template <typename D, class... P>
+KOKKOS_FUNCTION constexpr unsigned rank(const DynRankView<D, P...>& DRV) {
+ return DRV.rank();
+}  // needed for the transition to a common constexpr rank() method in View
+   // and DynRankView
+
+//----------------------------------------------------------------------------
+// Subview mapping.
+// Deduce destination view type from source view traits and subview arguments
+
+namespace Impl {
+
+struct DynRankSubviewTag {};
+
+} // namespace Impl
+
+template <class V, class... Args>
+using Subdynrankview =
+ typename Kokkos::Impl::ViewMapping<Kokkos::Impl::DynRankSubviewTag, V,
+ Args...>::ret_type;
+
+template <class... DRVArgs, class SubArg0 = int, class SubArg1 = int,
+ class SubArg2 = int, class SubArg3 = int, class SubArg4 = int,
+ class SubArg5 = int, class SubArg6 = int>
+KOKKOS_INLINE_FUNCTION auto subdynrankview(
+ const DynRankView<DRVArgs...>& drv, SubArg0 arg0 = SubArg0{},
+ SubArg1 arg1 = SubArg1{}, SubArg2 arg2 = SubArg2{},
+ SubArg3 arg3 = SubArg3{}, SubArg4 arg4 = SubArg4{},
+ SubArg5 arg5 = SubArg5{}, SubArg6 arg6 = SubArg6{}) {
+ auto sub = subview(drv.DownCast(), arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ using sub_t = decltype(sub);
+ size_t new_rank = (drv.rank() > 0 && !std::is_integral_v<SubArg0> ? 1 : 0) +
+ (drv.rank() > 1 && !std::is_integral_v<SubArg1> ? 1 : 0) +
+ (drv.rank() > 2 && !std::is_integral_v<SubArg2> ? 1 : 0) +
+ (drv.rank() > 3 && !std::is_integral_v<SubArg3> ? 1 : 0) +
+ (drv.rank() > 4 && !std::is_integral_v<SubArg4> ? 1 : 0) +
+ (drv.rank() > 5 && !std::is_integral_v<SubArg5> ? 1 : 0) +
+ (drv.rank() > 6 && !std::is_integral_v<SubArg6> ? 1 : 0);
+
+ using return_type =
+ DynRankView<typename sub_t::value_type, Kokkos::LayoutStride,
+ typename sub_t::device_type, typename sub_t::memory_traits>;
+ return static_cast<return_type>(
+ DynRankView<typename sub_t::value_type, typename sub_t::array_layout,
+ typename sub_t::device_type, typename sub_t::memory_traits>(
+ sub, new_rank));
+}
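+
+// Example (sketch) of the new_rank computation above: for a rank-3 drv,
+// subdynrankview(drv, Kokkos::ALL(), 1, Kokkos::ALL()) passes two
+// non-integral arguments within the source rank, so the result is a rank-2
+// DynRankView with LayoutStride.
+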
+template <class... DRVArgs, class SubArg0 = int, class SubArg1 = int,
+ class SubArg2 = int, class SubArg3 = int, class SubArg4 = int,
+ class SubArg5 = int, class SubArg6 = int>
+KOKKOS_INLINE_FUNCTION auto subview(
+ const DynRankView<DRVArgs...>& drv, SubArg0 arg0 = SubArg0{},
+ SubArg1 arg1 = SubArg1{}, SubArg2 arg2 = SubArg2{},
+ SubArg3 arg3 = SubArg3{}, SubArg4 arg4 = SubArg4{},
+ SubArg5 arg5 = SubArg5{}, SubArg6 arg6 = SubArg6{}) {
+ return subdynrankview(drv, arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+}
+
+} // namespace Kokkos
+
+namespace Kokkos {
+
+// overload == and !=
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const DynRankView<LT, LP...>& lhs,
+ const DynRankView<RT, RP...>& rhs) {
+ // Same data, layout, dimensions
+ using lhs_traits = ViewTraits<LT, LP...>;
+ using rhs_traits = ViewTraits<RT, RP...>;
+
+ return std::is_same_v<typename lhs_traits::const_value_type,
+ typename rhs_traits::const_value_type> &&
+ std::is_same_v<typename lhs_traits::array_layout,
+ typename rhs_traits::array_layout> &&
+ std::is_same_v<typename lhs_traits::memory_space,
+ typename rhs_traits::memory_space> &&
+ lhs.rank() == rhs.rank() && lhs.data() == rhs.data() &&
+ lhs.span() == rhs.span() && lhs.extent(0) == rhs.extent(0) &&
+ lhs.extent(1) == rhs.extent(1) && lhs.extent(2) == rhs.extent(2) &&
+ lhs.extent(3) == rhs.extent(3) && lhs.extent(4) == rhs.extent(4) &&
+ lhs.extent(5) == rhs.extent(5) && lhs.extent(6) == rhs.extent(6) &&
+ lhs.extent(7) == rhs.extent(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator!=(const DynRankView<LT, LP...>& lhs,
+ const DynRankView<RT, RP...>& rhs) {
+ return !(operator==(lhs, rhs));
+}
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+template <class OutputView, class Enable = void>
+struct DynRankViewFill {
+ using const_value_type = typename OutputView::traits::const_value_type;
+
+ const OutputView output;
+ const_value_type input;
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const size_t i0) const {
+ const size_t n1 = output.extent(1);
+ const size_t n2 = output.extent(2);
+ const size_t n3 = output.extent(3);
+ const size_t n4 = output.extent(4);
+ const size_t n5 = output.extent(5);
+ const size_t n6 = output.extent(6);
+
+ for (size_t i1 = 0; i1 < n1; ++i1) {
+ for (size_t i2 = 0; i2 < n2; ++i2) {
+ for (size_t i3 = 0; i3 < n3; ++i3) {
+ for (size_t i4 = 0; i4 < n4; ++i4) {
+ for (size_t i5 = 0; i5 < n5; ++i5) {
+ for (size_t i6 = 0; i6 < n6; ++i6) {
+ output.access(i0, i1, i2, i3, i4, i5, i6) = input;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ DynRankViewFill(const OutputView& arg_out, const_value_type& arg_in)
+ : output(arg_out), input(arg_in) {
+ using execution_space = typename OutputView::execution_space;
+ using Policy = Kokkos::RangePolicy<execution_space>;
+
+ Kokkos::parallel_for("Kokkos::DynRankViewFill", Policy(0, output.extent(0)),
+ *this);
+ }
+};
+
+template <class OutputView>
+struct DynRankViewFill<OutputView, std::enable_if_t<OutputView::rank == 0>> {
+ DynRankViewFill(const OutputView& dst,
+ const typename OutputView::const_value_type& src) {
+ Kokkos::Impl::DeepCopy<typename OutputView::memory_space,
+ Kokkos::HostSpace>(
+ dst.data(), &src, sizeof(typename OutputView::const_value_type));
+ }
+};
+
+template <class OutputView, class InputView,
+ class ExecSpace = typename OutputView::execution_space>
+struct DynRankViewRemap {
+ const OutputView output;
+ const InputView input;
+ const size_t n0;
+ const size_t n1;
+ const size_t n2;
+ const size_t n3;
+ const size_t n4;
+ const size_t n5;
+ const size_t n6;
+ const size_t n7;
+
+ DynRankViewRemap(const ExecSpace& exec_space, const OutputView& arg_out,
+ const InputView& arg_in)
+ : output(arg_out),
+ input(arg_in),
+ n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
+ n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
+ n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
+ n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
+ n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
+ n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
+ n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
+ n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
+ using Policy = Kokkos::RangePolicy<ExecSpace>;
+
+ Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(exec_space, 0, n0),
+ *this);
+ }
+
+ DynRankViewRemap(const OutputView& arg_out, const InputView& arg_in)
+ : output(arg_out),
+ input(arg_in),
+ n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
+ n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
+ n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
+ n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
+ n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
+ n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
+ n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
+ n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
+ using Policy = Kokkos::RangePolicy<ExecSpace>;
+
+ Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(0, n0), *this);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(const size_t i0) const {
+ for (size_t i1 = 0; i1 < n1; ++i1) {
+ for (size_t i2 = 0; i2 < n2; ++i2) {
+ for (size_t i3 = 0; i3 < n3; ++i3) {
+ for (size_t i4 = 0; i4 < n4; ++i4) {
+ for (size_t i5 = 0; i5 < n5; ++i5) {
+ for (size_t i6 = 0; i6 < n6; ++i6) {
+ output.access(i0, i1, i2, i3, i4, i5, i6) =
+ input.access(i0, i1, i2, i3, i4, i5, i6);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+namespace Kokkos {
+
+namespace Impl {
+
+/* \brief Returns a View of the requested rank, aliasing the
+ underlying memory, to facilitate implementation of deep_copy() and
+ other routines that are defined on View */
+template <unsigned N, typename T, typename... Args>
+KOKKOS_FUNCTION View<typename ViewDataTypeFromRank<T, N>::type, Args...>
+as_view_of_rank_n(
+ DynRankView<T, Args...> v,
+ std::enable_if_t<
+ std::is_same_v<typename ViewTraits<T, Args...>::specialize, void>>*) {
+ if (v.rank() != N) {
+ KOKKOS_IF_ON_HOST(
+ const std::string message =
+ "Converting DynRankView of rank " + std::to_string(v.rank()) +
+ " to a View of mis-matched rank " + std::to_string(N) + "!";
+ Kokkos::abort(message.c_str());)
+ KOKKOS_IF_ON_DEVICE(
+ Kokkos::abort("Converting DynRankView to a View of mis-matched rank!");)
+ }
+
+ auto layout = v.DownCast().layout();
+
+ if constexpr (std::is_same_v<decltype(layout), Kokkos::LayoutLeft> ||
+ std::is_same_v<decltype(layout), Kokkos::LayoutRight> ||
+ std::is_same_v<decltype(layout), Kokkos::LayoutStride>) {
+ for (int i = N; i < 7; ++i)
+ layout.dimension[i] = KOKKOS_IMPL_CTOR_DEFAULT_ARG;
+ }
+
+ return View<typename RankDataType<T, N>::type, Args...>(v.data(), layout);
+}
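+
+// Example (sketch): a DynRankView "d" of runtime rank 2 can be handed to
+// code expecting a static-rank View via
+//   auto v2 = Impl::as_view_of_rank_n<2>(d);  // rank-2 View alias
+// while as_view_of_rank_n<3>(d) aborts with the mismatch message above.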
+
+template <typename Function, typename... Args>
+void apply_to_view_of_static_rank(Function&& f, DynRankView<Args...> a) {
+ switch (rank(a)) {
+ case 0: f(as_view_of_rank_n<0>(a)); break;
+ case 1: f(as_view_of_rank_n<1>(a)); break;
+ case 2: f(as_view_of_rank_n<2>(a)); break;
+ case 3: f(as_view_of_rank_n<3>(a)); break;
+ case 4: f(as_view_of_rank_n<4>(a)); break;
+ case 5: f(as_view_of_rank_n<5>(a)); break;
+ case 6: f(as_view_of_rank_n<6>(a)); break;
+ case 7: f(as_view_of_rank_n<7>(a)); break;
+ default:
+ KOKKOS_IF_ON_HOST(
+ Kokkos::abort(
+ std::string(
+ "Trying to apply a function to a view of unexpected rank " +
+ std::to_string(rank(a)))
+ .c_str());)
+ KOKKOS_IF_ON_DEVICE(
+ Kokkos::abort(
+ "Trying to apply a function to a view of unexpected rank");)
+ }
+}
+
+} // namespace Impl
+
+/** \brief Deep copy a value from Host memory into a view. */
+template <class ExecSpace, class DT, class... DP>
+inline void deep_copy(
+ const ExecSpace& e, const DynRankView<DT, DP...>& dst,
+ typename ViewTraits<DT, DP...>::const_value_type& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ static_assert(
+ std::is_same_v<typename ViewTraits<DT, DP...>::non_const_value_type,
+ typename ViewTraits<DT, DP...>::value_type>,
+ "deep_copy requires non-const type");
+
+ Impl::apply_to_view_of_static_rank(
+ [=](auto view) { deep_copy(e, view, value); }, dst);
+}
+
+template <class DT, class... DP>
+inline void deep_copy(
+ const DynRankView<DT, DP...>& dst,
+ typename ViewTraits<DT, DP...>::const_value_type& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ Impl::apply_to_view_of_static_rank([=](auto view) { deep_copy(view, value); },
+ dst);
+}
+
+/** \brief Deep copy into a value in Host memory from a view. */
+template <class ExecSpace, class ST, class... SP>
+inline void deep_copy(
+ const ExecSpace& e,
+ typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+ const DynRankView<ST, SP...>& src,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<ST, SP...>::specialize,
+ void>>* = 0) {
+ deep_copy(e, dst, Impl::as_view_of_rank_n<0>(src));
+}
+
+template <class ST, class... SP>
+inline void deep_copy(
+ typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+ const DynRankView<ST, SP...>& src,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<ST, SP...>::specialize,
+ void>>* = 0) {
+ deep_copy(dst, Impl::as_view_of_rank_n<0>(src));
+}
+
+//----------------------------------------------------------------------------
+/** \brief A deep copy between views of the default specialization, compatible
+ * type, same rank, same contiguous layout.
+ *
+ * A rank mismatch will error out in the attempt to convert to a View
+ */
+template <class ExecSpace, class DstType, class SrcType>
+inline void deep_copy(
+ const ExecSpace& exec_space, const DstType& dst, const SrcType& src,
+ std::enable_if_t<(std::is_void_v<typename DstType::traits::specialize> &&
+ std::is_void_v<typename SrcType::traits::specialize> &&
+ (Kokkos::is_dyn_rank_view<DstType>::value ||
+ Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
+ static_assert(std::is_same_v<typename DstType::traits::value_type,
+ typename DstType::traits::non_const_value_type>,
+ "deep_copy requires non-const destination type");
+
+ switch (rank(dst)) {
+ case 0:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<0>(dst),
+ Impl::as_view_of_rank_n<0>(src));
+ break;
+ case 1:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<1>(dst),
+ Impl::as_view_of_rank_n<1>(src));
+ break;
+ case 2:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<2>(dst),
+ Impl::as_view_of_rank_n<2>(src));
+ break;
+ case 3:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<3>(dst),
+ Impl::as_view_of_rank_n<3>(src));
+ break;
+ case 4:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<4>(dst),
+ Impl::as_view_of_rank_n<4>(src));
+ break;
+ case 5:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<5>(dst),
+ Impl::as_view_of_rank_n<5>(src));
+ break;
+ case 6:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<6>(dst),
+ Impl::as_view_of_rank_n<6>(src));
+ break;
+ case 7:
+ deep_copy(exec_space, Impl::as_view_of_rank_n<7>(dst),
+ Impl::as_view_of_rank_n<7>(src));
+ break;
+ default:
+ Kokkos::Impl::throw_runtime_exception(
+ "Calling DynRankView deep_copy with a view of unexpected rank " +
+ std::to_string(rank(dst)));
+ }
+}
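+
+// Example (sketch, assuming an execution space instance "exec"):
+//   DynRankView<double> x("x", 4, 4), y("y", 4, 4);  // both runtime rank 2
+//   deep_copy(exec, y, x);  // ok: both sides convert to rank-2 Views
+// Copying between views of different runtime rank aborts inside
+// as_view_of_rank_n.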
+
+template <class DstType, class SrcType>
+inline void deep_copy(
+ const DstType& dst, const SrcType& src,
+ std::enable_if_t<(std::is_void_v<typename DstType::traits::specialize> &&
+ std::is_void_v<typename SrcType::traits::specialize> &&
+ (Kokkos::is_dyn_rank_view<DstType>::value ||
+ Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
+ static_assert(std::is_same_v<typename DstType::traits::value_type,
+ typename DstType::traits::non_const_value_type>,
+ "deep_copy requires non-const destination type");
+
+ switch (rank(dst)) {
+ case 0:
+ deep_copy(Impl::as_view_of_rank_n<0>(dst),
+ Impl::as_view_of_rank_n<0>(src));
+ break;
+ case 1:
+ deep_copy(Impl::as_view_of_rank_n<1>(dst),
+ Impl::as_view_of_rank_n<1>(src));
+ break;
+ case 2:
+ deep_copy(Impl::as_view_of_rank_n<2>(dst),
+ Impl::as_view_of_rank_n<2>(src));
+ break;
+ case 3:
+ deep_copy(Impl::as_view_of_rank_n<3>(dst),
+ Impl::as_view_of_rank_n<3>(src));
+ break;
+ case 4:
+ deep_copy(Impl::as_view_of_rank_n<4>(dst),
+ Impl::as_view_of_rank_n<4>(src));
+ break;
+ case 5:
+ deep_copy(Impl::as_view_of_rank_n<5>(dst),
+ Impl::as_view_of_rank_n<5>(src));
+ break;
+ case 6:
+ deep_copy(Impl::as_view_of_rank_n<6>(dst),
+ Impl::as_view_of_rank_n<6>(src));
+ break;
+ case 7:
+ deep_copy(Impl::as_view_of_rank_n<7>(dst),
+ Impl::as_view_of_rank_n<7>(src));
+ break;
+ default:
+ Kokkos::Impl::throw_runtime_exception(
+ "Calling DynRankView deep_copy with a view of unexpected rank " +
+ std::to_string(rank(dst)));
+ }
+}
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorDRViewType {
+ // The incoming view_type
+ using src_view_type = typename Kokkos::DynRankView<T, P...>;
+ // The memory space for the mirror view
+ using memory_space = typename Space::memory_space;
+ // Check whether it is the same memory space
+ enum {
+ is_same_memspace =
+ std::is_same_v<memory_space, typename src_view_type::memory_space>
+ };
+ // The array_layout
+ using array_layout = typename src_view_type::array_layout;
+  // The data type (we probably want it non-const since otherwise we can't
+  // even deep_copy to it).
+ using data_type = typename src_view_type::non_const_data_type;
+ // The destination view type if it is not the same memory space
+ using dest_view_type = Kokkos::DynRankView<data_type, array_layout, Space>;
+  // If it is the same memory_space, return the existing view_type.
+ // This will also keep the unmanaged trait if necessary
+ using view_type =
+ std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
+
+} // namespace Impl
+
+namespace Impl {
+
+// create a mirror
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(const DynRankView<T, P...>& src,
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
+
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string(src.label()).append("_mirror"));
+
+ if constexpr (Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ using dst_type = typename Impl::MirrorDRViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::dest_view_type;
+ return dst_type(prop_copy,
+ Impl::reconstructLayout(src.layout(), src.rank()));
+ } else {
+ using src_type = DynRankView<T, P...>;
+ using dst_type = typename src_type::HostMirror;
+
+ return dst_type(prop_copy,
+ Impl::reconstructLayout(src.layout(), src.rank()));
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+}
+
+} // namespace Impl
+
+// public interface
+template <class T, class... P,
+ class Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(const DynRankView<T, P...>& src) {
+ return Impl::create_mirror(src, Kokkos::view_alloc());
+}
+
+// public interface that accepts a without initializing flag
+template <class T, class... P,
+ class Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(Kokkos::Impl::WithoutInitializing_t wi,
+ const DynRankView<T, P...>& src) {
+ return Impl::create_mirror(src, Kokkos::view_alloc(wi));
+}
+
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ class Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(const Space&,
+ const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror(
+ src, Kokkos::view_alloc(typename Space::memory_space{}));
+}
+
+// public interface that accepts a space and a without initializing flag
+template <class Space, class T, class... P,
+ class Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+ const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror(
+ src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
+}
+
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs,
+ typename Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const DynRankView<T, P...>& src) {
+ return Impl::create_mirror(src, arg_prop);
+}
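+
+// Example (sketch) for the create_mirror interfaces above:
+//   Kokkos::DynRankView<double> d("d", 10, 10);
+//   auto h = Kokkos::create_mirror(d);  // HostMirror, same rank and extents
+//   Kokkos::deep_copy(h, d);            // device -> host
+//   auto u = Kokkos::create_mirror(Kokkos::WithoutInitializing, d);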
+
+namespace Impl {
+
+// create a mirror view
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+ const DynRankView<T, P...>& src,
+ [[maybe_unused]] const typename Impl::ViewCtorProp<ViewCtorArgs...>&
+ arg_prop) {
+ if constexpr (!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ if constexpr (std::is_same_v<typename DynRankView<T, P...>::memory_space,
+ typename DynRankView<
+ T, P...>::HostMirror::memory_space> &&
+ std::is_same_v<
+ typename DynRankView<T, P...>::data_type,
+ typename DynRankView<T, P...>::HostMirror::data_type>) {
+ return typename DynRankView<T, P...>::HostMirror(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ } else {
+ if constexpr (Impl::MirrorDRViewType<typename Impl::ViewCtorProp<
+ ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ return typename Impl::MirrorDRViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::view_type(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+}
+
+} // namespace Impl
+
+// public interface
+template <class T, class... P>
+inline auto create_mirror_view(const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror_view(src, Kokkos::view_alloc());
+}
+
+// public interface that accepts a without initializing flag
+template <class T, class... P>
+inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
+ const DynRankView<T, P...>& src) {
+ return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
+}
+
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror_view(const Space&,
+ const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror_view(
+ src, Kokkos::view_alloc(typename Space::memory_space()));
+}
+
+// public interface that accepts a space and a without initializing flag
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
+ const Space&,
+ const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror_view(
+ src, Kokkos::view_alloc(typename Space::memory_space{}, wi));
+}
+
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+ const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::DynRankView<T, P...>& src) {
+ return Impl::create_mirror_view(src, arg_prop);
+}
+
+// create a mirror view and deep copy it
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class... ViewCtorArgs, class T, class... P,
+ class Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+auto create_mirror_view_and_copy(
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::DynRankView<T, P...>& src) {
+ using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ Impl::check_view_ctor_args_create_mirror_view_and_copy<ViewCtorArgs...>();
+
+ if constexpr (Impl::MirrorDRViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ // same behavior as deep_copy(src, src)
+ if constexpr (!alloc_prop_input::has_execution_space)
+ fence(
+ "Kokkos::create_mirror_view_and_copy: fence before returning src "
+ "view");
+ return src;
+ } else {
+ using Space = typename alloc_prop_input::memory_space;
+ using Mirror = typename Impl::MirrorDRViewType<Space, T, P...>::view_type;
+
+ auto arg_prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string{}, WithoutInitializing,
+ typename Space::execution_space{});
+
+ std::string& label = Impl::get_property<Impl::LabelTag>(arg_prop_copy);
+ if (label.empty()) label = src.label();
+ auto mirror = typename Mirror::non_const_type{
+ arg_prop_copy, Impl::reconstructLayout(src.layout(), src.rank())};
+ if constexpr (alloc_prop_input::has_execution_space) {
+ deep_copy(Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop_copy),
+ mirror, src);
+ } else
+ deep_copy(mirror, src);
+ return mirror;
+ }
+#if defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC)
+ __builtin_unreachable();
+#endif
+}
+
+template <class Space, class T, class... P>
+auto create_mirror_view_and_copy(const Space&,
+ const Kokkos::DynRankView<T, P...>& src,
+ std::string const& name = "") {
+ return create_mirror_view_and_copy(
+ Kokkos::view_alloc(typename Space::memory_space{}, name), src);
+}
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+/** \brief Resize a view, copying the old data to the new data at the
+ * corresponding indices. */
+template <class... ViewCtorArgs, class T, class... P>
+inline void impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ DynRankView<T, P...>& v, const size_t n0,
+ const size_t n1, const size_t n2, const size_t n3,
+ const size_t n4, const size_t n5, const size_t n6,
+ const size_t n7) {
+ using drview_type = DynRankView<T, P...>;
+ using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+ "Can only resize managed views");
+ static_assert(!alloc_prop_input::has_label,
+ "The view constructor arguments passed to Kokkos::resize "
+ "must not include a label!");
+ static_assert(!alloc_prop_input::has_pointer,
+ "The view constructor arguments passed to Kokkos::resize must "
+ "not include a pointer!");
+ static_assert(!alloc_prop_input::has_memory_space,
+ "The view constructor arguments passed to Kokkos::resize must "
+ "not include a memory space instance!");
+
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, v.label(), typename drview_type::execution_space{});
+
+ drview_type v_resized(prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+
+ if constexpr (alloc_prop_input::has_execution_space)
+ Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(
+ Impl::get_property<Impl::ExecutionSpaceTag>(prop_copy), v_resized, v);
+ else {
+ Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(v_resized, v);
+ Kokkos::fence("Kokkos::resize(DynRankView)");
+ }
+ v = v_resized;
+}
+
+template <class T, class... P>
+inline void resize(DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_INVALID_INDEX,
+ const size_t n1 = KOKKOS_INVALID_INDEX,
+ const size_t n2 = KOKKOS_INVALID_INDEX,
+ const size_t n3 = KOKKOS_INVALID_INDEX,
+ const size_t n4 = KOKKOS_INVALID_INDEX,
+ const size_t n5 = KOKKOS_INVALID_INDEX,
+ const size_t n6 = KOKKOS_INVALID_INDEX,
+ const size_t n7 = KOKKOS_INVALID_INDEX) {
+ impl_resize(Impl::ViewCtorProp<>{}, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class... ViewCtorArgs, class T, class... P>
+void resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+ impl_resize(arg_prop, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> resize(
+ const I& arg_prop, DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_INVALID_INDEX,
+ const size_t n1 = KOKKOS_INVALID_INDEX,
+ const size_t n2 = KOKKOS_INVALID_INDEX,
+ const size_t n3 = KOKKOS_INVALID_INDEX,
+ const size_t n4 = KOKKOS_INVALID_INDEX,
+ const size_t n5 = KOKKOS_INVALID_INDEX,
+ const size_t n6 = KOKKOS_INVALID_INDEX,
+ const size_t n7 = KOKKOS_INVALID_INDEX) {
+ impl_resize(Kokkos::view_alloc(arg_prop), v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
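+
+// Example (sketch): resize preserves overlapping entries via
+// DynRankViewRemap, e.g.
+//   Kokkos::DynRankView<int> a("A", 4, 4);
+//   Kokkos::resize(a, 8, 8);  // still rank 2; a(i, j) for i, j < 4 is kept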
+
+/** \brief Reallocate a view, discarding the old contents (they are not
+ * copied to the new allocation). */
+template <class... ViewCtorArgs, class T, class... P>
+inline void impl_realloc(DynRankView<T, P...>& v, const size_t n0,
+ const size_t n1, const size_t n2, const size_t n3,
+ const size_t n4, const size_t n5, const size_t n6,
+ const size_t n7,
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ using drview_type = DynRankView<T, P...>;
+ using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+ "Can only realloc managed views");
+ static_assert(!alloc_prop_input::has_label,
+ "The view constructor arguments passed to Kokkos::realloc must "
+ "not include a label!");
+ static_assert(!alloc_prop_input::has_pointer,
+ "The view constructor arguments passed to Kokkos::realloc must "
+ "not include a pointer!");
+ static_assert(!alloc_prop_input::has_memory_space,
+ "The view constructor arguments passed to Kokkos::realloc must "
+ "not include a memory space instance!");
+
+ auto arg_prop_copy = Impl::with_properties_if_unset(arg_prop, v.label());
+
+  v = drview_type();  // Deallocate first, if this is the only view
+                      // referencing the allocation
+ v = drview_type(arg_prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline void realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_INVALID_INDEX,
+ const size_t n1 = KOKKOS_INVALID_INDEX,
+ const size_t n2 = KOKKOS_INVALID_INDEX,
+ const size_t n3 = KOKKOS_INVALID_INDEX,
+ const size_t n4 = KOKKOS_INVALID_INDEX,
+ const size_t n5 = KOKKOS_INVALID_INDEX,
+ const size_t n6 = KOKKOS_INVALID_INDEX,
+ const size_t n7 = KOKKOS_INVALID_INDEX) {
+ impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, arg_prop);
+}
+
+template <class T, class... P>
+inline void realloc(DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_INVALID_INDEX,
+ const size_t n1 = KOKKOS_INVALID_INDEX,
+ const size_t n2 = KOKKOS_INVALID_INDEX,
+ const size_t n3 = KOKKOS_INVALID_INDEX,
+ const size_t n4 = KOKKOS_INVALID_INDEX,
+ const size_t n5 = KOKKOS_INVALID_INDEX,
+ const size_t n6 = KOKKOS_INVALID_INDEX,
+ const size_t n7 = KOKKOS_INVALID_INDEX) {
+ impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Impl::ViewCtorProp<>{});
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
+ const I& arg_prop, DynRankView<T, P...>& v,
+ const size_t n0 = KOKKOS_INVALID_INDEX,
+ const size_t n1 = KOKKOS_INVALID_INDEX,
+ const size_t n2 = KOKKOS_INVALID_INDEX,
+ const size_t n3 = KOKKOS_INVALID_INDEX,
+ const size_t n4 = KOKKOS_INVALID_INDEX,
+ const size_t n5 = KOKKOS_INVALID_INDEX,
+ const size_t n6 = KOKKOS_INVALID_INDEX,
+ const size_t n7 = KOKKOS_INVALID_INDEX) {
+ impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Kokkos::view_alloc(arg_prop));
+}
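+
+// Example (sketch): in contrast to resize, realloc discards the old
+// contents, so after
+//   Kokkos::realloc(a, 8, 8);
+// the entries of a are freshly initialized rather than copied over.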
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_DYNAMIC_VIEW_HPP
#define KOKKOS_DYNAMIC_VIEW_HPP
using pointer_type = ValueType*;
using track_type = Kokkos::Impl::SharedAllocationTracker;
- ChunkedArrayManager() = default;
- ChunkedArrayManager(ChunkedArrayManager const&) = default;
- ChunkedArrayManager(ChunkedArrayManager&&) = default;
- ChunkedArrayManager& operator=(ChunkedArrayManager&&) = default;
+ ChunkedArrayManager() = default;
+ ChunkedArrayManager(ChunkedArrayManager const&) = default;
+ ChunkedArrayManager(ChunkedArrayManager&&) = default;
+ ChunkedArrayManager& operator=(ChunkedArrayManager&&) = default;
ChunkedArrayManager& operator=(const ChunkedArrayManager&) = default;
template <typename Space, typename Value>
/// allocation
template <typename Space>
struct Destroy {
- Destroy() = default;
- Destroy(Destroy&&) = default;
- Destroy(const Destroy&) = default;
- Destroy& operator=(Destroy&&) = default;
+ Destroy() = default;
+ Destroy(Destroy&&) = default;
+ Destroy(const Destroy&) = default;
+ Destroy& operator=(Destroy&&) = default;
Destroy& operator=(const Destroy&) = default;
Destroy(std::string label, value_type** arg_chunk,
// It is assumed that the value_type is trivially copyable;
// when it is not, the resulting raw-memory copies are undefined behavior.
- static_assert(std::is_void<typename traits::specialize>::value,
+ static_assert(std::is_void_v<typename traits::specialize>,
"DynamicView only implemented for non-specialized View type");
private:
enum {
reference_type_is_lvalue_reference =
- std::is_lvalue_reference<reference_type>::value
+ std::is_lvalue_reference_v<reference_type>
};
KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
//----------------------------------------------------------------------
- ~DynamicView() = default;
- DynamicView() = default;
- DynamicView(DynamicView&&) = default;
- DynamicView(const DynamicView&) = default;
- DynamicView& operator=(DynamicView&&) = default;
+ ~DynamicView() = default;
+ DynamicView() = default;
+ DynamicView(DynamicView&&) = default;
+ DynamicView(const DynamicView&) = default;
+ DynamicView& operator=(DynamicView&&) = default;
DynamicView& operator=(const DynamicView&) = default;
template <class RT, class... RP>
m_chunks = device_accessor(m_chunk_max, m_chunk_size);
const std::string& label =
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
- arg_prop)
- .value;
+ Kokkos::Impl::get_property<Kokkos::Impl::LabelTag>(arg_prop);
if (device_accessor::template IsAccessibleFrom<host_space>::value) {
m_chunks.template allocate_with_destroy<device_space>(label);
label, m_chunks.get_ptr());
m_chunks_host.initialize();
- // Add some properties if not provided to avoid need for if constexpr
using alloc_prop_input = Kokkos::Impl::ViewCtorProp<Prop...>;
- using alloc_prop = Kokkos::Impl::ViewCtorProp<
- Prop..., std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 15>,
- typename device_space::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
-
- const auto& exec = static_cast<const Kokkos::Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space>&>(arg_prop_copy)
- .value;
+
+ auto arg_prop_copy = ::Kokkos::Impl::with_properties_if_unset(
+ arg_prop, typename device_space::execution_space{});
+
+ const auto& exec =
+ Kokkos::Impl::get_property<Kokkos::Impl::ExecutionSpaceTag>(
+ arg_prop_copy);
m_chunks_host.deep_copy_to(exec, m_chunks);
if (!alloc_prop_input::has_execution_space)
exec.fence(
struct is_dynamic_view<Kokkos::Experimental::DynamicView<D, P...>>
: public std::true_type {};
+template <class T>
+inline constexpr bool is_dynamic_view_v = is_dynamic_view<T>::value;
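+
+// For example, the trait can gate templates that only make sense for
+// DynamicView:
+//
+//   static_assert(
+//       Kokkos::is_dynamic_view_v<Kokkos::Experimental::DynamicView<int*>>);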
+
} // namespace Kokkos
namespace Kokkos {
// Check whether it is the same memory space
enum {
is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
+ std::is_same_v<memory_space, typename src_view_type::memory_space>
};
// The array_layout
using array_layout = typename src_view_type::array_layout;
} // namespace Impl
namespace Impl {
+
+// create a mirror
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror(
- const Kokkos::Experimental::DynamicView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- std::enable_if_t<!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
- nullptr) {
+inline auto create_mirror(const Kokkos::Experimental::DynamicView<T, P...>& src,
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string(src.label()).append("_mirror"));
- auto ret = typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror(
- prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
-
- ret.resize_serial(src.extent(0));
-
- return ret;
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-inline auto create_mirror(
- const Kokkos::Experimental::DynamicView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- std::enable_if_t<Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
- nullptr) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+ if constexpr (Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ using MemorySpace = typename alloc_prop_input::memory_space;
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
+ auto ret = typename Kokkos::Impl::MirrorDynamicViewType<
+ MemorySpace, T, P...>::view_type(prop_copy, src.chunk_size(),
+ src.chunk_max() * src.chunk_size());
- using MemorySpace = typename alloc_prop_input::memory_space;
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
+ ret.resize_serial(src.extent(0));
- auto ret = typename Kokkos::Impl::MirrorDynamicViewType<
- MemorySpace, T, P...>::view_type(prop_copy, src.chunk_size(),
- src.chunk_max() * src.chunk_size());
+ return ret;
+ } else {
+ auto ret = typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror(
+ prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
- ret.resize_serial(src.extent(0));
+ ret.resize_serial(src.extent(0));
- return ret;
+ return ret;
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
}
+
} // namespace Impl
-// Create a mirror in host space
-template <class T, class... P>
+// public interface
+template <class T, class... P,
+ typename Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
inline auto create_mirror(
const Kokkos::Experimental::DynamicView<T, P...>& src) {
return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
}
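+// A usage sketch (names illustrative): mirror a device-resident DynamicView
+// on the host, then copy. resize_serial sets the mirror's extent just as the
+// private overload above does.
+//
+//   Kokkos::Experimental::DynamicView<double*> d("d", 1024, 1 << 20);
+//   d.resize_serial(10000);
+//   auto h = Kokkos::create_mirror(d);  // always a fresh host allocation
+//   Kokkos::deep_copy(h, d);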
-template <class T, class... P>
+// public interface that accepts a without-initializing flag
+template <class T, class... P,
+ typename Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
inline auto create_mirror(
Kokkos::Impl::WithoutInitializing_t wi,
const Kokkos::Experimental::DynamicView<T, P...>& src) {
return Impl::create_mirror(src, Kokkos::view_alloc(wi));
}
-// Create a mirror in a new space
-template <class Space, class T, class... P>
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
inline auto create_mirror(
const Space&, const Kokkos::Experimental::DynamicView<T, P...>& src) {
return Impl::create_mirror(
- src, Impl::ViewCtorProp<>{typename Space::memory_space{}});
+ src, Kokkos::view_alloc(typename Space::memory_space{}));
}
-template <class Space, class T, class... P>
-typename Kokkos::Impl::MirrorDynamicViewType<Space, T, P...>::view_type
-create_mirror(Kokkos::Impl::WithoutInitializing_t wi, const Space&,
- const Kokkos::Experimental::DynamicView<T, P...>& src) {
+// public interface that accepts a space and a without-initializing flag
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+ const Kokkos::Experimental::DynamicView<T, P...>& src) {
return Impl::create_mirror(
src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
}
-template <class T, class... P, class... ViewCtorArgs>
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs,
+ typename Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
inline auto create_mirror(
const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
const Kokkos::Experimental::DynamicView<T, P...>& src) {
}
namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- (std::is_same<
- typename Kokkos::Experimental::DynamicView<T, P...>::memory_space,
- typename Kokkos::Experimental::DynamicView<
- T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::Experimental::DynamicView<T, P...>::data_type,
- typename Kokkos::Experimental::DynamicView<
- T, P...>::HostMirror::data_type>::value),
- typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror>
-create_mirror_view(
- const typename Kokkos::Experimental::DynamicView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
+// create a mirror view
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- !(std::is_same<
- typename Kokkos::Experimental::DynamicView<T, P...>::memory_space,
- typename Kokkos::Experimental::DynamicView<
- T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::Experimental::DynamicView<T, P...>::data_type,
- typename Kokkos::Experimental::DynamicView<
- T, P...>::HostMirror::data_type>::value),
- typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::Experimental::DynamicView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return Kokkos::create_mirror(arg_prop, src);
+inline auto create_mirror_view(
+ const Kokkos::Experimental::DynamicView<T, P...>& src,
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ if constexpr (!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ if constexpr (std::is_same_v<typename Kokkos::Experimental::DynamicView<
+ T, P...>::memory_space,
+ typename Kokkos::Experimental::DynamicView<
+ T, P...>::HostMirror::memory_space> &&
+ std::is_same_v<typename Kokkos::Experimental::DynamicView<
+ T, P...>::data_type,
+ typename Kokkos::Experimental::DynamicView<
+ T, P...>::HostMirror::data_type>) {
+ return
+ typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ } else {
+ if constexpr (Impl::MirrorDynamicViewType<
+ typename Impl::ViewCtorProp<
+ ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ return typename Impl::MirrorDynamicViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::view_type(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
}
-template <class Space, class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- Impl::MirrorDynamicViewType<Space, T, P...>::is_same_memspace,
- typename Kokkos::Impl::MirrorDynamicViewType<Space, T, P...>::view_type>
-create_mirror_view(const Space&,
- const Kokkos::Experimental::DynamicView<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
} // namespace Impl
-// Create a mirror view in host space
+// public interface
template <class T, class... P>
inline auto create_mirror_view(
const typename Kokkos::Experimental::DynamicView<T, P...>& src) {
return Impl::create_mirror_view(src, Impl::ViewCtorProp<>{});
}
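+// Unlike create_mirror, create_mirror_view returns an alias of src when src
+// is already accessible in the requested space, so the subsequent deep_copy
+// is cheap in that case. Sketch:
+//
+//   auto m = Kokkos::create_mirror_view(d);  // alias if d is host-accessible
+//   Kokkos::deep_copy(m, d);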
+// public interface that accepts a without-initializing flag
template <class T, class... P>
inline auto create_mirror_view(
Kokkos::Impl::WithoutInitializing_t wi,
return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
}
-// Create a mirror in a new space
-template <class Space, class T, class... P>
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
inline auto create_mirror_view(
- const Space& space, const Kokkos::Experimental::DynamicView<T, P...>& src) {
- return Impl::create_mirror_view(space, src, Impl::ViewCtorProp<>{});
+ const Space&, const Kokkos::Experimental::DynamicView<T, P...>& src) {
+ return Impl::create_mirror_view(src,
+ view_alloc(typename Space::memory_space{}));
}
-template <class Space, class T, class... P>
+// public interface that accepts a space and a without-initializing flag
+template <class Space, class T, class... P,
+ class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
inline auto create_mirror_view(
Kokkos::Impl::WithoutInitializing_t wi, const Space&,
const Kokkos::Experimental::DynamicView<T, P...>& src) {
src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
}
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs>
inline auto create_mirror_view(
const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
using src_memory_space = typename ViewTraits<T, SP...>::memory_space;
- enum {
- DstExecCanAccessSrc =
- Kokkos::SpaceAccessibility<dst_execution_space,
- src_memory_space>::accessible
- };
+ constexpr bool DstExecCanAccessSrc =
+ Kokkos::SpaceAccessibility<dst_execution_space,
+ src_memory_space>::accessible;
+ static_assert(
+ DstExecCanAccessSrc,
+ "deep_copy given views that would require a temporary allocation");
- if (DstExecCanAccessSrc) {
- // Copying data between views in accessible memory spaces and either
- // non-contiguous or incompatible shape.
- Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
- Kokkos::fence("Kokkos::deep_copy(DynamicView)");
- } else {
- Kokkos::Impl::throw_runtime_exception(
- "deep_copy given views that would require a temporary allocation");
- }
+  // Copy between views in mutually accessible memory spaces that are either
+  // non-contiguous or of incompatible shape.
+ Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
+ Kokkos::fence("Kokkos::deep_copy(DynamicView)");
}
template <class T, class... DP, class... SP>
inline void deep_copy(const Kokkos::Experimental::DynamicView<T, DP...>& dst,
const View<T, SP...>& src) {
- using dst_type = Kokkos::Experimental::DynamicView<T, SP...>;
- using src_type = View<T, DP...>;
+ using dst_type = Kokkos::Experimental::DynamicView<T, DP...>;
+ using src_type = View<T, SP...>;
using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
using src_memory_space = typename ViewTraits<T, SP...>::memory_space;
- enum {
- DstExecCanAccessSrc =
- Kokkos::SpaceAccessibility<dst_execution_space,
- src_memory_space>::accessible
- };
+ constexpr bool DstExecCanAccessSrc =
+ Kokkos::SpaceAccessibility<dst_execution_space,
+ src_memory_space>::accessible;
+ static_assert(
+ DstExecCanAccessSrc,
+ "deep_copy given views that would require a temporary allocation");
- if (DstExecCanAccessSrc) {
- // Copying data between views in accessible memory spaces and either
- // non-contiguous or incompatible shape.
- Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
- Kokkos::fence("Kokkos::deep_copy(DynamicView)");
- } else {
- Kokkos::Impl::throw_runtime_exception(
- "deep_copy given views that would require a temporary allocation");
- }
+  // Copy between views in mutually accessible memory spaces that are either
+  // non-contiguous or of incompatible shape.
+ Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
+ Kokkos::fence("Kokkos::deep_copy(DynamicView)");
}
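+// With the static_assert above, an inaccessible combination no longer throws
+// at run time; it fails to compile. A valid call looks like (extents
+// illustrative):
+//
+//   Kokkos::Experimental::DynamicView<int*> dyn("dyn", 256, 4096);
+//   dyn.resize_serial(1000);
+//   Kokkos::View<int*, Kokkos::HostSpace> flat("flat", 1000);
+//   Kokkos::deep_copy(flat, dyn);  // requires host access to dyn's memory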
namespace Impl {
} // namespace Impl
-template <class... ViewCtorArgs, class T, class... P>
+// create a mirror view and deep copy it
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class... ViewCtorArgs, class T, class... P,
+ class Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>&,
- const Kokkos::Experimental::DynamicView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- Impl::MirrorDynamicViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::Experimental::DynamicView<T, P...>& src) {
using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
- static_assert(
- alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must include a memory space!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::allow_padding,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not explicitly allow padding!");
-
- // same behavior as deep_copy(src, src)
- if (!alloc_prop_input::has_execution_space)
- fence(
- "Kokkos::create_mirror_view_and_copy: fence before returning src view");
- return src;
-}
-template <class... ViewCtorArgs, class T, class... P>
-auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::Experimental::DynamicView<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- !Impl::MirrorDynamicViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
- static_assert(
- alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must include a memory space!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::allow_padding,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not explicitly allow padding!");
- using Space = typename alloc_prop_input::memory_space;
- using Mirror =
- typename Impl::MirrorDynamicViewType<Space, T, P...>::view_type;
-
- // Add some properties if not provided to avoid need for if constexpr
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned int, 12>, std::string>,
- std::conditional_t<!alloc_prop_input::initialize,
- std::integral_constant<unsigned int, 13>,
- Impl::WithoutInitializing_t>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 14>,
- typename Space::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
-
- std::string& label =
- static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
- if (label.empty()) label = src.label();
- auto mirror = typename Mirror::non_const_type(
- arg_prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
- mirror.resize_serial(src.extent(0));
- if (alloc_prop_input::has_execution_space) {
- using ExecutionSpace = typename alloc_prop::execution_space;
- deep_copy(
- static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
- .value,
- mirror, src);
- } else
- deep_copy(mirror, src);
- return mirror;
+ Impl::check_view_ctor_args_create_mirror_view_and_copy<ViewCtorArgs...>();
+
+ if constexpr (Impl::MirrorDynamicViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ // same behavior as deep_copy(src, src)
+ if constexpr (!alloc_prop_input::has_execution_space)
+ fence(
+ "Kokkos::create_mirror_view_and_copy: fence before returning src "
+ "view");
+ return src;
+ } else {
+ using Space = typename alloc_prop_input::memory_space;
+ using Mirror =
+ typename Impl::MirrorDynamicViewType<Space, T, P...>::view_type;
+
+ auto arg_prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string{}, WithoutInitializing,
+ typename Space::execution_space{});
+
+ std::string& label = Impl::get_property<Impl::LabelTag>(arg_prop_copy);
+ if (label.empty()) label = src.label();
+ auto mirror = typename Mirror::non_const_type(
+ arg_prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
+ mirror.resize_serial(src.extent(0));
+ if constexpr (alloc_prop_input::has_execution_space) {
+ deep_copy(Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop_copy),
+ mirror, src);
+ } else
+ deep_copy(mirror, src);
+ return mirror;
+ }
+#if defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC)
+ __builtin_unreachable();
+#endif
}
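+// Typical call sites go through the space-based overload below; as shown
+// above, an empty label falls back to src.label():
+//
+//   auto host_d = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, d);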
-template <class Space, class T, class... P>
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
auto create_mirror_view_and_copy(
const Space&, const Kokkos::Experimental::DynamicView<T, P...>& src,
std::string const& name = "") {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EXPERIMENTAL_ERROR_REPORTER_HPP
#define KOKKOS_EXPERIMENTAL_ERROR_REPORTER_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_FUNCTIONAL_HPP
+#define KOKKOS_FUNCTIONAL_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Functional_impl.hpp>
+
+namespace Kokkos {
+
+// These should work for most types; the pod_* variants additionally assume
+// that T is trivially copyable.
+
+template <typename T>
+struct pod_hash {
+ KOKKOS_FORCEINLINE_FUNCTION
+ uint32_t operator()(T const& t) const {
+ return Impl::MurmurHash3_x86_32(&t, sizeof(T), 0);
+ }
+
+ KOKKOS_FORCEINLINE_FUNCTION
+ uint32_t operator()(T const& t, uint32_t seed) const {
+ return Impl::MurmurHash3_x86_32(&t, sizeof(T), seed);
+ }
+};
+
+template <typename T>
+struct pod_equal_to {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const {
+ return Impl::bitwise_equal(&a, &b);
+ }
+};
+
+template <typename T>
+struct pod_not_equal_to {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const {
+ return !Impl::bitwise_equal(&a, &b);
+ }
+};
+
+template <typename T>
+struct equal_to {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a == b; }
+};
+
+template <typename T>
+struct not_equal_to {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a != b; }
+};
+
+template <typename T>
+struct greater {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a > b; }
+};
+
+template <typename T>
+struct less {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a < b; }
+};
+
+template <typename T>
+struct greater_equal {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a >= b; }
+};
+
+template <typename T>
+struct less_equal {
+ KOKKOS_FORCEINLINE_FUNCTION
+ bool operator()(T const& a, T const& b) const { return a <= b; }
+};
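+
+// These functors are the default Hasher/EqualTo of Kokkos::UnorderedMap; a
+// spelled-out sketch (the last two template arguments are the defaults):
+//
+//   Kokkos::UnorderedMap<int, double, Kokkos::DefaultExecutionSpace,
+//                        Kokkos::pod_hash<int>, Kokkos::pod_equal_to<int>>
+//       map(1024);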
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#endif
+#endif // KOKKOS_FUNCTIONAL_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OFFSETVIEW_HPP_
+#define KOKKOS_OFFSETVIEW_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <Kokkos_View.hpp>
+
+namespace Kokkos {
+
+namespace Experimental {
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class DataType, class... Properties>
+class OffsetView;
+
+template <class>
+struct is_offset_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_offset_view<OffsetView<D, P...>> : public std::true_type {};
+
+template <class D, class... P>
+struct is_offset_view<const OffsetView<D, P...>> : public std::true_type {};
+
+template <class T>
+inline constexpr bool is_offset_view_v = is_offset_view<T>::value;
+
+#define KOKKOS_INVALID_OFFSET int64_t(0x7FFFFFFFFFFFFFFFLL)
+#define KOKKOS_INVALID_INDEX_RANGE \
+ { KOKKOS_INVALID_OFFSET, KOKKOS_INVALID_OFFSET }
+
+template <typename iType,
+ std::enable_if_t<std::is_integral_v<iType> && std::is_signed_v<iType>,
+ iType> = 0>
+using IndexRange = Kokkos::Array<iType, 2>;
+
+using index_list_type = std::initializer_list<int64_t>;
+
+// template <typename iType,
+// std::enable_if_t< std::is_integral<iType>::value &&
+// std::is_signed<iType>::value, iType > = 0> using min_index_type =
+// std::initializer_list<iType>;
+
+namespace Impl {
+
+template <class ViewType>
+struct GetOffsetViewTypeFromViewType {
+ using type =
+ OffsetView<typename ViewType::data_type, typename ViewType::array_layout,
+ typename ViewType::device_type,
+ typename ViewType::memory_traits>;
+};
+
+template <unsigned, class MapType, class BeginsType>
+KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
+ const MapType&, const BeginsType&) {
+ return true;
+}
+
+template <unsigned R, class MapType, class BeginsType, class iType,
+ class... Args>
+KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
+ const MapType& map, const BeginsType& begins, const iType& i,
+ Args... args) {
+ const bool legalIndex =
+ (int64_t(i) >= begins[R]) &&
+ (int64_t(i) <= int64_t(begins[R] + map.extent(R) - 1));
+ return legalIndex &&
+ offsetview_verify_operator_bounds<R + 1>(map, begins, args...);
+}
+template <unsigned, class MapType, class BeginsType>
+inline void offsetview_error_operator_bounds(char*, int, const MapType&,
+ const BeginsType&) {}
+
+template <unsigned R, class MapType, class BeginsType, class iType,
+ class... Args>
+inline void offsetview_error_operator_bounds(char* buf, int len,
+ const MapType& map,
+ const BeginsType begins,
+ const iType& i, Args... args) {
+ const int64_t b = begins[R];
+ const int64_t e = b + map.extent(R) - 1;
+  const int n =
+      snprintf(buf, len, " %ld <= %ld <= %ld %c", static_cast<long>(b),
+               static_cast<long>(i), static_cast<long>(e),
+               (sizeof...(Args) ? ',' : ')'));
+ offsetview_error_operator_bounds<R + 1>(buf + n, len - n, map, begins,
+ args...);
+}
+
+template <class MemorySpace, class MapType, class BeginsType, class... Args>
+KOKKOS_INLINE_FUNCTION void offsetview_verify_operator_bounds(
+ Kokkos::Impl::SharedAllocationTracker const& tracker, const MapType& map,
+ const BeginsType& begins, Args... args) {
+ if (!offsetview_verify_operator_bounds<0>(map, begins, args...)) {
+ KOKKOS_IF_ON_HOST(
+ (enum {LEN = 1024}; char buffer[LEN];
+ const std::string label = tracker.template get_label<MemorySpace>();
+ int n = snprintf(buffer, LEN,
+ "OffsetView bounds error of view labeled %s (",
+ label.c_str());
+ offsetview_error_operator_bounds<0>(buffer + n, LEN - n, map, begins,
+ args...);
+ Kokkos::abort(buffer);))
+
+ KOKKOS_IF_ON_DEVICE(
+ (Kokkos::abort("OffsetView bounds error"); (void)tracker;))
+ }
+}
+
+inline void runtime_check_rank_host(const size_t rank_dynamic,
+ const size_t rank,
+ const index_list_type minIndices,
+ const std::string& label) {
+ bool isBad = false;
+ std::string message =
+ "Kokkos::Experimental::OffsetView ERROR: for OffsetView labeled '" +
+ label + "':";
+ if (rank_dynamic != rank) {
+    message +=
+        " The full rank must be the same as the dynamic rank. full rank = ";
+ message += std::to_string(rank) +
+ " dynamic rank = " + std::to_string(rank_dynamic) + "\n";
+ isBad = true;
+ }
+
+ size_t numOffsets = 0;
+ for (size_t i = 0; i < minIndices.size(); ++i) {
+ if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
+ }
+ if (numOffsets != rank_dynamic) {
+ message += "The number of offsets provided ( " +
+ std::to_string(numOffsets) +
+ " ) must equal the dynamic rank ( " +
+ std::to_string(rank_dynamic) + " ).";
+ isBad = true;
+ }
+
+ if (isBad) Kokkos::abort(message.c_str());
+}
+
+KOKKOS_INLINE_FUNCTION
+void runtime_check_rank_device(const size_t rank_dynamic, const size_t rank,
+ const index_list_type minIndices) {
+ if (rank_dynamic != rank) {
+ Kokkos::abort(
+ "The full rank of an OffsetView must be the same as the dynamic rank.");
+ }
+ size_t numOffsets = 0;
+ for (size_t i = 0; i < minIndices.size(); ++i) {
+ if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
+ }
+ if (numOffsets != rank) {
+ Kokkos::abort(
+ "The number of offsets provided to an OffsetView constructor must "
+ "equal the dynamic rank.");
+ }
+}
+} // namespace Impl
+
+template <class DataType, class... Properties>
+class OffsetView : public View<DataType, Properties...> {
+ private:
+ template <class, class...>
+ friend class OffsetView;
+
+ using base_t = View<DataType, Properties...>;
+
+ public:
+ // typedefs to reduce typing base_t:: further down
+ using traits = typename base_t::traits;
+ // FIXME: should be base_t::index_type after refactor
+ using index_type = typename base_t::memory_space::size_type;
+ using pointer_type = typename base_t::pointer_type;
+
+ using begins_type = Kokkos::Array<int64_t, base_t::rank()>;
+
+ template <typename iType,
+ std::enable_if_t<std::is_integral_v<iType>, iType> = 0>
+ KOKKOS_FUNCTION int64_t begin(const iType local_dimension) const {
+ return static_cast<size_t>(local_dimension) < base_t::rank()
+ ? m_begins[local_dimension]
+ : KOKKOS_INVALID_OFFSET;
+ }
+
+ KOKKOS_FUNCTION
+ begins_type begins() const { return m_begins; }
+
+ template <typename iType,
+ std::enable_if_t<std::is_integral_v<iType>, iType> = 0>
+ KOKKOS_FUNCTION int64_t end(const iType local_dimension) const {
+ return begin(local_dimension) + base_t::extent(local_dimension);
+ }
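+
+  // For an OffsetView covering the index range {-2, 2} in a rank,
+  // extent == 5, begin() == -2, and end() == 3 (one past the last valid
+  // index), so iteration reads
+  //   for (int64_t i = v.begin(0); i < v.end(0); ++i) { /* v(i) */ }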
+
+ private:
+ begins_type m_begins;
+
+ public:
+ //----------------------------------------
+ /** \brief Compatible view of array of scalar types */
+ using array_type =
+ OffsetView<typename traits::scalar_array_type,
+ typename traits::array_layout, typename traits::device_type,
+ typename traits::memory_traits>;
+
+ /** \brief Compatible view of const data type */
+ using const_type =
+ OffsetView<typename traits::const_data_type,
+ typename traits::array_layout, typename traits::device_type,
+ typename traits::memory_traits>;
+
+ /** \brief Compatible view of non-const data type */
+ using non_const_type =
+ OffsetView<typename traits::non_const_data_type,
+ typename traits::array_layout, typename traits::device_type,
+ typename traits::memory_traits>;
+
+ /** \brief Compatible HostMirror view */
+ using HostMirror = OffsetView<typename traits::non_const_data_type,
+ typename traits::array_layout,
+ typename traits::host_mirror_space>;
+
+ template <size_t... I, class... OtherIndexTypes>
+ KOKKOS_FUNCTION typename base_t::reference_type offset_operator(
+ std::integer_sequence<size_t, I...>, OtherIndexTypes... indices) const {
+ return base_t::operator()((indices - m_begins[I])...);
+ }
+
+ template <class OtherIndexType>
+#ifndef KOKKOS_ENABLE_CXX17
+ requires(std::is_convertible_v<OtherIndexType, index_type> &&
+ std::is_nothrow_constructible_v<index_type, OtherIndexType> &&
+ (base_t::rank() == 1))
+#endif
+ KOKKOS_FUNCTION constexpr typename base_t::reference_type operator[](
+ const OtherIndexType& idx) const {
+#ifdef KOKKOS_ENABLE_CXX17
+ static_assert(std::is_convertible_v<OtherIndexType, index_type> &&
+ std::is_nothrow_constructible_v<index_type, OtherIndexType> &&
+ (base_t::rank() == 1));
+#endif
+ return base_t::operator[](idx - m_begins[0]);
+ }
+
+ template <class... OtherIndexTypes>
+#ifndef KOKKOS_ENABLE_CXX17
+ requires((std::is_convertible_v<OtherIndexTypes, index_type> && ...) &&
+ (std::is_nothrow_constructible_v<index_type, OtherIndexTypes> &&
+ ...) &&
+ (sizeof...(OtherIndexTypes) == base_t::rank()))
+#endif
+ KOKKOS_FUNCTION constexpr typename base_t::reference_type operator()(
+ OtherIndexTypes... indices) const {
+#ifdef KOKKOS_ENABLE_CXX17
+ static_assert(
+ (std::is_convertible_v<OtherIndexTypes, index_type> && ...) &&
+ (std::is_nothrow_constructible_v<index_type, OtherIndexTypes> && ...) &&
+ (sizeof...(OtherIndexTypes) == base_t::rank()));
+#endif
+ return offset_operator(std::make_index_sequence<base_t::rank()>(),
+ indices...);
+ }
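+
+  // operator() subtracts the per-rank begins before delegating to View, e.g.
+  // with begins {-1, -1} the element v(-1, -1) is the underlying (0, 0):
+  //
+  //   Kokkos::Experimental::OffsetView<double**> v("v", {-1, 1}, {-1, 1});
+  //   v(-1, -1) = 3.0;  // same storage as v.view()(0, 0)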
+
+ template <class... OtherIndexTypes>
+ KOKKOS_FUNCTION constexpr typename base_t::reference_type access(
+ OtherIndexTypes... args) const = delete;
+
+ //----------------------------------------
+
+ //----------------------------------------
+ // Standard destructor, constructors, and assignment operators
+
+ KOKKOS_FUNCTION
+ OffsetView() : base_t() {
+ for (size_t i = 0; i < base_t::rank(); ++i)
+ m_begins[i] = KOKKOS_INVALID_OFFSET;
+ }
+
+ // interoperability with View
+ private:
+ using view_type =
+ View<typename traits::scalar_array_type, typename traits::array_layout,
+ typename traits::device_type, typename traits::memory_traits>;
+
+ public:
+ KOKKOS_FUNCTION
+ view_type view() const { return *this; }
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION OffsetView(const View<RT, RP...>& aview) : base_t(aview) {
+ for (size_t i = 0; i < View<RT, RP...>::rank(); ++i) {
+ m_begins[i] = 0;
+ }
+ }
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION OffsetView(const View<RT, RP...>& aview,
+ const index_list_type& minIndices)
+ : base_t(aview) {
+ KOKKOS_IF_ON_HOST(
+ (Kokkos::Experimental::Impl::runtime_check_rank_host(
+ traits::rank_dynamic, base_t::rank(), minIndices, aview.label());))
+
+ KOKKOS_IF_ON_DEVICE(
+ (Kokkos::Experimental::Impl::runtime_check_rank_device(
+ traits::rank_dynamic, base_t::rank(), minIndices);))
+ for (size_t i = 0; i < minIndices.size(); ++i) {
+ m_begins[i] = minIndices.begin()[i];
+ }
+ }
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION OffsetView(const View<RT, RP...>& aview,
+ const begins_type& beg)
+ : base_t(aview), m_begins(beg) {}
+
+ // may assign unmanaged from managed.
+
+ template <class RT, class... RP>
+ KOKKOS_FUNCTION OffsetView(const OffsetView<RT, RP...>& rhs)
+ : base_t(rhs.view()), m_begins(rhs.m_begins) {}
+
+ private:
+ enum class subtraction_failure {
+ none,
+ negative,
+ overflow,
+ };
+
+ // Subtraction should return a non-negative number and not overflow
+ KOKKOS_FUNCTION static subtraction_failure check_subtraction(int64_t lhs,
+ int64_t rhs) {
+ if (lhs < rhs) return subtraction_failure::negative;
+
+ if (static_cast<uint64_t>(-1) / static_cast<uint64_t>(2) <
+ static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs))
+ return subtraction_failure::overflow;
+
+ return subtraction_failure::none;
+ }
+
+ // Need a way to get at an element from both begins_type (aka Kokkos::Array
+ // which doesn't have iterators) and index_list_type (aka
+ // std::initializer_list which doesn't have .data() or operator[]).
+ // Returns by value
+ KOKKOS_FUNCTION
+ static int64_t at(const begins_type& a, size_t pos) { return a[pos]; }
+
+ KOKKOS_FUNCTION
+ static int64_t at(index_list_type a, size_t pos) {
+ return *(a.begin() + pos);
+ }
+
+ // Check that begins < ends for all elements
+ // B, E can be begins_type and/or index_list_type
+ template <typename B, typename E>
+ static subtraction_failure runtime_check_begins_ends_host(const B& begins,
+ const E& ends) {
+ std::string message;
+ if (begins.size() != base_t::rank())
+ message +=
+ "begins.size() "
+ "(" +
+ std::to_string(begins.size()) +
+ ")"
+ " != Rank "
+ "(" +
+ std::to_string(base_t::rank()) +
+ ")"
+ "\n";
+
+ if (ends.size() != base_t::rank())
+ message +=
+ "ends.size() "
+ "(" +
+ std::to_string(ends.size()) +
+ ")"
+ " != Rank "
+ "(" +
+ std::to_string(base_t::rank()) +
+ ")"
+ "\n";
+
+ // If there are no errors so far, then arg_rank == Rank
+ // Otherwise, check as much as possible
+ size_t arg_rank = begins.size() < ends.size() ? begins.size() : ends.size();
+ for (size_t i = 0; i != arg_rank; ++i) {
+ subtraction_failure sf = check_subtraction(at(ends, i), at(begins, i));
+ if (sf != subtraction_failure::none) {
+ message +=
+ "("
+ "ends[" +
+ std::to_string(i) +
+ "]"
+ " "
+ "(" +
+ std::to_string(at(ends, i)) +
+ ")"
+ " - "
+ "begins[" +
+ std::to_string(i) +
+ "]"
+ " "
+ "(" +
+ std::to_string(at(begins, i)) +
+ ")"
+ ")";
+ switch (sf) {
+ case subtraction_failure::negative:
+ message += " must be non-negative\n";
+ break;
+ case subtraction_failure::overflow: message += " overflows\n"; break;
+ default: break;
+ }
+ }
+ }
+
+ if (!message.empty()) {
+ message =
+ "Kokkos::Experimental::OffsetView ERROR: for unmanaged OffsetView\n" +
+ message;
+ Kokkos::abort(message.c_str());
+ }
+
+ return subtraction_failure::none;
+ }
+
+  // Check that begins < ends for all elements
+ template <typename B, typename E>
+ KOKKOS_FUNCTION static subtraction_failure runtime_check_begins_ends_device(
+ const B& begins, const E& ends) {
+ if (begins.size() != base_t::rank())
+ Kokkos::abort(
+ "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+ "OffsetView: begins has bad Rank");
+ if (ends.size() != base_t::rank())
+ Kokkos::abort(
+ "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+ "OffsetView: ends has bad Rank");
+
+ for (size_t i = 0; i != begins.size(); ++i) {
+ switch (check_subtraction(at(ends, i), at(begins, i))) {
+ case subtraction_failure::negative:
+ Kokkos::abort(
+ "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+ "OffsetView: bad range");
+ break;
+ case subtraction_failure::overflow:
+ Kokkos::abort(
+ "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+ "OffsetView: range overflows");
+ break;
+ default: break;
+ }
+ }
+
+ return subtraction_failure::none;
+ }
+
+ template <typename B, typename E>
+ KOKKOS_FUNCTION static subtraction_failure runtime_check_begins_ends(
+ const B& begins, const E& ends) {
+ KOKKOS_IF_ON_HOST((return runtime_check_begins_ends_host(begins, ends);))
+ KOKKOS_IF_ON_DEVICE(
+ (return runtime_check_begins_ends_device(begins, ends);))
+ }
+
+ // Constructor around unmanaged data after checking begins < ends for all
+ // elements
+ // Each of B, E can be begins_type and/or index_list_type
+ // Precondition: begins.size() == ends.size() == m_begins.size() == Rank
+ template <typename B, typename E>
+ KOKKOS_FUNCTION OffsetView(const pointer_type& p, const B& begins_,
+ const E& ends_, subtraction_failure)
+ : base_t(Kokkos::view_wrap(p),
+ typename traits::array_layout(
+ base_t::rank() > 0 ? at(ends_, 0) - at(begins_, 0)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 1 ? at(ends_, 1) - at(begins_, 1)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 2 ? at(ends_, 2) - at(begins_, 2)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 3 ? at(ends_, 3) - at(begins_, 3)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 4 ? at(ends_, 4) - at(begins_, 4)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 5 ? at(ends_, 5) - at(begins_, 5)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 6 ? at(ends_, 6) - at(begins_, 6)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ base_t::rank() > 7 ? at(ends_, 7) - at(begins_, 7)
+ : KOKKOS_IMPL_CTOR_DEFAULT_ARG)) {
+ for (size_t i = 0; i != m_begins.size(); ++i) {
+ m_begins[i] = at(begins_, i);
+ };
+ }
+
+ public:
+ // Constructor around unmanaged data
+ // Four overloads, as both begins and ends can be either
+ // begins_type or index_list_type
+ KOKKOS_FUNCTION
+ OffsetView(const pointer_type& p, const begins_type& begins_,
+ const begins_type& ends_)
+ : OffsetView(p, begins_, ends_,
+ runtime_check_begins_ends(begins_, ends_)) {}
+
+ KOKKOS_FUNCTION
+ OffsetView(const pointer_type& p, const begins_type& begins_,
+ index_list_type ends_)
+ : OffsetView(p, begins_, ends_,
+ runtime_check_begins_ends(begins_, ends_)) {}
+
+ KOKKOS_FUNCTION
+ OffsetView(const pointer_type& p, index_list_type begins_,
+ const begins_type& ends_)
+ : OffsetView(p, begins_, ends_,
+ runtime_check_begins_ends(begins_, ends_)) {}
+
+ KOKKOS_FUNCTION
+ OffsetView(const pointer_type& p, index_list_type begins_,
+ index_list_type ends_)
+ : OffsetView(p, begins_, ends_,
+ runtime_check_begins_ends(begins_, ends_)) {}
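+
+  // Wrapping sketch for user-owned memory; with these constructors the ends
+  // are exclusive (extent == end - begin), so buf needs 4 * 3 elements:
+  //
+  //   double buf[12];
+  //   Kokkos::Experimental::OffsetView<double**, Kokkos::HostSpace>
+  //       u(buf, {-2, 0}, {2, 3});  // valid indices -2..1 and 0..2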
+
+ // Choosing std::pair as type for the arguments allows constructing an
+ // OffsetView using list initialization syntax, e.g.,
+  //   OffsetView dummy("dummy", {-1, 3}, {-2, 2});
+ // We could allow arbitrary types RangeType that support
+ // std::get<{0,1}>(RangeType const&) with std::tuple_size<RangeType>::value==2
+ // but this wouldn't allow using the syntax in the example above.
+ template <typename Label>
+ explicit OffsetView(
+ const Label& arg_label,
+ std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+ const std::pair<int64_t, int64_t>>
+ range0,
+ const std::pair<int64_t, int64_t> range1 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range2 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range3 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range4 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range5 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range6 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range7 = KOKKOS_INVALID_INDEX_RANGE
+
+ )
+ : OffsetView(Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+ typename traits::array_layout(
+ range0.first == KOKKOS_INVALID_OFFSET
+                            ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range0.second - range0.first + 1,
+ range1.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range1.second - range1.first + 1,
+ range2.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range2.second - range2.first + 1,
+ range3.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range3.second - range3.first + 1,
+ range4.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range4.second - range4.first + 1,
+ range5.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range5.second - range5.first + 1,
+ range6.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range6.second - range6.first + 1,
+ range7.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range7.second - range7.first + 1),
+ {range0.first, range1.first, range2.first, range3.first,
+ range4.first, range5.first, range6.first, range7.first}) {}
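+
+  // Note the asymmetry with the pointer-wrapping constructors: here each
+  // range is an inclusive pair, so extent == second - first + 1. Sketch:
+  //
+  //   Kokkos::Experimental::OffsetView<double**> w("w", {-1, 3}, {-2, 2});
+  //   // extents 5 x 5; valid indices -1..3 and -2..2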
+
+ template <class... P>
+ explicit OffsetView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ const std::pair<int64_t, int64_t> range0 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range1 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range2 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range3 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range4 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range5 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range6 = KOKKOS_INVALID_INDEX_RANGE,
+ const std::pair<int64_t, int64_t> range7 = KOKKOS_INVALID_INDEX_RANGE)
+ : OffsetView(arg_prop,
+ typename traits::array_layout(
+ range0.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range0.second - range0.first + 1,
+ range1.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range1.second - range1.first + 1,
+ range2.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range2.second - range2.first + 1,
+ range3.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range3.second - range3.first + 1,
+ range4.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range4.second - range4.first + 1,
+ range5.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range5.second - range5.first + 1,
+ range6.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range6.second - range6.first + 1,
+ range7.first == KOKKOS_INVALID_OFFSET
+ ? KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ : range7.second - range7.first + 1),
+ {range0.first, range1.first, range2.first, range3.first,
+ range4.first, range5.first, range6.first, range7.first}) {}
+
+ template <class... P>
+ explicit KOKKOS_FUNCTION OffsetView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ typename traits::array_layout> const& arg_layout,
+ const index_list_type minIndices)
+ : base_t(arg_prop, arg_layout) {
+ KOKKOS_IF_ON_HOST((Kokkos::Experimental::Impl::runtime_check_rank_host(
+ traits::rank_dynamic, base_t::rank(), minIndices,
+ base_t::label());))
+
+ KOKKOS_IF_ON_DEVICE(
+ (Kokkos::Experimental::Impl::runtime_check_rank_device(
+ traits::rank_dynamic, base_t::rank(), minIndices);))
+ for (size_t i = 0; i < minIndices.size(); ++i) {
+ m_begins[i] = minIndices.begin()[i];
+ }
+ static_assert(
+ std::is_same<pointer_type, typename Kokkos::Impl::ViewCtorProp<
+ P...>::pointer_type>::value,
+ "When constructing OffsetView to wrap user memory, you must supply "
+ "matching pointer type");
+ }
+
+ template <class... P>
+ explicit OffsetView(
+ const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+ std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+ typename traits::array_layout> const& arg_layout,
+ const index_list_type minIndices)
+ : base_t(arg_prop, arg_layout) {
+ for (size_t i = 0; i < base_t::rank(); ++i)
+ m_begins[i] = minIndices.begin()[i];
+ }
+};
+
+/** \brief Temporary free function rank() until rank() is implemented
+ *         in the View.
+ */
+template <typename D, class... P>
+KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const OffsetView<D, P...>& V) {
+ return V.rank();
+} // Temporary until added to view
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Impl {
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, T> shift_input(
+ const T arg, const int64_t offset) {
+ return arg - offset;
+}
+
+KOKKOS_INLINE_FUNCTION
+Kokkos::ALL_t shift_input(const Kokkos::ALL_t arg, const int64_t /*offset*/) {
+ return arg;
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_integral_v<T>, Kokkos::pair<T, T>>
+ shift_input(const Kokkos::pair<T, T> arg, const int64_t offset) {
+ return Kokkos::make_pair<T, T>(arg.first - offset, arg.second - offset);
+}
+template <class T>
+inline std::enable_if_t<std::is_integral_v<T>, std::pair<T, T>> shift_input(
+ const std::pair<T, T> arg, const int64_t offset) {
+ return std::make_pair<T, T>(arg.first - offset, arg.second - offset);
+}
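+
+// shift_input translates user-facing (offset) subview arguments into the
+// zero-based coordinates of the wrapped View; for begin == -2:
+//   shift_input(0, -2)                        == 2
+//   shift_input(Kokkos::make_pair(-2, 0), -2) == [0, 2)
+//   shift_input(Kokkos::ALL_t{}, -2)          is ALL, unchanged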
+
+template <size_t N, class Arg, class A>
+KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
+ const size_t i, Kokkos::Array<int64_t, N>& subviewBegins,
+ std::enable_if_t<N != 0, const Arg> shiftedArg, const Arg arg,
+ const A viewBegins, size_t& counter) {
+ if (!std::is_integral_v<Arg>) {
+ subviewBegins[counter] = shiftedArg == arg ? viewBegins[i] : 0;
+ counter++;
+ }
+}
+
+template <size_t N, class Arg, class A>
+KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
+ const size_t /*i*/, Kokkos::Array<int64_t, N>& /*subviewBegins*/,
+ std::enable_if_t<N == 0, const Arg> /*shiftedArg*/, const Arg /*arg*/,
+ const A /*viewBegins*/, size_t& /*counter*/) {}
+
+template <class D, class... P, class T>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<void /* deduce subview type from
+ source view traits */
+ ,
+ ViewTraits<D, P...>, T>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T arg) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T shiftedArg = shift_input(arg, begins[0]);
+
+ constexpr size_t rank =
+ Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+ traits */
+ ,
+ ViewTraits<D, P...>, T>::type::rank;
+
+ auto theSubview = Kokkos::subview(theView, shiftedArg);
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(0, subviewBegins, shiftedArg,
+ arg, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
+ view traits */
+ ,
+ ViewTraits<D, P...>, T>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
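+
+// The Kokkos::Experimental::subview entry points for OffsetView dispatch to
+// these helpers. Ranks selected with ALL keep their begins; ranks selected
+// with an index range restart at 0 (see map_arg_to_new_begin). Sketch:
+//
+//   // v: OffsetView<double**> with begins {-1, -2}
+//   auto row = Kokkos::Experimental::subview(v, 0, Kokkos::ALL());
+//   // row is rank-1 with row.begin(0) == -2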
+
+template <class D, class... P, class T0, class T1>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1>::type>::type
+ subview_offset(const Kokkos::Experimental::OffsetView<D, P...>& src,
+ T0 arg0, T1 arg1) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+
+ auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1);
+ constexpr size_t rank =
+ Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+ traits */
+ ,
+ ViewTraits<D, P...>, T0, T1>::type::rank;
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1>::type>::type offsetView(theSubview,
+ subviewBegins);
+
+ return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+
+ auto theSubview =
+ Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2);
+
+ constexpr size_t rank =
+ Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+ traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2>::type::rank;
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+ T3 arg3) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+ T3 shiftedArg3 = shift_input(arg3, begins[3]);
+
+ auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+ shiftedArg2, shiftedArg3);
+
+ constexpr size_t rank = Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3>::type::rank;
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 3, subviewBegins, shiftedArg3, arg3, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+ T3 arg3, T4 arg4) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+ T3 shiftedArg3 = shift_input(arg3, begins[3]);
+ T4 shiftedArg4 = shift_input(arg4, begins[4]);
+
+ auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+ shiftedArg2, shiftedArg3, shiftedArg4);
+
+ constexpr size_t rank = Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type::rank;
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 3, subviewBegins, shiftedArg3, arg3, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 4, subviewBegins, shiftedArg4, arg4, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+ class T5>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+ T3 arg3, T4 arg4, T5 arg5) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+ T3 shiftedArg3 = shift_input(arg3, begins[3]);
+ T4 shiftedArg4 = shift_input(arg4, begins[4]);
+ T5 shiftedArg5 = shift_input(arg5, begins[5]);
+
+ auto theSubview =
+ Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
+ shiftedArg3, shiftedArg4, shiftedArg5);
+
+ constexpr size_t rank = Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type::rank;
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 3, subviewBegins, shiftedArg3, arg3, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 4, subviewBegins, shiftedArg4, arg4, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 5, subviewBegins, shiftedArg5, arg5, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+ class T5, class T6>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+ T3 arg3, T4 arg4, T5 arg5, T6 arg6) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+ T3 shiftedArg3 = shift_input(arg3, begins[3]);
+ T4 shiftedArg4 = shift_input(arg4, begins[4]);
+ T5 shiftedArg5 = shift_input(arg5, begins[5]);
+ T6 shiftedArg6 = shift_input(arg6, begins[6]);
+
+ auto theSubview =
+ Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
+ shiftedArg3, shiftedArg4, shiftedArg5, shiftedArg6);
+
+ constexpr size_t rank = Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type::rank;
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 3, subviewBegins, shiftedArg3, arg3, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 4, subviewBegins, shiftedArg4, arg4, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 5, subviewBegins, shiftedArg5, arg5, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 6, subviewBegins, shiftedArg6, arg6, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+ class T5, class T6, class T7>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
+ subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+ T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) {
+ auto theView = src.view();
+ auto begins = src.begins();
+
+ T0 shiftedArg0 = shift_input(arg0, begins[0]);
+ T1 shiftedArg1 = shift_input(arg1, begins[1]);
+ T2 shiftedArg2 = shift_input(arg2, begins[2]);
+ T3 shiftedArg3 = shift_input(arg3, begins[3]);
+ T4 shiftedArg4 = shift_input(arg4, begins[4]);
+ T5 shiftedArg5 = shift_input(arg5, begins[5]);
+ T6 shiftedArg6 = shift_input(arg6, begins[6]);
+ T7 shiftedArg7 = shift_input(arg7, begins[7]);
+
+ auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+ shiftedArg2, shiftedArg3, shiftedArg4,
+ shiftedArg5, shiftedArg6, shiftedArg7);
+
+ constexpr size_t rank = Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type::rank;
+
+ Kokkos::Array<int64_t, rank> subviewBegins;
+
+ size_t counter = 0;
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 0, subviewBegins, shiftedArg0, arg0, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 1, subviewBegins, shiftedArg1, arg1, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 2, subviewBegins, shiftedArg2, arg2, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 3, subviewBegins, shiftedArg3, arg3, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 4, subviewBegins, shiftedArg4, arg4, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 5, subviewBegins, shiftedArg5, arg5, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 6, subviewBegins, shiftedArg6, arg6, begins, counter);
+ Kokkos::Experimental::Impl::map_arg_to_new_begin(
+ 7, subviewBegins, shiftedArg7, arg7, begins, counter);
+
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
+ offsetView(theSubview, subviewBegins);
+
+ return offsetView;
+}
+} // namespace Impl
+
+template <class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION
+ typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+ typename Kokkos::Impl::ViewMapping<
+ void /* deduce subview type from source view traits */
+ ,
+ ViewTraits<D, P...>, Args...>::type>::type
+ subview(const OffsetView<D, P...>& src, Args... args) {
+ static_assert(
+ OffsetView<D, P...>::rank() == sizeof...(Args),
+ "subview requires one argument for each source OffsetView rank");
+
+ return Kokkos::Experimental::Impl::subview_offset(src, args...);
+}
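+
+// Illustrative usage of subview() above (a sketch, not upstream
+// documentation): arguments are given in the OffsetView's shifted index
+// space. Dimensions kept with Kokkos::ALL retain their original begin,
+// dimensions restricted by an index pair are re-based to begin 0, and
+// integral arguments collapse their dimension:
+//
+//   Kokkos::Experimental::OffsetView<double**> v("v", {-2, 2}, {-1, 3});
+//   auto col = Kokkos::Experimental::subview(v, Kokkos::ALL(), 0);
+//   // rank-1, col.begin(0) == -2, col.extent(0) == 5
+//   auto part = Kokkos::Experimental::subview(v, Kokkos::make_pair(-1, 1), 0);
+//   // rank-1, part.begin(0) == 0, part.extent(0) == 2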
+
+} // namespace Experimental
+} // namespace Kokkos
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
+ const OffsetView<RT, RP...>& rhs) {
+ // Same data, layout, dimensions
+ using lhs_traits = ViewTraits<LT, LP...>;
+ using rhs_traits = ViewTraits<RT, RP...>;
+
+ return std::is_same_v<typename lhs_traits::const_value_type,
+ typename rhs_traits::const_value_type> &&
+ std::is_same_v<typename lhs_traits::array_layout,
+ typename rhs_traits::array_layout> &&
+ std::is_same_v<typename lhs_traits::memory_space,
+ typename rhs_traits::memory_space> &&
+ unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+ lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
+ lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
+ lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
+ lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
+ lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7) &&
+ lhs.begin(0) == rhs.begin(0) && lhs.begin(1) == rhs.begin(1) &&
+ lhs.begin(2) == rhs.begin(2) && lhs.begin(3) == rhs.begin(3) &&
+ lhs.begin(4) == rhs.begin(4) && lhs.begin(5) == rhs.begin(5) &&
+ lhs.begin(6) == rhs.begin(6) && lhs.begin(7) == rhs.begin(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator!=(const OffsetView<LT, LP...>& lhs,
+ const OffsetView<RT, RP...>& rhs) {
+ return !(operator==(lhs, rhs));
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const View<LT, LP...>& lhs,
+ const OffsetView<RT, RP...>& rhs) {
+ // Same data, layout, dimensions
+ using lhs_traits = ViewTraits<LT, LP...>;
+ using rhs_traits = ViewTraits<RT, RP...>;
+
+ return std::is_same_v<typename lhs_traits::const_value_type,
+ typename rhs_traits::const_value_type> &&
+ std::is_same_v<typename lhs_traits::array_layout,
+ typename rhs_traits::array_layout> &&
+ std::is_same_v<typename lhs_traits::memory_space,
+ typename rhs_traits::memory_space> &&
+ unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+ lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
+ lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
+ lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
+ lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
+ lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
+ const View<RT, RP...>& rhs) {
+ return rhs == lhs;
+}
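+
+// Note (illustrative): the mixed View/OffsetView comparisons above can only
+// check types, data pointer, span, and extents; a plain View carries no
+// begins() to compare against. Assuming the OffsetView constructor that
+// takes a View and a begins list:
+//
+//   Kokkos::View<int*> a("a", 5);
+//   Kokkos::Experimental::OffsetView<int*> b(a, {-2});
+//   // a == b evaluates to true although b is indexed starting at -2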
+
+} // namespace Experimental
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class DT, class... DP>
+inline void deep_copy(
+ const Experimental::OffsetView<DT, DP...>& dst,
+ typename ViewTraits<DT, DP...>::const_value_type& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ static_assert(
+ std::is_same_v<typename ViewTraits<DT, DP...>::non_const_value_type,
+ typename ViewTraits<DT, DP...>::value_type>,
+ "deep_copy requires non-const type");
+
+ auto dstView = dst.view();
+ Kokkos::deep_copy(dstView, value);
+}
+
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+ const Experimental::OffsetView<DT, DP...>& dst,
+ const Experimental::OffsetView<ST, SP...>& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ static_assert(
+ std::is_same_v<typename ViewTraits<DT, DP...>::value_type,
+ typename ViewTraits<ST, SP...>::non_const_value_type>,
+ "deep_copy requires matching non-const destination type");
+
+ auto dstView = dst.view();
+ Kokkos::deep_copy(dstView, value.view());
+}
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+ const Experimental::OffsetView<DT, DP...>& dst,
+ const View<ST, SP...>& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ static_assert(
+ std::is_same_v<typename ViewTraits<DT, DP...>::value_type,
+ typename ViewTraits<ST, SP...>::non_const_value_type>,
+ "deep_copy requires matching non-const destination type");
+
+ auto dstView = dst.view();
+ Kokkos::deep_copy(dstView, value);
+}
+
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+ const View<DT, DP...>& dst,
+ const Experimental::OffsetView<ST, SP...>& value,
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
+ static_assert(
+ std::is_same_v<typename ViewTraits<DT, DP...>::value_type,
+ typename ViewTraits<ST, SP...>::non_const_value_type>,
+ "deep_copy requires matching non-const destination type");
+
+ Kokkos::deep_copy(dst, value.view());
+}
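+
+// Usage sketch for the deep_copy() overloads above: the offsets play no role
+// in the copy, only the underlying views do, so the extents must match while
+// the begins() may differ.
+//
+//   Kokkos::Experimental::OffsetView<double*> dst("dst", {-3, 6});
+//   Kokkos::Experimental::OffsetView<double*> src("src", {0, 9});
+//   Kokkos::deep_copy(dst, 0.0);  // fill with a scalar
+//   Kokkos::deep_copy(dst, src);  // both extents are 10; begins differ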
+
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorOffsetViewType {
+ // The incoming view_type
+ using src_view_type = typename Kokkos::Experimental::OffsetView<T, P...>;
+ // The memory space for the mirror view
+ using memory_space = typename Space::memory_space;
+ // Check whether it is the same memory space
+ enum {
+ is_same_memspace =
+ std::is_same_v<memory_space, typename src_view_type::memory_space>
+ };
+ // The array_layout
+ using array_layout = typename src_view_type::array_layout;
+ // The data type (we probably want it non-const since otherwise we can't even
+ // deep_copy to it.)
+ using data_type = typename src_view_type::non_const_data_type;
+ // The destination view type if it is not the same memory space
+ using dest_view_type =
+ Kokkos::Experimental::OffsetView<data_type, array_layout, Space>;
+ // If it is the same memory_space return the existing view_type
+ // This will also keep the unmanaged trait if necessary
+ using view_type =
+ std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
+
+} // namespace Impl
+
+namespace Impl {
+
+// create a mirror
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(const Kokkos::Experimental::OffsetView<T, P...>& src,
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
+
+ if constexpr (Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ using Space = typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space;
+
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string(src.label()).append("_mirror"));
+
+ return typename Kokkos::Impl::MirrorOffsetViewType<
+ Space, T, P...>::dest_view_type(prop_copy, src.layout(),
+ {src.begin(0), src.begin(1),
+ src.begin(2), src.begin(3),
+ src.begin(4), src.begin(5),
+ src.begin(6), src.begin(7)});
+ } else {
+ return typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror(
+ Kokkos::create_mirror(arg_prop, src.view()), src.begins());
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+}
+
+} // namespace Impl
+
+// public interface
+template <class T, class... P,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
+}
+
+// public interface that accepts a without initializing flag
+template <class T, class... P,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ Kokkos::Impl::WithoutInitializing_t wi,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror(src, Kokkos::view_alloc(wi));
+}
+
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ const Space&, const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror(
+ src, Kokkos::view_alloc(typename Space::memory_space{}));
+}
+
+// public interface that accepts a space and a without initializing flag
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror(
+ src, Kokkos::view_alloc(typename Space::memory_space{}, wi));
+}
+
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+inline auto create_mirror(
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror(src, arg_prop);
+}
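+
+// Typical calls into the create_mirror() overloads above (a sketch):
+//
+//   Kokkos::Experimental::OffsetView<double*> d("d", {-1, 8});
+//   auto h1 = Kokkos::create_mirror(d);  // host mirror, begins() preserved
+//   auto h2 = Kokkos::create_mirror(Kokkos::WithoutInitializing, d);
+//   auto h3 = Kokkos::create_mirror(Kokkos::HostSpace{}, d);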
+
+namespace Impl {
+
+// create a mirror view
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+ const Kokkos::Experimental::OffsetView<T, P...>& src,
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ if constexpr (!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ if constexpr (std::is_same_v<typename Kokkos::Experimental::OffsetView<
+ T, P...>::memory_space,
+ typename Kokkos::Experimental::OffsetView<
+ T, P...>::HostMirror::memory_space> &&
+ std::is_same_v<typename Kokkos::Experimental::OffsetView<
+ T, P...>::data_type,
+ typename Kokkos::Experimental::OffsetView<
+ T, P...>::HostMirror::data_type>) {
+ return
+ typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ } else {
+ if constexpr (Impl::MirrorOffsetViewType<typename Impl::ViewCtorProp<
+ ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ return typename Impl::MirrorOffsetViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::view_type(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
+}
+
+} // namespace Impl
+
+// public interface
+template <class T, class... P>
+inline auto create_mirror_view(
+ const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror_view(src, Impl::ViewCtorProp<>{});
+}
+
+// public interface that accepts a without initializing flag
+template <class T, class... P>
+inline auto create_mirror_view(
+ Kokkos::Impl::WithoutInitializing_t wi,
+ const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
+}
+
+// public interface that accepts a space
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror_view(
+ const Space&, const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror_view(
+ src, Kokkos::view_alloc(typename Space::memory_space{}));
+}
+
+// public interface that accepts a space and a without initializing flag
+template <class Space, class T, class... P,
+ typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror_view(
+ Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror_view(
+ src, Kokkos::view_alloc(typename Space::memory_space{}, wi));
+}
+
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return Impl::create_mirror_view(src, arg_prop);
+}
+
+// create a mirror view and deep copy it
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class... ViewCtorArgs, class T, class... P>
+typename Kokkos::Impl::MirrorOffsetViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::view_type
+create_mirror_view_and_copy(
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::Experimental::OffsetView<T, P...>& src) {
+ return {create_mirror_view_and_copy(arg_prop, src.view()), src.begins()};
+}
+
+template <class Space, class T, class... P>
+typename Kokkos::Impl::MirrorOffsetViewType<Space, T, P...>::view_type
+create_mirror_view_and_copy(
+ const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src,
+ std::string const& name = "") {
+ return {create_mirror_view_and_copy(space, src.view(), name), src.begins()};
+}
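+
+// Usage sketch: for an OffsetView d living in a device memory space,
+//
+//   auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, d);
+//
+// allocates and copies only if the memory spaces differ (h aliases d
+// otherwise) and always carries d.begins() over to the result.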
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#endif
+#endif /* KOKKOS_OFFSETVIEW_HPP_ */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_ScatterView.hpp
/// \brief Declaration and definition of Kokkos::ScatterView.
#ifdef KOKKOS_ENABLE_HIP
template <>
-struct DefaultDuplication<Kokkos::Experimental::HIP> {
+struct DefaultDuplication<Kokkos::HIP> {
using type = Kokkos::Experimental::ScatterNonDuplicated;
};
template <>
-struct DefaultContribution<Kokkos::Experimental::HIP,
+struct DefaultContribution<Kokkos::HIP,
Kokkos::Experimental::ScatterNonDuplicated> {
using type = Kokkos::Experimental::ScatterAtomic;
};
template <>
-struct DefaultContribution<Kokkos::Experimental::HIP,
+struct DefaultContribution<Kokkos::HIP,
Kokkos::Experimental::ScatterDuplicated> {
using type = Kokkos::Experimental::ScatterAtomic;
};
#ifdef KOKKOS_ENABLE_SYCL
template <>
-struct DefaultDuplication<Kokkos::Experimental::SYCL> {
+struct DefaultDuplication<Kokkos::SYCL> {
using type = Kokkos::Experimental::ScatterNonDuplicated;
};
template <>
-struct DefaultContribution<Kokkos::Experimental::SYCL,
+struct DefaultContribution<Kokkos::SYCL,
Kokkos::Experimental::ScatterNonDuplicated> {
using type = Kokkos::Experimental::ScatterAtomic;
};
template <>
-struct DefaultContribution<Kokkos::Experimental::SYCL,
+struct DefaultContribution<Kokkos::SYCL,
Kokkos::Experimental::ScatterDuplicated> {
using type = Kokkos::Experimental::ScatterAtomic;
};
subview where the index specified is the largest-stride one. */
template <typename Layout, int rank, typename V, typename... Args>
struct Slice {
- using next = Slice<Layout, rank - 1, V, Kokkos::Impl::ALL_t, Args...>;
- using value_type = typename next::value_type;
-
- static value_type get(V const& src, const size_t i, Args... args) {
+ using next = Slice<Layout, rank - 1, V, Kokkos::ALL_t, Args...>;
+ static auto get(V const& src, const size_t i, Args... args) {
return next::get(src, i, Kokkos::ALL, args...);
}
};
template <typename V, typename... Args>
struct Slice<Kokkos::LayoutRight, 1, V, Args...> {
- using value_type =
- typename Kokkos::Impl::ViewMapping<void, V, const size_t, Args...>::type;
- static value_type get(V const& src, const size_t i, Args... args) {
+ static auto get(V const& src, const size_t i, Args... args) {
return Kokkos::subview(src, i, args...);
}
};
template <typename V, typename... Args>
struct Slice<Kokkos::LayoutLeft, 1, V, Args...> {
- using value_type =
- typename Kokkos::Impl::ViewMapping<void, V, Args..., const size_t>::type;
- static value_type get(V const& src, const size_t i, Args... args) {
+ static auto get(V const& src, const size_t i, Args... args) {
+ return Kokkos::subview(src, args..., i);
+ }
+};
+
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+template <typename V, typename... Args>
+struct Slice<Kokkos::layout_right, 1, V, Args...> {
+ static auto get(V const& src, const size_t i, Args... args) {
+ return Kokkos::subview(src, i, args...);
+ }
+};
+
+template <typename V, typename... Args>
+struct Slice<Kokkos::layout_left, 1, V, Args...> {
+ static auto get(V const& src, const size_t i, Args... args) {
return Kokkos::subview(src, args..., i);
}
};
+template <size_t Pad, typename V, typename... Args>
+struct Slice<Kokkos::Experimental::layout_right_padded<Pad>, 1, V, Args...> {
+ static auto get(V const& src, const size_t i, Args... args) {
+ return Kokkos::subview(src, i, args...);
+ }
+};
+
+template <size_t Pad, typename V, typename... Args>
+struct Slice<Kokkos::Experimental::layout_left_padded<Pad>, 1, V, Args...> {
+ static auto get(V const& src, const size_t i, Args... args) {
+ return Kokkos::subview(src, args..., i);
+ }
+};
+#endif
+
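+// How the recursion above unrolls (illustrative): for a rank-3 view v,
+// Slice<LayoutRight, 3, V>::get(v, i) expands to
+// Kokkos::subview(v, i, Kokkos::ALL, Kokkos::ALL), while LayoutLeft yields
+// Kokkos::subview(v, Kokkos::ALL, Kokkos::ALL, i): the duplication index is
+// always placed in the largest-stride dimension.
+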
template <typename ExecSpace, typename ValueType, typename Op>
struct ReduceDuplicates;
typename DeviceType::execution_space, Duplication>::type>
class ScatterView;
+template <class>
+struct is_scatter_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_scatter_view<ScatterView<D, P...>> : public std::true_type {};
+
+template <class D, class... P>
+struct is_scatter_view<const ScatterView<D, P...>> : public std::true_type {};
+
+template <class T>
+inline constexpr bool is_scatter_view_v = is_scatter_view<T>::value;
+
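+// Example use of the trait above (a sketch): constrain generic code to
+// ScatterView arguments.
+//
+//   template <class SV>
+//   std::enable_if_t<Kokkos::Experimental::is_scatter_view_v<SV>, void>
+//   contribute_all(SV const& sv);
+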
template <typename DataType, typename Op, typename DeviceType, typename Layout,
typename Duplication, typename Contribution,
typename OverrideContribution>
: internal_view(other_view.internal_view) {}
template <typename OtherDataType, typename OtherDeviceType>
- KOKKOS_FUNCTION void operator=(
+ KOKKOS_FUNCTION ScatterView& operator=(
const ScatterView<OtherDataType, Layout, OtherDeviceType, Op,
ScatterNonDuplicated, Contribution>& other_view) {
internal_view = other_view.internal_view;
+ return *this;
}
template <typename OverrideContribution = Contribution>
template <typename Arg>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- view_type::original_view_type::rank == 1 && std::is_integral<Arg>::value,
+ std::is_integral_v<Arg> && view_type::original_view_type::rank == 1,
value_type>
operator[](Arg arg) const {
return view.at(arg);
internal_view(other_view.internal_view) {}
template <typename OtherDataType, typename OtherDeviceType>
- KOKKOS_FUNCTION void operator=(
+ KOKKOS_FUNCTION ScatterView& operator=(
const ScatterView<OtherDataType, Kokkos::LayoutRight, OtherDeviceType, Op,
ScatterDuplicated, Contribution>& other_view) {
unique_token = other_view.unique_token;
internal_view = other_view.internal_view;
+ return *this;
}
template <typename RT, typename... RP>
check_scatter_view_allocation_properties_argument;
check_scatter_view_allocation_properties_argument(arg_prop);
- auto const exec_space =
- static_cast<::Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
- arg_prop)
- .value;
+ auto const& exec_space =
+ Kokkos::Impl::get_property<Kokkos::Impl::ExecutionSpaceTag>(arg_prop);
reset(exec_space);
}
*this);
}
- typename Kokkos::Impl::Experimental::Slice<Kokkos::LayoutRight,
- internal_view_type::rank,
- internal_view_type>::value_type
- subview() const {
+ auto subview() const {
return Kokkos::Impl::Experimental::Slice<
- Kokkos::LayoutRight, internal_view_type::Rank,
+ Kokkos::LayoutRight, internal_view_type::rank,
internal_view_type>::get(internal_view, 0);
}
arg_N[internal_view_type::rank - 1] = unique_token.size();
internal_view = internal_view_type(
view_alloc(WithoutInitializing,
- std::string("duplicated_") + original_view.label(),
- exec_space),
+ std::string("duplicated_") + original_view.label(),
+ exec_space),
arg_N[0], arg_N[1], arg_N[2], arg_N[3], arg_N[4], arg_N[5], arg_N[6],
arg_N[7]);
reset(exec_space);
Kokkos::Impl::Experimental::args_to_array(arg_N, 0, dims...);
arg_N[internal_view_type::rank - 1] = unique_token.size();
- auto const name =
- static_cast<::Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
- arg_prop)
- .value;
+ auto const& name =
+ Kokkos::Impl::get_property<Kokkos::Impl::LabelTag>(arg_prop);
internal_view = internal_view_type(view_alloc(WithoutInitializing, name),
arg_N[0], arg_N[1], arg_N[2], arg_N[3],
arg_N[4], arg_N[5], arg_N[6], arg_N[7]);
- auto const exec_space =
- static_cast<::Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
- arg_prop)
- .value;
+ auto const& exec_space =
+ Kokkos::Impl::get_property<Kokkos::Impl::ExecutionSpaceTag>(arg_prop);
reset(exec_space);
}
internal_view(other_view.internal_view) {}
template <typename OtherDataType, typename OtherDeviceType>
- KOKKOS_FUNCTION void operator=(
+ KOKKOS_FUNCTION ScatterView& operator=(
const ScatterView<OtherDataType, Kokkos::LayoutLeft, OtherDeviceType, Op,
ScatterDuplicated, Contribution>& other_view) {
unique_token = other_view.unique_token;
internal_view = other_view.internal_view;
+ return *this;
}
template <typename OverrideContribution = Contribution>
*this);
}
- typename Kokkos::Impl::Experimental::Slice<Kokkos::LayoutLeft,
- internal_view_type::rank,
- internal_view_type>::value_type
- subview() const {
+ auto subview() const {
return Kokkos::Impl::Experimental::Slice<
Kokkos::LayoutLeft, internal_view_type::rank,
internal_view_type>::get(internal_view, 0);
template <typename Arg>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- view_type::original_view_type::rank == 1 && std::is_integral<Arg>::value,
+ std::is_integral_v<Arg> && view_type::original_view_type::rank == 1,
value_type>
operator[](Arg arg) const {
return view.at(thread_id, arg);
view_type const& view;
// simplify RAII by disallowing copies
- ScatterAccess(ScatterAccess const& other) = delete;
+ ScatterAccess(ScatterAccess const& other) = delete;
ScatterAccess& operator=(ScatterAccess const& other) = delete;
- ScatterAccess& operator=(ScatterAccess&& other) = delete;
+ ScatterAccess& operator=(ScatterAccess&& other) = delete;
public:
// do need to allow moves though, for the common
RT, typename ViewTraits<RT, RP...>::array_layout,
typename ViewTraits<RT, RP...>::device_type, Op,
std::conditional_t<
- std::is_void<Duplication>::value,
+ std::is_void_v<Duplication>,
typename Kokkos::Impl::Experimental::DefaultDuplication<
typename ViewTraits<RT, RP...>::execution_space>::type,
Duplication>,
std::conditional_t<
- std::is_void<Contribution>::value,
+ std::is_void_v<Contribution>,
typename Kokkos::Impl::Experimental::DefaultContribution<
typename ViewTraits<RT, RP...>::execution_space,
typename std::conditional_t<
- std::is_void<Duplication>::value,
+ std::is_void_v<Duplication>,
typename Kokkos::Impl::Experimental::DefaultDuplication<
typename ViewTraits<RT, RP...>::execution_space>::type,
Duplication>>::type,
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_STATICCRSGRAPH_HPP
#define KOKKOS_STATICCRSGRAPH_HPP
const typename GraphType::entries_type& colidx_in,
const ordinal_type& stride, const ordinal_type& count,
const OffsetType& idx,
- const std::enable_if_t<std::is_integral<OffsetType>::value, int>& = 0)
+ const std::enable_if_t<std::is_integral_v<OffsetType>, int>& = 0)
: colidx_(&colidx_in(idx)), stride_(stride), length(count) {}
/// \brief Number of entries in the row.
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_UnorderedMap.hpp
/// \brief Declaration and definition of Kokkos::UnorderedMap.
#include <impl/Kokkos_Traits.hpp>
#include <impl/Kokkos_UnorderedMap_impl.hpp>
-
-#include <iostream>
+#include <View/Kokkos_ViewCtor.hpp>
#include <cstdint>
uint32_t m_status;
};
+/// \class UnorderedMapInsertOpTypes
+///
+/// \brief Operations applied to the values array upon subsequent insertions.
+///
+/// The default behavior when a (key, value) pair already exists in the
+/// UnorderedMap is to perform no operation. Alternatively, the caller may
+/// pass the AtomicAdd insert operator to insert() so that insertions of
+/// duplicate keys accumulate their values into the existing values array
+/// entry.
+/// \tparam ValueTypeView The UnorderedMap value array type.
+/// \tparam ValuesIdxType The index type for lookups in the value array.
+///
+/// Supported operations:
+/// NoOp: the first key inserted stores the associated value.
+/// AtomicAdd: duplicate key insertions sum values together.
+template <class ValueTypeView, class ValuesIdxType>
+struct UnorderedMapInsertOpTypes {
+ using value_type = typename ValueTypeView::non_const_value_type;
+ struct NoOp {
+ KOKKOS_FUNCTION
+ void op(ValueTypeView, ValuesIdxType, const value_type) const {}
+ };
+ struct AtomicAdd {
+ KOKKOS_FUNCTION
+ void op(ValueTypeView values, ValuesIdxType values_idx,
+ const value_type v) const {
+ Kokkos::atomic_add(values.data() + values_idx, v);
+ }
+ };
+};
+
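+// Illustrative usage (a sketch; the value-view type is spelled out for
+// exposition): accumulate the values of duplicate keys, typically from
+// inside a parallel_for.
+//
+//   using ops_t =
+//       Kokkos::UnorderedMapInsertOpTypes<Kokkos::View<double*>, uint32_t>;
+//   Kokkos::UnorderedMap<int, double> m(128);
+//   Kokkos::parallel_for(
+//       100, KOKKOS_LAMBDA(const int i) {
+//         m.insert(i % 10, 1.0, ops_t::AtomicAdd{});
+//       });
+//   // each of the 10 keys accumulates a value of 10.0
+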
/// \class UnorderedMap
/// \brief Thread-safe, performance-portable lookup table.
///
public:
//! \name Public types and constants
//@{
-
// key_types
using declared_key_type = Key;
using key_type = std::remove_const_t<declared_key_type>;
using const_map_type = UnorderedMap<const_key_type, const_value_type,
device_type, hasher_type, equal_to_type>;
- static const bool is_set = std::is_void<value_type>::value;
- static const bool has_const_key =
- std::is_same<const_key_type, declared_key_type>::value;
- static const bool has_const_value =
- is_set || std::is_same<const_value_type, declared_value_type>::value;
+ static constexpr bool is_set = std::is_void_v<value_type>;
+ static constexpr bool has_const_key =
+ std::is_same_v<const_key_type, declared_key_type>;
+ static constexpr bool has_const_value =
+ is_set || std::is_same_v<const_value_type, declared_value_type>;
- static const bool is_insertable_map =
+ static constexpr bool is_insertable_map =
!has_const_key && (is_set || !has_const_value);
- static const bool is_modifiable_map = has_const_key && !has_const_value;
- static const bool is_const_map = has_const_key && has_const_value;
+ static constexpr bool is_modifiable_map = has_const_key && !has_const_value;
+ static constexpr bool is_const_map = has_const_key && has_const_value;
using insert_result = UnorderedMapInsertResult;
UnorderedMap<Key, Value, host_mirror_space, Hasher, EqualTo>;
using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
-
//@}
private:
public:
//! \name Public member functions
//@{
+ using default_op_type =
+ typename UnorderedMapInsertOpTypes<value_type_view, uint32_t>::NoOp;
/// \brief Constructor
///
/// \param capacity_hint [in] Initial guess of how many unique keys will be
- /// inserted into the map \param hash [in] Hasher function for \c Key
- /// instances. The
- /// default value usually suffices.
+ /// inserted into the map.
+ /// \param hash [in] Hasher function for \c Key instances. The
+ /// default value usually suffices.
+ /// \param equal_to [in] The operator used for determining if two
+ /// keys are equal.
UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
equal_to_type equal_to = equal_to_type())
- : m_bounded_insert(true),
- m_hasher(hasher),
- m_equal_to(equal_to),
- m_size(),
- m_available_indexes(calculate_capacity(capacity_hint)),
- m_hash_lists(view_alloc(WithoutInitializing, "UnorderedMap hash list"),
- Impl::find_hash_size(capacity())),
- m_next_index(view_alloc(WithoutInitializing, "UnorderedMap next index"),
- capacity() + 1) // +1 so that the *_at functions can
- // always return a valid reference
- ,
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- m_keys("UnorderedMap keys", capacity() + 1),
- m_values("UnorderedMap values", (is_set ? 1 : capacity() + 1)),
-#else
- m_keys("UnorderedMap keys", capacity()),
- m_values("UnorderedMap values", (is_set ? 0 : capacity())),
-#endif
- m_scalars("UnorderedMap scalars") {
+ : UnorderedMap(Kokkos::view_alloc(), capacity_hint, hasher, equal_to) {}
+
+ template <class... P>
+ UnorderedMap(const Impl::ViewCtorProp<P...> &arg_prop,
+ size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
+ equal_to_type equal_to = equal_to_type())
+ : m_bounded_insert(true), m_hasher(hasher), m_equal_to(equal_to) {
if (!is_insertable_map) {
Kokkos::Impl::throw_runtime_exception(
"Cannot construct a non-insertable (i.e. const key_type) "
"unordered_map");
}
- Kokkos::deep_copy(m_hash_lists, invalid_index);
- Kokkos::deep_copy(m_next_index, invalid_index);
+ //! Ensure that allocation properties are consistent.
+ using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
+ static_assert(alloc_prop_t::initialize,
+ "Allocation property 'initialize' should be true.");
+ static_assert(
+ !alloc_prop_t::has_pointer,
+ "Allocation properties should not contain the 'pointer' property.");
+
+ /// Update allocation properties with 'label' and 'without initializing'
+ /// properties.
+ const auto prop_copy =
+ Impl::with_properties_if_unset(arg_prop, std::string("UnorderedMap"));
+ const auto prop_copy_noinit =
+ Impl::with_properties_if_unset(prop_copy, Kokkos::WithoutInitializing);
+
+ //! Initialize member views.
+ m_size = shared_size_t(Kokkos::view_alloc(
+ Kokkos::DefaultHostExecutionSpace{},
+ Impl::get_property<Impl::LabelTag>(prop_copy) + " - size"));
+
+ m_available_indexes =
+ bitset_type(Kokkos::Impl::append_to_label(prop_copy, " - bitset"),
+ calculate_capacity(capacity_hint));
+
+ m_hash_lists = size_type_view(
+ Kokkos::Impl::append_to_label(prop_copy_noinit, " - hash list"),
+ Impl::find_hash_size(capacity()));
+
+ m_next_index = size_type_view(
+ Kokkos::Impl::append_to_label(prop_copy_noinit, " - next index"),
+ capacity() + 1); // +1 so that the *_at functions can always return a
+ // valid reference
+
+ m_keys = key_type_view(Kokkos::Impl::append_to_label(prop_copy, " - keys"),
+ capacity());
+
+ m_values =
+ value_type_view(Kokkos::Impl::append_to_label(prop_copy, " - values"),
+ is_set ? 0 : capacity());
+
+ m_scalars =
+ scalars_view(Kokkos::Impl::append_to_label(prop_copy, " - scalars"));
+
+  /**
+   * The initializing deep copies should also use the execution space
+   * instance if one was given. Instead of the if/else we could use
+   * @c get_property_or_default, but passing even a default-constructed
+   * execution space instance changes the behavior of @c deep_copy: the
+   * space-taking overload is asynchronous with respect to other spaces,
+   * whereas the overload without a space fences.
+   */
+ if constexpr (alloc_prop_t::has_execution_space) {
+ const auto &space = Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
+ Kokkos::deep_copy(space, m_hash_lists, invalid_index);
+ Kokkos::deep_copy(space, m_next_index, invalid_index);
+ } else {
+ Kokkos::deep_copy(m_hash_lists, invalid_index);
+ Kokkos::deep_copy(m_next_index, invalid_index);
+ }
}
void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
const key_type tmp = key_type();
Kokkos::deep_copy(m_keys, tmp);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- if (is_set) {
- const impl_value_type tmp = impl_value_type();
- Kokkos::deep_copy(m_values, tmp);
- }
-#endif
Kokkos::deep_copy(m_scalars, 0);
- m_size = 0;
+ m_size() = 0;
}
KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- return (m_keys.is_allocated() && m_values.is_allocated() &&
- m_scalars.is_allocated());
-#else
return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
m_scalars.is_allocated());
-#endif
}
/// \brief Change the capacity of the map
size_type size() const {
if (capacity() == 0u) return 0u;
if (modified()) {
- m_size = m_available_indexes.count();
+ m_size() = m_available_indexes.count();
reset_flag(modified_idx);
}
- return m_size;
+ return m_size();
}
/// \brief The current number of failed insert() calls.
/// \param v [in] The corresponding value to attempt to insert. If
/// using this class as a set (with Value = void), then you need not
/// provide this value.
- KOKKOS_INLINE_FUNCTION
- insert_result insert(key_type const &k,
- impl_value_type const &v = impl_value_type()) const {
+  /// \param arg_insert_op [in] The operator used for combining values if a
+  ///                      key already exists. See
+  ///                      Kokkos::UnorderedMapInsertOpTypes for more ops.
+ template <typename InsertOpType = default_op_type>
+ KOKKOS_INLINE_FUNCTION insert_result
+ insert(key_type const &k, impl_value_type const &v = impl_value_type(),
+ [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
+ if constexpr (is_set) {
+ static_assert(std::is_same_v<InsertOpType, default_op_type>,
+ "Insert Operations are not supported on sets.");
+ }
+
insert_result result;
if (!is_insertable_map || capacity() == 0u ||
// Previously claimed an unused entry that was not inserted.
// Release this unused entry immediately.
if (!m_available_indexes.reset(new_index)) {
- KOKKOS_IMPL_DO_NOT_USE_PRINTF("Unable to free existing\n");
+ Kokkos::printf("Unable to free existing\n");
}
}
result.set_existing(curr, free_existing);
+ if constexpr (!is_set) {
+ arg_insert_op.op(m_values, curr, v);
+ }
not_done = false;
}
//------------------------------------------------------------
/// 'const value_type' via Cuda texture fetch must return by value.
template <typename Dummy = value_type>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- !std::is_void<Dummy>::value, // !is_set
+ !std::is_void_v<Dummy>, // !is_set
std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
value_at(size_type i) const {
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- return m_values[i < capacity() ? i : capacity()];
-#else
KOKKOS_EXPECTS(i < capacity());
return m_values[i];
-#endif
- }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- template <typename Dummy = value_type>
- KOKKOS_DEPRECATED_WITH_COMMENT(
- "Calling value_at for value_type==void is deprecated!")
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- std::is_void<Dummy>::value, // is_set
- std::conditional_t<has_const_value, impl_value_type,
- impl_value_type &>> value_at(size_type /*i*/) const {
- return m_values[0];
}
-#endif
/// \brief Get the key with \c i as its direct index.
///
/// kernel.
KOKKOS_FORCEINLINE_FUNCTION
key_type key_at(size_type i) const {
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- return m_keys[i < capacity() ? i : capacity()];
-#else
KOKKOS_EXPECTS(i < capacity());
return m_keys[i];
-#endif
}
KOKKOS_FORCEINLINE_FUNCTION
return *this;
}
+ // Re-allocate the views of the calling UnorderedMap according to src
+ // capacity, and deep copy the src data.
template <typename SKey, typename SValue, typename SDevice>
- std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
- std::is_same<std::remove_const_t<SValue>, value_type>::value>
+ std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
+ std::is_same_v<std::remove_const_t<SValue>, value_type>>
create_copy_view(
UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
if (m_hash_lists.data() != src.m_hash_lists.data()) {
- insertable_map_type tmp;
-
- tmp.m_bounded_insert = src.m_bounded_insert;
- tmp.m_hasher = src.m_hasher;
- tmp.m_equal_to = src.m_equal_to;
- tmp.m_size = src.size();
- tmp.m_available_indexes = bitset_type(src.capacity());
- tmp.m_hash_lists = size_type_view(
- view_alloc(WithoutInitializing, "UnorderedMap hash list"),
- src.m_hash_lists.extent(0));
- tmp.m_next_index = size_type_view(
- view_alloc(WithoutInitializing, "UnorderedMap next index"),
- src.m_next_index.extent(0));
- tmp.m_keys =
- key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
- src.m_keys.extent(0));
- tmp.m_values = value_type_view(
- view_alloc(WithoutInitializing, "UnorderedMap values"),
- src.m_values.extent(0));
- tmp.m_scalars = scalars_view("UnorderedMap scalars");
-
- Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);
+ allocate_view(src);
+ deep_copy_view(src);
+ }
+ }
+
+ // Allocate views of the calling UnorderedMap with the same capacity as the
+ // src.
+ template <typename SKey, typename SValue, typename SDevice>
+ std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
+ std::is_same_v<std::remove_const_t<SValue>, value_type>>
+ allocate_view(
+ UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
+ insertable_map_type tmp;
+
+ tmp.m_bounded_insert = src.m_bounded_insert;
+ tmp.m_hasher = src.m_hasher;
+ tmp.m_equal_to = src.m_equal_to;
+ tmp.m_size() = src.m_size();
+ tmp.m_available_indexes = bitset_type(src.capacity());
+ tmp.m_hash_lists = size_type_view(
+ view_alloc(WithoutInitializing, "UnorderedMap hash list"),
+ src.m_hash_lists.extent(0));
+ tmp.m_next_index = size_type_view(
+ view_alloc(WithoutInitializing, "UnorderedMap next index"),
+ src.m_next_index.extent(0));
+ tmp.m_keys =
+ key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
+ src.m_keys.extent(0));
+ tmp.m_values =
+ value_type_view(view_alloc(WithoutInitializing, "UnorderedMap values"),
+ src.m_values.extent(0));
+ tmp.m_scalars = scalars_view("UnorderedMap scalars");
+
+ *this = tmp;
+ }
+
+ // Deep copy view data from src. This requires that the src capacity is
+ // identical to the capacity of the calling UnorderedMap.
+ template <typename SKey, typename SValue, typename SDevice>
+ std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
+ std::is_same_v<std::remove_const_t<SValue>, value_type>>
+ deep_copy_view(
+ UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // To deep copy UnorderedMap, capacity must be identical
+ KOKKOS_EXPECTS(capacity() == src.capacity());
+#else
+ if (capacity() != src.capacity()) {
+ allocate_view(src);
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+ Kokkos::Impl::log_warning(
+ "Warning: deep_copy_view() allocating views is deprecated. Must call "
+ "with UnorderedMaps of identical capacity, or use "
+ "create_copy_view().\n");
+#endif
+ }
+#endif
+
+ if (m_hash_lists.data() != src.m_hash_lists.data()) {
+ Kokkos::deep_copy(m_available_indexes, src.m_available_indexes);
using raw_deep_copy =
Kokkos::Impl::DeepCopy<typename device_type::memory_space,
typename SDevice::memory_space>;
- raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
+ raw_deep_copy(m_hash_lists.data(), src.m_hash_lists.data(),
sizeof(size_type) * src.m_hash_lists.extent(0));
- raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
+ raw_deep_copy(m_next_index.data(), src.m_next_index.data(),
sizeof(size_type) * src.m_next_index.extent(0));
- raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
+ raw_deep_copy(m_keys.data(), src.m_keys.data(),
sizeof(key_type) * src.m_keys.extent(0));
if (!is_set) {
- raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
+ raw_deep_copy(m_values.data(), src.m_values.data(),
sizeof(impl_value_type) * src.m_values.extent(0));
}
- raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
+ raw_deep_copy(m_scalars.data(), src.m_scalars.data(),
sizeof(int) * num_scalars);
Kokkos::fence(
- "Kokkos::UnorderedMap::create_copy_view: fence after copy to tmp");
-
- *this = tmp;
+ "Kokkos::UnorderedMap::deep_copy_view: fence after copy to dst.");
}
}
bool m_bounded_insert;
hasher_type m_hasher;
equal_to_type m_equal_to;
- mutable size_type m_size;
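+ // The size is kept in a host View so that all shallow copies of the map
+ // share a single cached value.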
+ using shared_size_t = View<size_type, Kokkos::DefaultHostExecutionSpace>;
+ shared_size_t m_size;
bitset_type m_available_indexes;
size_type_view m_hash_lists;
size_type_view m_next_index;
friend struct Impl::UnorderedMapPrint;
};
-// Specialization of deep_copy for two UnorderedMap objects.
+// Specialization of deep_copy() for two UnorderedMap objects.
template <typename DKey, typename DT, typename DDevice, typename SKey,
typename ST, typename SDevice, typename Hasher, typename EqualTo>
inline void deep_copy(
UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
- dst.create_copy_view(src);
+ dst.deep_copy_view(src);
+}
+
+// Specialization of create_mirror() for an UnorderedMap object.
+template <typename Key, typename ValueType, typename Device, typename Hasher,
+ typename EqualTo>
+typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
+create_mirror(
+ const UnorderedMap<Key, ValueType, Device, Hasher, EqualTo> &src) {
+ typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
+ dst;
+ dst.allocate_view(src);
+ return dst;
}
} // namespace Kokkos
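+// A minimal usage sketch (illustrative, not part of this header): mirror a
+// device-resident map on the host and copy its contents over. The names
+// `device_map` and `capacity` below are assumptions for the example.
+//
+//   Kokkos::UnorderedMap<int, double> device_map(capacity);
+//   // ... fill device_map from device code ...
+//   auto host_map = Kokkos::create_mirror(device_map);  // allocates only
+//   Kokkos::deep_copy(host_map, device_map);            // capacities match
+//
+// To copy between maps of different capacity, use dst.create_copy_view(src),
+// which re-allocates the destination views first.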
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_VECTOR_HPP
#define KOKKOS_VECTOR_HPP
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_VECTOR
#endif
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_DEPRECATED_CODE_4)
+#if defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS)
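+// Referencing a [[deprecated]] function from a namespace-scope initializer
+// makes every translation unit that includes this header emit a deprecation
+// diagnostic, without otherwise changing behavior.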
+namespace {
+[[deprecated("Deprecated <Kokkos_Vector.hpp> header is included")]] int
+emit_warning_kokkos_vector_deprecated() {
+ return 0;
+}
+static auto do_not_include = emit_warning_kokkos_vector_deprecated();
+} // namespace
+#endif
+#else
+#error "Deprecated <Kokkos_Vector.hpp> header is included"
+#endif
+
#include <Kokkos_Core_fwd.hpp>
#include <Kokkos_DualView.hpp>
*/
namespace Kokkos {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
template <class Scalar, class Arg1Type = void>
-class vector : public DualView<Scalar*, LayoutLeft, Arg1Type> {
+class KOKKOS_DEPRECATED vector
+ : public DualView<Scalar*, LayoutLeft, Arg1Type> {
public:
using value_type = Scalar;
using pointer = Scalar*;
private:
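+ // Heuristic: any type not convertible to size_type is treated as an input
+ // iterator, so that integral counts do not bind to the iterator-range
+ // overloads (e.g. insert(pos, n, val) vs insert(pos, first, last)).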
template <class T>
- struct impl_is_input_iterator
- : /* TODO replace this */ std::integral_constant<
- bool, !std::is_convertible<T, size_type>::value> {};
+ struct impl_is_input_iterator : /* TODO replace this */ std::bool_constant<
+ !std::is_convertible_v<T, size_type>> {};
public:
// TODO: can use detection idiom to generate better error message here later
iterator begin() const { return DV::h_view.data(); }
+ const_iterator cbegin() const { return DV::h_view.data(); }
+
iterator end() const {
return _size > 0 ? DV::h_view.data() + _size : DV::h_view.data();
}
+ const_iterator cend() const {
+ return _size > 0 ? DV::h_view.data() + _size : DV::h_view.data();
+ }
+
reference front() { return DV::h_view(0); }
reference back() { return DV::h_view(_size - 1); }
void operator()(const int& i) const { _data(i) = _val; }
};
};
+#endif
} // namespace Kokkos
#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_VECTOR
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_BITSET_IMPL_HPP
+#define KOKKOS_BITSET_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+#include <cstdint>
+
+#include <cstdio>
+#include <climits>
+#include <iomanip>
+
+namespace Kokkos {
+namespace Impl {
+
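+// Bitwise right-rotation of an unsigned word; assumes 0 <= r < the bit width
+// of unsigned, since shifting by the full width would be undefined.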
+KOKKOS_FORCEINLINE_FUNCTION
+unsigned rotate_right(unsigned i, int r) {
+ constexpr int size = static_cast<int>(sizeof(unsigned) * CHAR_BIT);
+ return r ? ((i >> r) | (i << (size - r))) : i;
+}
+
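+// Reduction functor: counts the total number of set bits in a Bitset by
+// summing per-block popcounts with parallel_reduce.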
+template <typename Bitset>
+struct BitsetCount {
+ using bitset_type = Bitset;
+ using execution_space =
+ typename bitset_type::execution_space::execution_space;
+ using size_type = typename bitset_type::size_type;
+ using value_type = size_type;
+
+ bitset_type m_bitset;
+
+ BitsetCount(bitset_type const& bitset) : m_bitset(bitset) {}
+
+ size_type apply() const {
+ size_type count = 0u;
+ parallel_reduce("Kokkos::Impl::BitsetCount::apply",
+ m_bitset.m_blocks.extent(0), *this, count);
+ return count;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void init(value_type& count) const { count = 0u; }
+
+ KOKKOS_INLINE_FUNCTION
+ void join(value_type& count, const size_type& incr) const { count += incr; }
+
+ KOKKOS_INLINE_FUNCTION
+ void operator()(size_type i, value_type& count) const {
+ count += bit_count(m_bitset.m_blocks[i]);
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // KOKKOS_BITSET_IMPL_HPP
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
#ifndef KOKKOS_FUNCTIONAL_IMPL_HPP
uint32_t k1 = 0;
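+ // Mix in the trailing (len % 4) bytes; the case labels intentionally fall
+ // through so each remaining byte is folded into k1.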
switch (len & 3) {
- case 3: k1 ^= tail[2] << 16; KOKKOS_IMPL_FALLTHROUGH
- case 2: k1 ^= tail[1] << 8; KOKKOS_IMPL_FALLTHROUGH
+ case 3: k1 ^= tail[2] << 16; [[fallthrough]];
+ case 2: k1 ^= tail[1] << 8; [[fallthrough]];
case 1:
k1 ^= tail[0];
k1 *= c1;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_STATICCRSGRAPH_FACTORY_HPP
#define KOKKOS_IMPL_STATICCRSGRAPH_FACTORY_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_UNORDERED_MAP_IMPL_HPP
#define KOKKOS_UNORDERED_MAP_IMPL_HPP
#include <cstdio>
#include <climits>
-#include <iostream>
#include <iomanip>
namespace Kokkos {
namespace Impl {
+//! Append to the label contained in view_ctor_prop.
+template <typename... P>
+auto append_to_label(const ViewCtorProp<P...>& view_ctor_prop,
+ const std::string& label) {
+ using vcp_t = ViewCtorProp<P...>;
+ static_assert(vcp_t::has_label);
+ vcp_t new_ctor_props(view_ctor_prop);
+ static_cast<ViewCtorProp<void, std::string>&>(new_ctor_props)
+ .value.append(label);
+ return new_ctor_props;
+}
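+// For example (illustrative): append_to_label(view_alloc("UnorderedMap"),
+// " - size") yields view constructor properties labeled "UnorderedMap - size".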
+
uint32_t find_hash_size(uint32_t size);
template <typename Map>
*this);
}
- template <typename Dummy = typename map_type::value_type>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_void<Dummy>::value>
- operator()(size_type i) const {
- if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i));
- }
-
- template <typename Dummy = typename map_type::value_type>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<!std::is_void<Dummy>::value>
- operator()(size_type i) const {
- if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i), m_src.value_at(i));
+ KOKKOS_INLINE_FUNCTION
+ void operator()(size_type i) const {
+ if constexpr (std::is_void_v<typename map_type::value_type>) {
+ if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i));
+ } else {
+ if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i), m_src.value_at(i));
+ }
}
};
uint32_t list = m_map.m_hash_lists(i);
for (size_type curr = list, ii = 0; curr != invalid_index;
curr = m_map.m_next_index[curr], ++ii) {
- KOKKOS_IMPL_DO_NOT_USE_PRINTF("%d[%d]: %d->%d\n", list, ii,
- m_map.key_at(curr), m_map.value_at(curr));
+ Kokkos::printf("%d[%d]: %d->%d\n", list, ii, m_map.key_at(curr),
+ m_map.value_at(curr));
}
}
};
--- /dev/null
+if(NOT Kokkos_INSTALL_TESTING)
+ add_subdirectory(src)
+endif()
+
+function(KOKKOS_ADD_BENCHMARK_DIRECTORY DIR_NAME)
+ if(NOT Kokkos_ENABLE_BENCHMARKS)
+ return()
+ endif()
+
+ add_subdirectory(${DIR_NAME})
+endfunction()
+
+kokkos_add_test_directories(unit_test)
+kokkos_add_benchmark_directory(perf_test)
--- /dev/null
+kokkos_include_directories(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR} ${KOKKOS_TOP_BUILD_DIR})
+if(NOT desul_FOUND)
+ if(KOKKOS_ENABLE_CUDA)
+ set(DESUL_ATOMICS_ENABLE_CUDA ON)
+ endif()
+ if(KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE)
+ set(DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION ON)
+ endif()
+ if(KOKKOS_ENABLE_HIP)
+ set(DESUL_ATOMICS_ENABLE_HIP ON)
+ endif()
+ if(KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE)
+ set(DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION ON)
+ endif()
+ if(KOKKOS_ENABLE_SYCL)
+ set(DESUL_ATOMICS_ENABLE_SYCL ON)
+ if(KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED AND NOT KOKKOS_IMPL_HAVE_SYCL_EXT_ONEAPI_DEVICE_GLOBAL)
+ set(DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION ON)
+ endif()
+ endif()
+ if(KOKKOS_ENABLE_OPENMPTARGET)
+ set(DESUL_ATOMICS_ENABLE_OPENMP ON) # not a typo: Kokkos OpenMPTarget maps to Desul OpenMP
+ endif()
+ if(KOKKOS_ENABLE_OPENACC)
+ # FIXME_OPENACC FIXME_CLACC - The condition below will be removed once Clacc can compile atomics.
+ if(KOKKOS_CXX_COMPILER_ID STREQUAL NVHPC)
+ set(DESUL_ATOMICS_ENABLE_OPENACC ON)
+ endif()
+ endif()
+ configure_file(
+ ${KOKKOS_SOURCE_DIR}/tpls/desul/Config.hpp.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/desul/atomics/Config.hpp
+ )
+ kokkos_include_directories(${KOKKOS_SOURCE_DIR}/tpls/desul/include)
+endif()
+
+install(
+ DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "*.hpp"
+ PATTERN "*.h"
+)
+
+set(KOKKOS_CORE_SRCS)
+append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/impl/*.cpp)
+set(KOKKOS_CORE_HEADERS)
+append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
+append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/impl/*.hpp)
+
+if(KOKKOS_ENABLE_CUDA)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Cuda/*.cpp)
+ if(NOT Kokkos_ENABLE_DEPRECATED_CODE_4)
+ list(REMOVE_ITEM KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Cuda/Kokkos_Cuda_Task.cpp)
+ endif()
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/Cuda/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_OPENMP)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/OpenMP/*.cpp)
+ if(NOT Kokkos_ENABLE_DEPRECATED_CODE_4)
+ list(REMOVE_ITEM KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/OpenMP/Kokkos_OpenMP_Task.cpp)
+ endif()
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/OpenMP/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_OPENMPTARGET)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/OpenMPTarget/*.cpp)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/OpenMPTarget/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_OPENACC)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/OpenACC/*.cpp)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/OpenACC/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_THREADS)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Threads/*.cpp)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/Threads/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_HIP)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/HIP/*.cpp)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/HIP/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_HPX)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/HPX/*.cpp)
+ if(NOT Kokkos_ENABLE_DEPRECATED_CODE_4)
+ list(REMOVE_ITEM KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/HPX/Kokkos_HPX_Task.cpp)
+ endif()
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/HPX/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_SERIAL)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Serial/*.cpp)
+ if(NOT Kokkos_ENABLE_DEPRECATED_CODE_4)
+ list(REMOVE_ITEM KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/Serial/Kokkos_Serial_Task.cpp)
+ endif()
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/Serial/*.hpp)
+endif()
+
+if(KOKKOS_ENABLE_SYCL)
+ append_glob(KOKKOS_CORE_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/SYCL/*.cpp)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/SYCL/*.hpp)
+endif()
+
+if(NOT desul_FOUND)
+ if(KOKKOS_ENABLE_CUDA)
+ append_glob(KOKKOS_CORE_SRCS ${KOKKOS_SOURCE_DIR}/tpls/desul/src/Lock_Array_CUDA.cpp)
+ elseif(KOKKOS_ENABLE_HIP)
+ append_glob(KOKKOS_CORE_SRCS ${KOKKOS_SOURCE_DIR}/tpls/desul/src/Lock_Array_HIP.cpp)
+ elseif(KOKKOS_ENABLE_SYCL)
+ append_glob(KOKKOS_CORE_SRCS ${KOKKOS_SOURCE_DIR}/tpls/desul/src/Lock_Array_SYCL.cpp)
+ endif()
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/desul/include/desul/*.hpp)
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/desul/include/desul/*/*.hpp)
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/desul/include/desul/*/*/*.hpp)
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/desul/include/*/*/*.inc*)
+ append_glob(KOKKOS_CORE_HEADERS ${CMAKE_CURRENT_BINARY_DIR}/desul/*.hpp)
+
+ install(
+ DIRECTORY "${KOKKOS_SOURCE_DIR}/tpls/desul/include/desul" "${CMAKE_CURRENT_BINARY_DIR}/desul"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "*.inc"
+ PATTERN "*.inc_*"
+ PATTERN "*.hpp"
+ )
+
+ message(STATUS "Using internal desul_atomics copy")
+else()
+ message(STATUS "Using external desul_atomics install found at:")
+ message(STATUS " " ${desul_DIR})
+endif()
+
+kokkos_add_library(
+ kokkoscore SOURCES ${KOKKOS_CORE_SRCS} HEADERS ${KOKKOS_CORE_HEADERS}
+ ADD_BUILD_OPTIONS # core should be given all the necessary compiler/linker flags
+)
+
+kokkos_lib_include_directories(
+ kokkoscore ${KOKKOS_TOP_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+)
+if(NOT desul_FOUND)
+ kokkos_lib_include_directories(kokkoscore ${KOKKOS_SOURCE_DIR}/tpls/desul/include)
+endif()
+
+if(Kokkos_ENABLE_IMPL_MDSPAN)
+ message(STATUS "Experimental mdspan support is enabled")
+
+ # Some compilers now ship mdspan; for now we simply check whether the
+ # header is available, until proper compiler detection support exists
+ include(CheckIncludeFileCXX)
+ check_include_file_cxx(experimental/mdspan KOKKOS_COMPILER_SUPPORTS_EXPERIMENTAL_MDSPAN)
+ check_include_file_cxx(mdspan KOKKOS_COMPILER_SUPPORTS_MDSPAN)
+
+ if(Kokkos_ENABLE_MDSPAN_EXTERNAL)
+ message(STATUS "Using external mdspan")
+ target_link_libraries(kokkoscore PUBLIC std::mdspan)
+ elseif(KOKKOS_COMPILER_SUPPORTS_MDSPAN AND NOT Kokkos_ENABLE_IMPL_SKIP_COMPILER_MDSPAN)
+ message(STATUS "Using compiler-supplied mdspan")
+ elseif(KOKKOS_COMPILER_SUPPORTS_EXPERIMENTAL_MDSPAN AND NOT Kokkos_ENABLE_IMPL_SKIP_COMPILER_MDSPAN)
+ message(STATUS "Using compiler-supplied experimental/mdspan")
+ else()
+ kokkos_lib_include_directories(kokkoscore ${KOKKOS_SOURCE_DIR}/tpls/mdspan/include)
+
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/mdspan/include/experimental/__p0009_bits/*.hpp)
+ append_glob(KOKKOS_CORE_HEADERS ${KOKKOS_SOURCE_DIR}/tpls/mdspan/include/experimental/mdspan)
+
+ install(
+ DIRECTORY "${KOKKOS_SOURCE_DIR}/tpls/mdspan/include/"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "mdspan"
+ PATTERN "*.hpp"
+ )
+ message(STATUS "Using internal mdspan directory ${KOKKOS_SOURCE_DIR}/tpls/mdspan/include")
+ endif()
+endif()
+
+kokkos_link_tpl(kokkoscore PUBLIC HWLOC)
+kokkos_link_tpl(kokkoscore PUBLIC CUDA)
+kokkos_link_tpl(kokkoscore PUBLIC HPX)
+kokkos_link_tpl(kokkoscore PUBLIC LIBDL)
+# On *nix-like systems (Linux, macOS) we need pthread for C++ std::thread
+if(NOT WIN32)
+ kokkos_link_tpl(kokkoscore PUBLIC THREADS)
+endif()
+if(NOT KOKKOS_ENABLE_COMPILE_AS_CMAKE_LANGUAGE)
+ kokkos_link_tpl(kokkoscore PUBLIC ROCM)
+endif()
+
+# FIXME: We need a proper solution to figure out whether to enable
+# libatomic. Most compilers only require libatomic for 128-bit CAS;
+# I (CT) removed 128-bit CAS from desul to avoid needing libatomic.
+if(KOKKOS_ENABLE_OPENMPTARGET)
+ target_link_libraries(kokkoscore PUBLIC atomic)
+endif()
+
+if(desul_FOUND)
+ target_link_libraries(kokkoscore PUBLIC desul_atomics)
+endif()
+
+if(Kokkos_ENABLE_OPENMP)
+ target_link_libraries(kokkoscore PUBLIC OpenMP::OpenMP_CXX)
+endif()
+
+kokkos_link_tpl(kokkoscore PUBLIC LIBQUADMATH)
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_CUDA_HPP
#define KOKKOS_CUDA_HPP
#include <vector>
#include <impl/Kokkos_AnalyzePolicy.hpp>
-#include <Kokkos_CudaSpace.hpp>
+#include <Cuda/Kokkos_CudaSpace.hpp>
#include <Cuda/Kokkos_Cuda_Error.hpp> // CUDA_SAFE_CALL
#include <Kokkos_Parallel.hpp>
-#include <Kokkos_TaskScheduler.hpp>
#include <Kokkos_Layout.hpp>
#include <Kokkos_ScratchSpace.hpp>
#include <Kokkos_MemoryTraits.hpp>
namespace Kokkos {
namespace Impl {
-class CudaExec;
class CudaInternal;
} // namespace Impl
} // namespace Kokkos
CudaLaunchMechanism launch_mechanism = l;
};
} // namespace Experimental
+
+enum class ManageStream : bool { no, yes };
+
} // namespace Impl
/// \class Cuda
/// \brief Kokkos Execution Space that uses CUDA to run on GPUs.
/// \brief True if and only if this method is being called in a
/// thread-parallel function.
- KOKKOS_INLINE_FUNCTION static int in_parallel() {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION static int in_parallel() {
#if defined(__CUDA_ARCH__)
return true;
#else
return false;
#endif
}
-
- /** \brief Set the device in a "sleep" state.
- *
- * This function sets the device in a "sleep" state in which it is
- * not ready for work. This may consume less resources than if the
- * device were in an "awake" state, but it may also take time to
- * bring the device from a sleep state to be ready for work.
- *
- * \return True if the device is in the "sleep" state, else false if
- * the device is actively working and could not enter the "sleep"
- * state.
- */
- static bool sleep();
-
- /// \brief Wake the device from the 'sleep' state so it is ready for work.
- ///
- /// \return True if the device is in the "ready" state, else "false"
- /// if the device is actively working (which also means that it's
- /// awake).
- static bool wake();
+#endif
/// \brief Wait until all dispatched functors complete.
///
"Kokkos::Cuda::fence(): Unnamed Instance Fence") const;
/** \brief Return the maximum amount of concurrency. */
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int concurrency();
+#else
+ int concurrency() const;
+#endif
//! Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
Cuda();
- Cuda(cudaStream_t stream, bool manage_stream = false);
+ explicit Cuda(cudaStream_t stream) : Cuda(stream, Impl::ManageStream::no) {}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "Cuda execution space should be constructed explicitly.")
+ Cuda(cudaStream_t stream)
+ : Cuda(stream) {}
+#endif
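+ // Note: the templated, non-explicit overload above is only selected for
+ // implicit conversions from a stream (the non-template constructor wins in
+ // direct initialization), so the deprecation warning fires precisely for
+ // the old implicit-construction pattern.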
+
+ Cuda(cudaStream_t stream, Impl::ManageStream manage_stream);
+
+ KOKKOS_DEPRECATED Cuda(cudaStream_t stream, bool manage_stream);
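+ // Construction sketch (illustrative), assuming a valid stream `s` that the
+ // instance should own and destroy on teardown:
+ //   cudaStream_t s;
+ //   cudaStreamCreate(&s);
+ //   Kokkos::Cuda exec(s, Kokkos::Impl::ManageStream::yes);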
//--------------------------------------------------------------------------
//! Free any resources being consumed by the device.
//! Initialize, telling the CUDA run-time library which device to use.
static void impl_initialize(InitializationSettings const&);
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/// \brief Cuda device architecture of the selected device.
///
/// This matches the __CUDA_ARCH__ specification.
- static size_type device_arch();
+ KOKKOS_DEPRECATED static size_type device_arch() {
+ const cudaDeviceProp cudaProp = Cuda().cuda_device_prop();
+ return cudaProp.major * 100 + cudaProp.minor;
+ }
//! Query device count.
- static size_type detect_device_count();
+ KOKKOS_DEPRECATED static size_type detect_device_count() {
+ int count;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceCount(&count));
+ return count;
+ }
/** \brief Detect the available devices and their architecture
* as defined by the __CUDA_ARCH__ specification.
*/
- static std::vector<unsigned> detect_device_arch();
+ KOKKOS_DEPRECATED static std::vector<unsigned> detect_device_arch() {
+ int count;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceCount(&count));
+ std::vector<unsigned> out;
+ for (int i = 0; i < count; ++i) {
+ cudaDeviceProp prop;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop, i));
+ out.push_back(prop.major * 100 + prop.minor);
+ }
+ return out;
+ }
+#endif
cudaStream_t cuda_stream() const;
int cuda_device() const;
uint32_t impl_instance_id() const noexcept;
private:
+ friend bool operator==(Cuda const& lhs, Cuda const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(Cuda const& lhs, Cuda const& rhs) {
+ return !(lhs == rhs);
+ }
Kokkos::Impl::HostSharedPtr<Impl::CudaInternal> m_space_instance;
};
};
} // namespace Experimental
} // namespace Tools
-
-namespace Impl {
-
-template <class DT, class... DP>
-struct ZeroMemset<Kokkos::Cuda, DT, DP...> {
- ZeroMemset(const Kokkos::Cuda& exec_space_instance,
- const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemsetAsync(
- dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type),
- exec_space_instance.cuda_stream()));
- }
-
- ZeroMemset(const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type&) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaMemset(dst.data(), 0,
- dst.size() * sizeof(typename View<DT, DP...>::value_type)));
- }
-};
-} // namespace Impl
} // namespace Kokkos
/*--------------------------------------------------------------------------*/
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Kokkos_Core.hpp>
+#include <Cuda/Kokkos_Cuda.hpp>
+#include <Cuda/Kokkos_CudaSpace.hpp>
+
+#include <cstdlib>
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+#include <atomic>
+
+#include <impl/Kokkos_Error.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+cudaStream_t Kokkos::Impl::cuda_get_deep_copy_stream() {
+ static cudaStream_t s = nullptr;
+ if (s == nullptr) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (CudaInternal::singleton().cuda_stream_create_wrapper(&s)));
+ }
+ return s;
+}
+
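+// Lazily constructs the Cuda instance bound to the deep-copy stream; with
+// initialize == false this only queries whether it already exists.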
+const std::unique_ptr<Kokkos::Cuda> &Kokkos::Impl::cuda_get_deep_copy_space(
+ bool initialize) {
+ static std::unique_ptr<Cuda> space = nullptr;
+ if (!space && initialize)
+ space = std::make_unique<Cuda>(Kokkos::Impl::cuda_get_deep_copy_stream());
+ return space;
+}
+
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopyCuda(void *dst, const void *src, size_t n) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL((CudaInternal::singleton().cuda_memcpy_wrapper(
+ dst, src, n, cudaMemcpyDefault)));
+}
+
+void DeepCopyAsyncCuda(const Cuda &instance, void *dst, const void *src,
+ size_t n) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (instance.impl_internal_space_instance()->cuda_memcpy_async_wrapper(
+ dst, src, n, cudaMemcpyDefault)));
+}
+
+void DeepCopyAsyncCuda(void *dst, const void *src, size_t n) {
+ cudaStream_t s = cuda_get_deep_copy_stream();
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (CudaInternal::singleton().cuda_memcpy_async_wrapper(
+ dst, src, n, cudaMemcpyDefault, s)));
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+ "Kokkos::Impl::DeepCopyAsyncCuda: Deep Copy Stream Sync",
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ DeepCopyResourceSynchronization,
+ [&]() { KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(s)); });
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+bool CudaUVMSpace::available() { return true; }
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+// The purpose of the following variable is to allow a state-based choice
+// for pinning UVM allocations to the CPU. For now this is considered an
+// experimental debugging capability that may work around some CUDA issues.
+bool CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = false;
+
+bool CudaUVMSpace::cuda_pin_uvm_to_host() {
+ return CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v;
+}
+void CudaUVMSpace::cuda_set_pin_uvm_to_host(bool val) {
+ CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = val;
+}
+#endif
+} // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+bool kokkos_impl_cuda_pin_uvm_to_host() {
+ return Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host();
+}
+
+void kokkos_impl_cuda_set_pin_uvm_to_host(bool val) {
+ Kokkos::CudaUVMSpace::cuda_set_pin_uvm_to_host(val);
+}
+#endif
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+CudaSpace::CudaSpace()
+ : m_device(Kokkos::Cuda().cuda_device()),
+ m_stream(Kokkos::Cuda().cuda_stream()) {}
+CudaSpace::CudaSpace(int device_id, cudaStream_t stream)
+ : m_device(device_id), m_stream(stream) {}
+
+CudaUVMSpace::CudaUVMSpace()
+ : m_device(Kokkos::Cuda().cuda_device()),
+ m_stream(Kokkos::Cuda().cuda_stream()) {}
+CudaUVMSpace::CudaUVMSpace(int device_id, cudaStream_t stream)
+ : m_device(device_id), m_stream(stream) {}
+
+CudaHostPinnedSpace::CudaHostPinnedSpace()
+ : m_device(Kokkos::Cuda().cuda_device()),
+ m_stream(Kokkos::Cuda().cuda_stream()) {}
+CudaHostPinnedSpace::CudaHostPinnedSpace(int device_id, cudaStream_t stream)
+ : m_device(device_id), m_stream(stream) {}
+
+size_t memory_threshold_g = 40000; // 40 kB
+
+//==============================================================================
+// <editor-fold desc="allocate()"> {{{1
+
+void *CudaSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void *CudaSpace::allocate(const Cuda &exec_space, const char *arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+namespace {
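+// Common allocation path for CudaSpace. Depending on configuration it uses
+// (1) cudaMallocManaged plus a preferred-location advice when unified memory
+// is enabled, (2) cudaMallocAsync for allocations at or above
+// memory_threshold_g when the device supports memory pools, or (3) plain
+// cudaMalloc otherwise. Allocation failures are turned into
+// Kokkos::Impl::throw_bad_alloc.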
+void *impl_allocate_common(const int device_id,
+ [[maybe_unused]] const cudaStream_t stream,
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle,
+ [[maybe_unused]] bool stream_sync_only) {
+ void *ptr = nullptr;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(device_id));
+
+ cudaError_t error_code = cudaSuccess;
+#ifndef CUDART_VERSION
+#error CUDART_VERSION undefined!
+#elif defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+ // This is intended for Grace-Hopper (and future unified memory
+ // architectures). The idea is to use the host allocator and then advise
+ // the runtime to keep the memory in HBM on the device, but that requires
+ // CUDA 12.2.
+ static_assert(CUDART_VERSION >= 12020,
+ "CUDA runtime version >=12.2 required when "
+ "Kokkos_ENABLE_IMPL_CUDA_UNIFIED_MEMORY is set. "
+ "Please update your CUDA runtime version or "
+ "reconfigure with "
+ "-D Kokkos_ENABLE_IMPL_CUDA_UNIFIED_MEMORY=OFF");
+ if (arg_alloc_size) { // cudaMemAdvise_v2 does not work with nullptr
+ error_code = cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
+ if (error_code == cudaSuccess) {
+ // One would think cudaMemLocation{device_id, cudaMemLocationTypeDevice}
+ // would work, but it doesn't: the order of the members doesn't seem to
+ // be defined.
+ cudaMemLocation loc;
+ loc.id = device_id;
+ loc.type = cudaMemLocationTypeDevice;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemAdvise_v2(
+ ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation, loc));
+ }
+ }
+#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
+ // FIXME_KEPLER Everything after Kepler should support cudaMallocAsync
+ int device_supports_cuda_malloc_async;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaDeviceGetAttribute(&device_supports_cuda_malloc_async,
+ cudaDevAttrMemoryPoolsSupported, device_id));
+
+ if (arg_alloc_size >= memory_threshold_g &&
+ device_supports_cuda_malloc_async == 1) {
+ error_code = cudaMallocAsync(&ptr, arg_alloc_size, stream);
+
+ if (error_code == cudaSuccess) {
+ if (stream_sync_only) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
+ } else {
+ Impl::cuda_device_synchronize(
+ "Kokkos::Cuda: backend fence after async malloc");
+ }
+ }
+ } else {
+ error_code = cudaMalloc(&ptr, arg_alloc_size);
+ }
+#else
+ error_code = cudaMalloc(&ptr, arg_alloc_size);
+#endif
+
+ if (error_code != cudaSuccess) { // TODO tag as unlikely branch
+ // This is the only way to clear the last error, which we should do
+ // here because we are turning it into an exception.
+ cudaGetLastError();
+ Kokkos::Impl::throw_bad_alloc(arg_handle.name, arg_alloc_size, arg_label);
+ }
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+ return ptr;
+}
+} // namespace
+
+void *CudaSpace::impl_allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ return impl_allocate_common(m_device, m_stream, arg_label, arg_alloc_size,
+ arg_logical_size, arg_handle, false);
+}
+
+void *CudaSpace::impl_allocate(
+ const Cuda &exec_space, const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ return impl_allocate_common(
+ exec_space.cuda_device(), exec_space.cuda_stream(), arg_label,
+ arg_alloc_size, arg_logical_size, arg_handle, true);
+}
+
+void *CudaUVMSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void *CudaUVMSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaUVMSpace::impl_allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ void *ptr = nullptr;
+
+ Cuda::impl_static_fence(
+ "Kokkos::CudaUVMSpace::impl_allocate: Pre UVM Allocation");
+ if (arg_alloc_size > 0) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ cudaError_t error_code =
+ cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
+
+ if (error_code != cudaSuccess) { // TODO tag as unlikely branch
+ // This is the only way to clear the last error, which we should do
+ // here because we are turning it into an exception.
+ cudaGetLastError();
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+ if (Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host())
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaMemAdvise(ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation,
+ cudaCpuDeviceId));
+#endif
+ }
+ Cuda::impl_static_fence(
+ "Kokkos::CudaUVMSpace::impl_allocate: Post UVM Allocation");
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+ return ptr;
+}
+void *CudaHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void *CudaHostPinnedSpace::allocate(const char *arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaHostPinnedSpace::impl_allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ void *ptr = nullptr;
+
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ cudaError_t error_code =
+ cudaHostAlloc(&ptr, arg_alloc_size, cudaHostAllocDefault);
+ if (error_code != cudaSuccess) { // TODO tag as unlikely branch
+ // This is the only way to clear the last error, which we should do
+ // here because we are turning it into an exception.
+ cudaGetLastError();
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+ return ptr;
+}
+
+// </editor-fold> end allocate() }}}1
+//==============================================================================
+void CudaSpace::deallocate(void *const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void CudaSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void CudaSpace::impl_deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+#ifndef CUDART_VERSION
+#error CUDART_VERSION undefined!
+#elif defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
+ if (arg_alloc_size >= memory_threshold_g) {
+ Impl::cuda_device_synchronize(
+ "Kokkos::Cuda: backend fence before async free");
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeAsync(arg_alloc_ptr, m_stream));
+ Impl::cuda_device_synchronize(
+ "Kokkos::Cuda: backend fence after async free");
+ } else {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+ }
+#else
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+#endif
+}
+void CudaUVMSpace::deallocate(void *const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void CudaUVMSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void CudaUVMSpace::impl_deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ Cuda::impl_static_fence(
+ "Kokkos::CudaUVMSpace::impl_deallocate: Pre UVM Deallocation");
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+ if (arg_alloc_ptr != nullptr) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+ }
+ Cuda::impl_static_fence(
+ "Kokkos::CudaUVMSpace::impl_deallocate: Post UVM Deallocation");
+}
+
+void CudaHostPinnedSpace::deallocate(void *const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void CudaHostPinnedSpace::deallocate(const char *arg_label,
+ void *const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+void CudaHostPinnedSpace::impl_deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(arg_alloc_ptr));
+}
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
+ bool to_device) {
+ if ((ptr == nullptr) || (bytes == 0)) return;
+ cudaPointerAttributes attr;
+ KOKKOS_IMPL_CUDA_SAFE_CALL((
+ space.impl_internal_space_instance()->cuda_pointer_get_attributes_wrapper(
+ &attr, ptr)));
+ // I measured this and it turns out prefetching towards the host slows
+ // DualView syncs down, probably because the latency is not too bad in the
+ // first place for the pull down. If we want to change that, provide
+ // cudaCpuDeviceId as the device when to_device is false.
+ bool is_managed = attr.type == cudaMemoryTypeManaged;
+ if (to_device && is_managed &&
+ space.cuda_device_prop().concurrentManagedAccess) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (space.impl_internal_space_instance()->cuda_mem_prefetch_async_wrapper(
+ ptr, bytes, space.cuda_device())));
+ }
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+#if !defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::CudaSpace);
+#else
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(Kokkos::CudaSpace);
+#endif
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::CudaUVMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::CudaHostPinnedSpace);
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
+
+#else
+void KOKKOS_CORE_SRC_CUDA_CUDASPACE_PREVENT_LINK_ERROR() {}
+#endif // KOKKOS_ENABLE_CUDA
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_CUDASPACE_HPP
#define KOKKOS_CUDASPACE_HPP
/*--------------------------------*/
CudaSpace();
- CudaSpace(CudaSpace&& rhs) = default;
- CudaSpace(const CudaSpace& rhs) = default;
- CudaSpace& operator=(CudaSpace&& rhs) = default;
+
+ private:
+ CudaSpace(int device_id, cudaStream_t stream);
+
+ public:
+ CudaSpace(CudaSpace&& rhs) = default;
+ CudaSpace(const CudaSpace& rhs) = default;
+ CudaSpace& operator=(CudaSpace&& rhs) = default;
CudaSpace& operator=(const CudaSpace& rhs) = default;
~CudaSpace() = default;
void* allocate(const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size = 0) const;
+#if defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+#endif
+
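+ // Illustrative note (not additional API): with unified memory enabled, the
+ // execution-space-tagged overloads above discard the execution space and
+ // forward to the untagged versions, so
+ //   space.allocate(exec, "label", bytes)
+ // allocates exactly as
+ //   space.allocate("label", bytes)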
/**\brief Deallocate untracked memory in the cuda space */
void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
void deallocate(const char* arg_label, void* const arg_alloc_ptr,
const size_t arg_alloc_size,
const size_t arg_logical_size = 0) const;
+ static CudaSpace impl_create(int device_id, cudaStream_t stream) {
+ return CudaSpace(device_id, stream);
+ }
+
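+ // impl_create is the internal factory with which the backend builds a space
+ // instance bound to a specific device and stream (the constructor it calls
+ // is private). A sketch of the intended internal use:
+ //   auto space = Kokkos::CudaSpace::impl_create(device_id, stream);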
private:
- template <class, class, class, class>
- friend class Kokkos::Experimental::LogicalMemorySpace;
void* impl_allocate(const Cuda& exec_space, const char* arg_label,
const size_t arg_alloc_size,
const size_t arg_logical_size = 0,
/**\brief Return Name of the MemorySpace */
static constexpr const char* name() { return m_name; }
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- /*--------------------------------*/
- /** \brief Error reporting for HostSpace attempt to access CudaSpace */
- KOKKOS_DEPRECATED static void access_error();
- KOKKOS_DEPRECATED static void access_error(const void* const);
-#endif
-
private:
- int m_device; ///< Which Cuda device
+ int m_device;
+ cudaStream_t m_stream;
static constexpr const char* m_name = "Cuda";
- friend class Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
};
template <>
using device_type = Kokkos::Device<execution_space, memory_space>;
using size_type = unsigned int;
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/** \brief If UVM capability is available */
- static bool available();
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- /*--------------------------------*/
- /** \brief CudaUVMSpace specific routine */
- KOKKOS_DEPRECATED static int number_of_allocations();
+ KOKKOS_DEPRECATED static bool available();
#endif
/*--------------------------------*/
/*--------------------------------*/
CudaUVMSpace();
- CudaUVMSpace(CudaUVMSpace&& rhs) = default;
- CudaUVMSpace(const CudaUVMSpace& rhs) = default;
- CudaUVMSpace& operator=(CudaUVMSpace&& rhs) = default;
+
+ private:
+ CudaUVMSpace(int device_id, cudaStream_t stream);
+
+ public:
+ CudaUVMSpace(CudaUVMSpace&& rhs) = default;
+ CudaUVMSpace(const CudaUVMSpace& rhs) = default;
+ CudaUVMSpace& operator=(CudaUVMSpace&& rhs) = default;
CudaUVMSpace& operator=(const CudaUVMSpace& rhs) = default;
~CudaUVMSpace() = default;
/**\brief Allocate untracked memory in the cuda space */
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
void* allocate(const size_t arg_alloc_size) const;
void* allocate(const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size = 0) const;
const size_t arg_logical_size = 0) const;
private:
- template <class, class, class, class>
- friend class Kokkos::Experimental::LogicalMemorySpace;
void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size = 0,
const Kokkos::Tools::SpaceHandle =
#endif
/*--------------------------------*/
+ static CudaUVMSpace impl_create(int device_id, cudaStream_t stream) {
+ return CudaUVMSpace(device_id, stream);
+ }
+
private:
- int m_device; ///< Which Cuda device
+ int m_device;
+ cudaStream_t m_stream;
#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
static bool kokkos_impl_cuda_pin_uvm_to_host_v;
/*--------------------------------*/
CudaHostPinnedSpace();
- CudaHostPinnedSpace(CudaHostPinnedSpace&& rhs) = default;
- CudaHostPinnedSpace(const CudaHostPinnedSpace& rhs) = default;
- CudaHostPinnedSpace& operator=(CudaHostPinnedSpace&& rhs) = default;
+
+ private:
+ CudaHostPinnedSpace(int device_id, cudaStream_t stream);
+
+ public:
+ CudaHostPinnedSpace(CudaHostPinnedSpace&& rhs) = default;
+ CudaHostPinnedSpace(const CudaHostPinnedSpace& rhs) = default;
+ CudaHostPinnedSpace& operator=(CudaHostPinnedSpace&& rhs) = default;
CudaHostPinnedSpace& operator=(const CudaHostPinnedSpace& rhs) = default;
~CudaHostPinnedSpace() = default;
/**\brief Allocate untracked memory in the space */
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
void* allocate(const size_t arg_alloc_size) const;
void* allocate(const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size = 0) const;
const size_t arg_alloc_size,
const size_t arg_logical_size = 0) const;
+ static CudaHostPinnedSpace impl_create(int device_id, cudaStream_t stream) {
+ return CudaHostPinnedSpace(device_id, stream);
+ }
+
private:
- template <class, class, class, class>
- friend class Kokkos::Experimental::LogicalMemorySpace;
void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
const size_t arg_logical_size = 0,
const Kokkos::Tools::SpaceHandle =
static constexpr const char* name() { return m_name; }
private:
+ int m_device;
+ cudaStream_t m_stream;
+
static constexpr const char* m_name = "CudaHostPinned";
/*--------------------------------*/
bool initialize = true);
static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaSpace,
- Kokkos::CudaSpace>::assignable,
- "");
-static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaUVMSpace,
- Kokkos::CudaUVMSpace>::assignable,
- "");
+ Kokkos::CudaSpace>::assignable);
+static_assert(Kokkos::Impl::MemorySpaceAccess<
+ Kokkos::CudaUVMSpace, Kokkos::CudaUVMSpace>::assignable);
static_assert(
Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaHostPinnedSpace,
- Kokkos::CudaHostPinnedSpace>::assignable,
- "");
+ Kokkos::CudaHostPinnedSpace>::assignable);
//----------------------------------------
template <>
struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::CudaSpace> {
enum : bool { assignable = false };
- enum : bool { accessible = false };
+#if !defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+ enum : bool { accessible = false };
+#else
+ enum : bool { accessible = true };
+#endif
enum : bool { deepcopy = true };
};
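+// Sketch of the consequence (assuming KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY
+// is defined): host code may then touch CudaSpace allocations directly, e.g.
+//   Kokkos::View<int*, Kokkos::CudaSpace> v("v", 1);
+//   v(0) = 42;  // legal host access only because accessible == true
+// whereas without unified memory this requires an explicit deep_copy.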
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-namespace Kokkos {
-namespace Impl {
-
-template <>
-class SharedAllocationRecord<Kokkos::CudaSpace, void>
- : public HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace> {
- private:
- friend class SharedAllocationRecord<Kokkos::CudaUVMSpace, void>;
- friend class SharedAllocationRecordCommon<Kokkos::CudaSpace>;
- friend class HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
-
- using RecordBase = SharedAllocationRecord<void, void>;
- using base_t =
- HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- static ::cudaTextureObject_t attach_texture_object(
- const unsigned sizeof_alias, void* const alloc_ptr,
- const size_t alloc_size);
-
-#ifdef KOKKOS_ENABLE_DEBUG
- static RecordBase s_root_record;
-#endif
-
- ::cudaTextureObject_t m_tex_obj = 0;
- const Kokkos::CudaSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- // This constructor does not forward to the one without exec_space arg
- // in order to work around https://github.com/kokkos/kokkos/issues/5258
- // This constructor is templated so I can't just put it into the cpp file
- // like the other constructor.
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/, const Kokkos::CudaSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_tex_obj(0),
- m_space(arg_space) {
-
- SharedAllocationHeader header;
-
- this->base_t::_fill_host_accessible_header_info(header, arg_label);
-
- // Copy to device memory
- // workaround for issue with NVCC and MSVC
- // https://github.com/kokkos/kokkos/issues/5258
- deep_copy_header_no_exec(RecordBase::m_alloc_ptr, &header);
- }
-
- SharedAllocationRecord(
- const Kokkos::Cuda& exec_space, const Kokkos::CudaSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- SharedAllocationRecord(
- const Kokkos::CudaSpace& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- // helper function to work around MSVC+NVCC issue
- // https://github.com/kokkos/kokkos/issues/5258
- static void deep_copy_header_no_exec(void*, const void*);
-
- public:
- template <typename AliasType>
- inline ::cudaTextureObject_t attach_texture_object() {
- static_assert((std::is_same<AliasType, int>::value ||
- std::is_same<AliasType, ::int2>::value ||
- std::is_same<AliasType, ::int4>::value),
- "Cuda texture fetch only supported for alias types of int, "
- "::int2, or ::int4");
-
- if (m_tex_obj == 0) {
- m_tex_obj = attach_texture_object(sizeof(AliasType),
- (void*)RecordBase::m_alloc_ptr,
- RecordBase::m_alloc_size);
- }
-
- return m_tex_obj;
- }
-
- template <typename AliasType>
- inline int attach_texture_object_offset(const AliasType* const ptr) {
- // Texture object is attached to the entire allocation range
- return ptr - reinterpret_cast<AliasType*>(RecordBase::m_alloc_ptr);
- }
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::CudaUVMSpace, void>
- : public SharedAllocationRecordCommon<Kokkos::CudaUVMSpace> {
- private:
- friend class SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
-
- using base_t = SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
- using RecordBase = SharedAllocationRecord<void, void>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- static RecordBase s_root_record;
-
- ::cudaTextureObject_t m_tex_obj = 0;
- const Kokkos::CudaUVMSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- // This constructor does not forward to the one without exec_space arg
- // in order to work around https://github.com/kokkos/kokkos/issues/5258
- // This constructor is templated so I can't just put it into the cpp file
- // like the other constructor.
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::CudaUVMSpace& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
-#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_tex_obj(0),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
- }
-
- SharedAllocationRecord(
- const Kokkos::CudaUVMSpace& arg_space, const std::string& arg_label,
- const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-
- public:
- template <typename AliasType>
- inline ::cudaTextureObject_t attach_texture_object() {
- static_assert((std::is_same<AliasType, int>::value ||
- std::is_same<AliasType, ::int2>::value ||
- std::is_same<AliasType, ::int4>::value),
- "Cuda texture fetch only supported for alias types of int, "
- "::int2, or ::int4");
-
- if (m_tex_obj == 0) {
- m_tex_obj = SharedAllocationRecord<Kokkos::CudaSpace, void>::
- attach_texture_object(sizeof(AliasType),
- (void*)RecordBase::m_alloc_ptr,
- RecordBase::m_alloc_size);
- }
-
- return m_tex_obj;
- }
-
- template <typename AliasType>
- inline int attach_texture_object_offset(const AliasType* const ptr) {
- // Texture object is attached to the entire allocation range
- return ptr - reinterpret_cast<AliasType*>(RecordBase::m_alloc_ptr);
- }
-};
-
-template <>
-class SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>
- : public SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace> {
- private:
- friend class SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
-
- using RecordBase = SharedAllocationRecord<void, void>;
- using base_t = SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
-
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
-
- static RecordBase s_root_record;
-
- const Kokkos::CudaHostPinnedSpace m_space;
-
- protected:
- ~SharedAllocationRecord();
- SharedAllocationRecord() = default;
-
- // This constructor does not forward to the one without exec_space arg
- // in order to work around https://github.com/kokkos/kokkos/issues/5258
- // This constructor is templated so I can't just put it into the cpp file
- // like the other constructor.
- template <typename ExecutionSpace>
- SharedAllocationRecord(
- const ExecutionSpace& /*exec_space*/,
- const Kokkos::CudaHostPinnedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate)
- : base_t(
-#ifdef KOKKOS_ENABLE_DEBUG
- &SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
- void>::s_root_record,
+#if !defined(KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY)
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION(
+ Kokkos::CudaSpace);
+#else
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::CudaSpace);
#endif
- Impl::checked_allocation_with_header(arg_space, arg_label,
- arg_alloc_size),
- sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
- arg_label),
- m_space(arg_space) {
- this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
- arg_label);
- }
-
- SharedAllocationRecord(
- const Kokkos::CudaHostPinnedSpace& arg_space,
- const std::string& arg_label, const size_t arg_alloc_size,
- const RecordBase::function_type arg_dealloc = &base_t::deallocate);
-};
-
-} // namespace Impl
-} // namespace Kokkos
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::CudaUVMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::CudaHostPinnedSpace);
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_INTERNAL_HPP
#define KOKKOS_CUDA_INTERNAL_HPP
namespace Kokkos {
namespace Impl {
+inline int cuda_warp_per_sm_allocation_granularity(
+ cudaDeviceProp const& properties) {
+ // Allocation granularity of warps in each sm
+ switch (properties.major) {
+ case 3:
+ case 5:
+ case 7:
+ case 8:
+ case 9: return 4;
+ case 6: return (properties.minor == 0 ? 2 : 4);
+ default:
+ throw_runtime_exception(
+ "Unknown device in cuda warp per sm allocation granularity");
+ return 0;
+ }
+}
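+// Illustrative values implied by the switch above: a compute capability 6.0
+// device allocates warps to an SM in pairs, while 7.x/8.x/9.x devices
+// allocate them four at a time:
+//   cuda_warp_per_sm_allocation_granularity(prop_sm60);  // == 2
+//   cuda_warp_per_sm_allocation_granularity(prop_sm80);  // == 4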
+
+inline int cuda_max_warps_per_sm_registers(
+ cudaDeviceProp const& properties, cudaFuncAttributes const& attributes) {
+ // Maximum number of warps per sm as a function of register counts,
+ // subject to the constraint that warps are allocated with a fixed granularity
+ int const max_regs_per_block = properties.regsPerBlock;
+ int const regs_per_warp = attributes.numRegs * properties.warpSize;
+ int const warp_granularity =
+ cuda_warp_per_sm_allocation_granularity(properties);
+ // The granularity of register allocation is chunks of 256 registers per warp,
+ // which implies a need to over-allocate, so we round up
+ int const allocated_regs_per_warp = 256 * ((regs_per_warp + 256 - 1) / 256);
+
+ // The maximum number of warps per SM is constrained from above by register
+ // allocation. To satisfy the constraint that warps per SM is allocated at a
+ // finite granularity, we need to round down.
+ int const max_warps_per_sm =
+ warp_granularity *
+ (max_regs_per_block / (allocated_regs_per_warp * warp_granularity));
+
+ return max_warps_per_sm;
+}
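+// Worked example (hypothetical numbers): for a kernel with
+// attributes.numRegs == 35 on a device with warpSize == 32,
+// regsPerBlock == 65536, and a warp granularity of 4:
+//   regs_per_warp           = 35 * 32 = 1120
+//   allocated_regs_per_warp = 256 * ceil(1120 / 256) = 1280
+//   max_warps_per_sm        = 4 * (65536 / (1280 * 4)) = 4 * 12 = 48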
+
inline int cuda_max_active_blocks_per_sm(cudaDeviceProp const& properties,
cudaFuncAttributes const& attributes,
int block_size, size_t dynamic_shmem) {
- // Limits due do registers/SM
+ // Limits due to registers/SM
int const regs_per_sm = properties.regsPerMultiprocessor;
int const regs_per_thread = attributes.numRegs;
- int const max_blocks_regs = regs_per_sm / (regs_per_thread * block_size);
+ // The granularity of register allocation is chunks of 256 registers per warp
+ // -> 8 registers per thread
+ int const allocated_regs_per_thread = 8 * ((regs_per_thread + 8 - 1) / 8);
+ int max_blocks_regs = regs_per_sm / (allocated_regs_per_thread * block_size);
+
+ // Compute the maximum number of warps as a function of the number of
+ // registers
+ int const max_warps_per_sm_registers =
+ cuda_max_warps_per_sm_registers(properties, attributes);
+
+ // Correct the number of blocks to respect the maximum number of warps per
+ // SM, which is constrained to be a multiple of the warp allocation
+ // granularity defined in `cuda_warp_per_sm_allocation_granularity`.
+ while ((max_blocks_regs * block_size / properties.warpSize) >
+ max_warps_per_sm_registers)
+ max_blocks_regs--;
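+  // e.g. with block_size == 256, warpSize == 32, and
+  // max_warps_per_sm_registers == 48: each block holds 8 warps, so
+  // max_blocks_regs is lowered until 8 * max_blocks_regs <= 48, i.e. to 6.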
// Limits due to shared memory/SM
size_t const shmem_per_sm = properties.sharedMemPerMultiprocessor;
const FunctorType& f, const size_t vector_length,
const size_t shmem_block,
const size_t shmem_thread) {
- (void)cuda_instance;
-
- auto const& prop = Kokkos::Cuda().cuda_device_prop();
+ auto const& prop = cuda_instance->m_deviceProp;
auto const block_size_to_dynamic_shmem = [&f, vector_length, shmem_block,
shmem_thread](int block_size) {
const FunctorType& f, const size_t vector_length,
const size_t shmem_block,
const size_t shmem_thread) {
- (void)cuda_instance;
-
- auto const& prop = Kokkos::Cuda().cuda_device_prop();
+ auto const& prop = cuda_instance->m_deviceProp;
auto const block_size_to_dynamic_shmem = [&f, vector_length, shmem_block,
shmem_thread](int block_size) {
LaunchBounds{});
}
-// Assuming cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferL1)
-// NOTE these number can be obtained several ways:
-// * One option is to download the CUDA Occupancy Calculator spreadsheet, select
-// "Compute Capability" first and check what is the smallest "Shared Memory
-// Size Config" that is available. The "Shared Memory Per Multiprocessor" in
-// bytes is then to be found below in the summary.
-// * Another option would be to look for the information in the "Tuning
-// Guide(s)" of the CUDA Toolkit Documentation for each GPU architecture, in
-// the "Shared Memory" section (more tedious)
-inline size_t get_shmem_per_sm_prefer_l1(cudaDeviceProp const& properties) {
- int const compute_capability = properties.major * 10 + properties.minor;
- return [compute_capability]() {
- switch (compute_capability) {
- case 30:
- case 32:
- case 35: return 16;
- case 37: return 80;
- case 50:
- case 53:
- case 60:
- case 62: return 64;
- case 52:
- case 61: return 96;
- case 70:
- case 80:
- case 86: return 8;
- case 75: return 32;
- default:
- Kokkos::Impl::throw_runtime_exception(
- "Unknown device in cuda block size deduction");
- }
- return 0;
- }() * 1024;
+// Thin version of cuda_get_opt_block_size for cases where there is no shared
+// memory
+template <class LaunchBounds>
+int cuda_get_opt_block_size_no_shmem(const cudaDeviceProp& prop,
+ const cudaFuncAttributes& attr,
+ LaunchBounds) {
+ return cuda_deduce_block_size(
+ false, prop, attr, [](int /*block_size*/) { return 0; }, LaunchBounds{});
}
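+// Usage sketch (assuming prop and attr were obtained via
+// cudaGetDeviceProperties and cudaFuncGetAttributes for the kernel in
+// question):
+//   int block_size =
+//       cuda_get_opt_block_size_no_shmem(prop, attr, LaunchBounds<>{});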
+
} // namespace Impl
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_ERROR_HPP
+#define KOKKOS_CUDA_ERROR_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+void cuda_device_synchronize(const std::string& name);
+void cuda_stream_synchronize(const cudaStream_t stream,
+ const std::string& name);
+
+[[noreturn]] void cuda_internal_error_throw(cudaError e, const char* name,
+ const char* file = nullptr,
+ const int line = 0);
+
+#ifndef KOKKOS_COMPILER_NVHPC
+[[noreturn]]
+#endif
+ void cuda_internal_error_abort(cudaError e, const char* name,
+ const char* file = nullptr,
+ const int line = 0);
+
+inline void cuda_internal_safe_call(cudaError e, const char* name,
+ const char* file = nullptr,
+ const int line = 0) {
+ // 1. Success -> normal continuation.
+ // 2. Error codes for which, to continue using CUDA, the process must be
+ // terminated and relaunched -> call abort on the host-side.
+ // 3. Any other error code -> throw a runtime error.
+ switch (e) {
+ case cudaSuccess: break;
+ case cudaErrorIllegalAddress:
+ case cudaErrorAssert:
+ case cudaErrorHardwareStackError:
+ case cudaErrorIllegalInstruction:
+ case cudaErrorMisalignedAddress:
+ case cudaErrorInvalidAddressSpace:
+ case cudaErrorInvalidPc:
+ case cudaErrorLaunchFailure:
+ cuda_internal_error_abort(e, name, file, line);
+ break;
+ default: cuda_internal_error_throw(e, name, file, line); break;
+ }
+}
+
+#define KOKKOS_IMPL_CUDA_SAFE_CALL(call) \
+ Kokkos::Impl::cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
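+
+// Example: wrapping a CUDA runtime call so failures are reported with the
+// call text and source location:
+//   KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+// Sticky errors such as cudaErrorIllegalAddress abort the process (the
+// context cannot be recovered), while other failures throw a C++ exception.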
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // KOKKOS_ENABLE_CUDA
+#endif // KOKKOS_CUDA_ERROR_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_CUDA_GRAPHNODEKERNEL_IMPL_HPP
#define KOKKOS_KOKKOS_CUDA_GRAPHNODEKERNEL_IMPL_HPP
#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#if defined(KOKKOS_ENABLE_CUDA)
#include <Kokkos_Graph_fwd.hpp>
-#include <impl/Kokkos_GraphImpl.hpp> // GraphAccess needs to be complete
-#include <impl/Kokkos_SharedAlloc.hpp> // SharedAllocationRecord
+#include <impl/Kokkos_GraphImpl.hpp> // GraphAccess needs to be complete
#include <Kokkos_Parallel.hpp>
#include <Kokkos_Parallel_Reduce.hpp>
#include <Kokkos_PointerOwnership.hpp>
-#include <Kokkos_Cuda.hpp>
-#include <cuda_runtime_api.h>
+#include <Cuda/Kokkos_Cuda.hpp>
namespace Kokkos {
namespace Impl {
// covers and we're not modifying it
Kokkos::ObservingRawPtr<const cudaGraph_t> m_graph_ptr = nullptr;
Kokkos::ObservingRawPtr<cudaGraphNode_t> m_graph_node_ptr = nullptr;
- // Note: owned pointer to CudaSpace memory (used for global memory launches),
- // which we're responsible for deallocating, but not responsible for calling
- // its destructor.
- using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
// Basically, we have to make this mutable for the same reasons that the
// global kernel buffers in the Cuda instance are mutable...
- mutable Kokkos::OwningRawPtr<base_t> m_driver_storage = nullptr;
+ mutable std::shared_ptr<base_t> m_driver_storage = nullptr;
+ std::string label;
public:
using Policy = PolicyType;
// attached to the policy?
// TODO @graph kernel name info propagation
template <class PolicyDeduced, class... ArgsDeduced>
- GraphNodeKernelImpl(std::string, Kokkos::Cuda const&, Functor arg_functor,
+ GraphNodeKernelImpl(std::string label_, Cuda const&, Functor arg_functor,
PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
 // This is ugly, but it works and is currently the most minimal change
 // to the codebase...
- : base_t(std::move(arg_functor), (PolicyDeduced &&) arg_policy,
- (ArgsDeduced &&) args...) {}
+ : base_t(std::move(arg_functor), (PolicyDeduced&&)arg_policy,
+ (ArgsDeduced&&)args...),
+ label(std::move(label_)) {}
// FIXME @graph Forward through the instance once that works in the backends
template <class PolicyDeduced>
GraphNodeKernelImpl(Kokkos::Cuda const& ex, Functor arg_functor,
PolicyDeduced&& arg_policy)
- : GraphNodeKernelImpl("", ex, std::move(arg_functor),
- (PolicyDeduced &&) arg_policy) {}
-
- ~GraphNodeKernelImpl() {
- if (m_driver_storage) {
- // We should be the only owner, but this is still the easiest way to
- // allocate and deallocate aligned memory for these sorts of things
- Record::decrement(Record::get_record(m_driver_storage));
- }
- }
+ : GraphNodeKernelImpl("[unlabeled]", ex, std::move(arg_functor),
+ (PolicyDeduced&&)arg_policy) {}
void set_cuda_graph_ptr(cudaGraph_t* arg_graph_ptr) {
m_graph_ptr = arg_graph_ptr;
cudaGraphNode_t* get_cuda_graph_node_ptr() const { return m_graph_node_ptr; }
cudaGraph_t const* get_cuda_graph_ptr() const { return m_graph_ptr; }
- Kokkos::ObservingRawPtr<base_t> allocate_driver_memory_buffer() const {
+ Kokkos::ObservingRawPtr<base_t> allocate_driver_memory_buffer(
+ const CudaSpace& mem) const {
KOKKOS_EXPECTS(m_driver_storage == nullptr)
-
- auto* record = Record::allocate(
- Kokkos::CudaSpace{}, "GraphNodeKernel global memory functor storage",
- sizeof(base_t));
-
- Record::increment(record);
- m_driver_storage = reinterpret_cast<base_t*>(record->data());
+ std::string alloc_label =
+ label + " - GraphNodeKernel global memory functor storage";
+ m_driver_storage = std::shared_ptr<base_t>(
+ static_cast<base_t*>(mem.allocate(alloc_label.c_str(), sizeof(base_t))),
+ [alloc_label, mem](base_t* ptr) {
+ mem.deallocate(alloc_label.c_str(), ptr, sizeof(base_t));
+ });
KOKKOS_ENSURES(m_driver_storage != nullptr)
- return m_driver_storage;
+ return m_driver_storage.get();
}
+
+ auto get_driver_storage() const { return m_driver_storage; }
};
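+// Design note: the functor driver for a global-memory launch now lives in a
+// std::shared_ptr whose deleter returns the allocation to the CudaSpace it
+// came from; a copy of that shared_ptr is stashed in the owning GraphImpl
+// (via get_driver_storage) so the storage outlives this node and is released
+// only when the graph itself is destroyed.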
struct CudaGraphNodeAggregateKernel {
class Tag =
typename PatternTagFromImplSpecialization<KernelType>::type>
struct get_graph_node_kernel_type
- : identity<GraphNodeKernelImpl<Kokkos::Cuda, typename KernelType::Policy,
- typename KernelType::functor_type, Tag>> {};
+ : type_identity<
+ GraphNodeKernelImpl<Kokkos::Cuda, typename KernelType::Policy,
+ typename KernelType::functor_type, Tag>> {};
template <class KernelType>
struct get_graph_node_kernel_type<KernelType, Kokkos::ParallelReduceTag>
- : identity<GraphNodeKernelImpl<Kokkos::Cuda, typename KernelType::Policy,
- typename KernelType::functor_type,
- Kokkos::ParallelReduceTag,
- typename KernelType::reducer_type>> {};
+ : type_identity<GraphNodeKernelImpl<
+ Kokkos::Cuda, typename KernelType::Policy,
+ CombinedFunctorReducer<typename KernelType::functor_type,
+ typename KernelType::reducer_type>,
+ Kokkos::ParallelReduceTag>> {};
//==============================================================================
// <editor-fold desc="get_cuda_graph_*() helper functions"> {{{1
template <class KernelType>
-auto* allocate_driver_storage_for_kernel(KernelType const& kernel) {
+auto* allocate_driver_storage_for_kernel(const CudaSpace& mem,
+ KernelType const& kernel) {
using graph_node_kernel_t =
typename get_graph_node_kernel_type<KernelType>::type;
auto const& kernel_as_graph_kernel =
// TODO @graphs we need to somehow indicate the need for a fence in the
// destructor of the GraphImpl object (so that we don't have to
// just always do it)
- return kernel_as_graph_kernel.allocate_driver_memory_buffer();
+ return kernel_as_graph_kernel.allocate_driver_memory_buffer(mem);
}
template <class KernelType>
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_CUDA_GRAPHNODE_IMPL_HPP
#define KOKKOS_KOKKOS_CUDA_GRAPHNODE_IMPL_HPP
#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#if defined(KOKKOS_ENABLE_CUDA)
#include <Kokkos_Graph_fwd.hpp>
#include <impl/Kokkos_GraphImpl.hpp> // GraphAccess needs to be complete
-#include <Kokkos_Cuda.hpp>
-#include <cuda_runtime_api.h>
+#include <Cuda/Kokkos_Cuda.hpp>
namespace Kokkos {
namespace Impl {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_CUDA_GRAPH_IMPL_HPP
#define KOKKOS_KOKKOS_CUDA_GRAPH_IMPL_HPP
#include <Kokkos_Macros.hpp>
-#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#if defined(KOKKOS_ENABLE_CUDA)
#include <Kokkos_Graph_fwd.hpp>
#include <impl/Kokkos_GraphNodeImpl.hpp>
#include <Cuda/Kokkos_Cuda_GraphNode_Impl.hpp>
-#include <Kokkos_Cuda.hpp>
-#include <cuda_runtime_api.h>
+#include <Cuda/Kokkos_Cuda.hpp>
#include <Cuda/Kokkos_Cuda_Error.hpp>
+#include <Cuda/Kokkos_Cuda_Instance.hpp>
namespace Kokkos {
namespace Impl {
using node_details_t = GraphNodeBackendSpecificDetails<Kokkos::Cuda>;
- void _instantiate_graph() {
+ // Store drivers for the kernel nodes that launch in global memory.
+ // This is required because the drivers' lifetime must be bound to this
+ // instance's lifetime.
+ std::vector<std::shared_ptr<void>> m_driver_storage;
+
+ public:
+ void instantiate() {
+ KOKKOS_EXPECTS(!m_graph_exec);
constexpr size_t error_log_size = 256;
cudaGraphNode_t error_node = nullptr;
char error_log[error_log_size];
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphInstantiate(
- &m_graph_exec, m_graph, &error_node, error_log, error_log_size));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_instantiate_wrapper(&m_graph_exec, m_graph,
+ &error_node, error_log,
+ error_log_size)));
+ KOKKOS_ENSURES(m_graph_exec);
// TODO @graphs print out errors
}
- public:
using root_node_impl_t =
GraphNodeImpl<Kokkos::Cuda, Kokkos::Experimental::TypeErasedTag,
Kokkos::Experimental::TypeErasedTag>;
GraphNodeImpl<Kokkos::Cuda, aggregate_kernel_impl_t,
Kokkos::Experimental::TypeErasedTag>;
- // Not moveable or copyable; it spends its whole life as a shared_ptr in the
+ // Not movable or copyable; it spends its whole life as a shared_ptr in the
// Graph object
- GraphImpl() = delete;
- GraphImpl(GraphImpl const&) = delete;
- GraphImpl(GraphImpl&&) = delete;
+ GraphImpl() = delete;
+ GraphImpl(GraphImpl const&) = delete;
+ GraphImpl(GraphImpl&&) = delete;
GraphImpl& operator=(GraphImpl const&) = delete;
- GraphImpl& operator=(GraphImpl&&) = delete;
+ GraphImpl& operator=(GraphImpl&&) = delete;
~GraphImpl() {
// TODO @graphs we need to somehow indicate the need for a fence in the
// destructor of the GraphImpl object (so that we don't have to
m_execution_space.fence("Kokkos::GraphImpl::~GraphImpl: Graph Destruction");
KOKKOS_EXPECTS(bool(m_graph))
if (bool(m_graph_exec)) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphExecDestroy(m_graph_exec));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_exec_destroy_wrapper(m_graph_exec)));
}
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphDestroy(m_graph));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_destroy_wrapper(m_graph)));
};
explicit GraphImpl(Kokkos::Cuda arg_instance)
: m_execution_space(std::move(arg_instance)) {
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphCreate(&m_graph, cuda_graph_flags_t{0}));
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_create_wrapper(&m_graph, cuda_graph_flags_t{0})));
}
void add_node(std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr) {
// All of the predecessors are just added as normal, so all we need to
// do here is add an empty node
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphAddEmptyNode(&(arg_node_ptr->node_details_t::node), m_graph,
- /* dependencies = */ nullptr,
- /* numDependencies = */ 0));
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_add_empty_node_wrapper(
+ &(arg_node_ptr->node_details_t::node), m_graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0)));
}
template <class NodeImpl>
// requires NodeImplPtr is a shared_ptr to specialization of GraphNodeImpl
- // Also requires that the kernel has the graph node tag in it's policy
+ // Also requires that the kernel has the graph node tag in its policy
void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr) {
- static_assert(
- NodeImpl::kernel_type::Policy::is_graph_kernel::value,
- "Something has gone horribly wrong, but it's too complicated to "
- "explain here. Buy Daisy a coffee and she'll explain it to you.");
+ static_assert(NodeImpl::kernel_type::Policy::is_graph_kernel::value);
KOKKOS_EXPECTS(bool(arg_node_ptr));
// The Kernel launch from the execute() method has been shimmed to insert
// the node into the graph
kernel.set_cuda_graph_node_ptr(&cuda_node);
kernel.execute();
KOKKOS_ENSURES(bool(cuda_node));
+ if (std::shared_ptr<void> tmp = kernel.get_driver_storage())
+ m_driver_storage.push_back(std::move(tmp));
}
template <class NodeImplPtr, class PredecessorRef>
KOKKOS_EXPECTS(bool(cuda_node))
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphAddDependencies(m_graph, &pred_cuda_node, &cuda_node, 1));
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_add_dependencies_wrapper(m_graph, &pred_cuda_node,
+ &cuda_node, 1)));
}
- void submit() {
+ void submit(const execution_space& exec) {
if (!bool(m_graph_exec)) {
- _instantiate_graph();
+ instantiate();
}
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphLaunch(m_graph_exec, m_execution_space.cuda_stream()));
+ (exec.impl_internal_space_instance()->cuda_graph_launch_wrapper(
+ m_graph_exec)));
}
execution_space const& get_execution_space() const noexcept {
auto rv = std::make_shared<root_node_impl_t>(
get_execution_space(), _graph_node_is_root_ctor_tag{});
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphAddEmptyNode(&(rv->node_details_t::node), m_graph,
- /* dependencies = */ nullptr,
- /* numDependencies = */ 0));
+ (m_execution_space.impl_internal_space_instance()
+ ->cuda_graph_add_empty_node_wrapper(&(rv->node_details_t::node),
+ m_graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0)));
KOKKOS_ENSURES(bool(rv->node_details_t::node))
return rv;
}
m_execution_space, _graph_node_kernel_ctor_tag{},
aggregate_kernel_impl_t{});
}
+
+ cudaGraph_t cuda_graph() { return m_graph; }
+ cudaGraphExec_t cuda_graph_exec() { return m_graph_exec; }
};
} // end namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_HALF_HPP_
#define KOKKOS_CUDA_HALF_HPP_
#ifdef KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
#include <Kokkos_Half.hpp>
-#include <Kokkos_NumericTraits.hpp> // reduction_identity
+#include <Kokkos_ReductionIdentity.hpp>
#if CUDA_VERSION >= 11000
#include <cuda_bf16.h>
/************************** bhalf conversions *********************************/
// Take this branch if the CUDA version is >= 11.0.0 and less than 11.1.0 or if the
-// architecture is not Ampere
+// architecture is older than Ampere
+#if !defined(KOKKOS_ARCH_KEPLER) && !defined(KOKKOS_ARCH_MAXWELL) && \
+ !defined(KOKKOS_ARCH_PASCAL) && !defined(KOKKOS_ARCH_VOLTA) && \
+ !defined(KOKKOS_ARCH_TURING75)
+#define KOKKOS_IMPL_NVIDIA_GPU_ARCH_SUPPORT_BHALF
+#endif
+
#if CUDA_VERSION >= 11000 && \
- (CUDA_VERSION < 11010 || !defined(KOKKOS_ARCH_AMPERE))
+ (CUDA_VERSION < 11010 || \
+ !defined(KOKKOS_IMPL_NVIDIA_GPU_ARCH_SUPPORT_BHALF))
KOKKOS_INLINE_FUNCTION
bhalf_t cast_to_bhalf(bhalf_t val) { return val; }
}
#endif // CUDA_VERSION >= 11000 && CUDA_VERSION < 11010
-#if CUDA_VERSION >= 11010 && \
- ((defined(KOKKOS_ARCH_AMPERE80) || defined(KOKKOS_ARCH_AMPERE86)))
+#if CUDA_VERSION >= 11010 && defined(KOKKOS_IMPL_NVIDIA_GPU_ARCH_SUPPORT_BHALF)
KOKKOS_INLINE_FUNCTION
bhalf_t cast_to_bhalf(bhalf_t val) { return val; }
KOKKOS_INLINE_FUNCTION
return static_cast<T>(cast_from_bhalf<unsigned long long>(val));
}
#endif // CUDA_VERSION >= 11010
+
+#undef KOKKOS_IMPL_NVIDIA_GPU_ARCH_SUPPORT_BHALF
} // namespace Experimental
#if (CUDA_VERSION >= 11000)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+#if !(defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 900) && \
+ !(defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL50) || \
+ defined(KOKKOS_ARCH_MAXWELL52))
+#include <cuda_fp16.h>
+#if (CUDA_VERSION >= 11000)
+#include <cuda_bf16.h>
+#endif // CUDA_VERSION >= 11000
+
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+// Make sure no one else tries to define half_t
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+ using type = __half;
+};
+#if (CUDA_VERSION >= 11000)
+#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
+struct bhalf_impl_t {
+ using type = __nv_bfloat16;
+};
+#endif // CUDA_VERSION >= 11000
+} // namespace Impl
+} // namespace Kokkos
+#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
+#endif // Disables for half_t on cuda:
+ // Clang/8||KEPLER30||KEPLER32||KEPLER37||MAXWELL50||MAXWELL52
+#endif // KOKKOS_ENABLE_CUDA
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/*--------------------------------------------------------------------------*/
+/* Kokkos interfaces */
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Kokkos_Core.hpp>
+
+// #include <Cuda/Kokkos_Cuda_Error.hpp>
+// #include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+// #include <Cuda/Kokkos_Cuda_Instance.hpp>
+// #include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_CheckedIntegerOps.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+/*--------------------------------------------------------------------------*/
+/* Standard 'C' libraries */
+#include <cstdlib>
+
+/* Standard 'C++' libraries */
+#include <vector>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+namespace Kokkos {
+namespace Impl {
+
+bool CudaInternal::kokkos_impl_cuda_use_serial_execution_v = false;
+
+void CudaInternal::cuda_set_serial_execution(bool val) {
+ CudaInternal::kokkos_impl_cuda_use_serial_execution_v = val;
+}
+bool CudaInternal::cuda_use_serial_execution() {
+ return CudaInternal::kokkos_impl_cuda_use_serial_execution_v;
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+void kokkos_impl_cuda_set_serial_execution(bool val) {
+ Kokkos::Impl::CudaInternal::cuda_set_serial_execution(val);
+}
+bool kokkos_impl_cuda_use_serial_execution() {
+ return Kokkos::Impl::CudaInternal::cuda_use_serial_execution();
+}
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+
+__device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer
+ [Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)];
+
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+
+__global__ void query_cuda_kernel_arch(int *d_arch) {
+#ifdef _NVHPC_CUDA
+ *d_arch = __builtin_current_device_sm() * 10;
+#else
+#if defined(__CUDA_ARCH__)
+ *d_arch = __CUDA_ARCH__;
+#else
+ *d_arch = 0;
+#endif
+#endif
+}
+
+/** Query the compute capability with which kernels are actually launched on the device: */
+int cuda_kernel_arch(int device_id) {
+ int arch = 0;
+ int *d_arch = nullptr;
+
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(device_id));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaMalloc(reinterpret_cast<void **>(&d_arch), sizeof(int)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaMemcpy(d_arch, &arch, sizeof(int), cudaMemcpyDefault));
+
+ query_cuda_kernel_arch<<<1, 1>>>(d_arch);
+
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaMemcpy(&arch, d_arch, sizeof(int), cudaMemcpyDefault));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(d_arch));
+ return arch;
+}
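+// Presumed use (an assumption, based on how this helper is named): compare
+// cuda_kernel_arch(device_id) against the architecture Kokkos was compiled
+// for and warn on a mismatch during initialization.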
+
+constexpr auto sizeScratchGrain =
+ sizeof(Cuda::size_type[Impl::CudaTraits::WarpSize]);
+
+std::size_t scratch_count(const std::size_t size) {
+ return (size + sizeScratchGrain - 1) / sizeScratchGrain;
+}
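+// Worked example: Cuda::size_type is unsigned int (4 bytes) and WarpSize is
+// 32, so sizeScratchGrain == sizeof(unsigned[32]) == 128 bytes; hence
+// scratch_count(100) == 1 and scratch_count(300) == 3 grains.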
+
+} // namespace
+
+Kokkos::View<uint32_t *, Kokkos::CudaSpace> cuda_global_unique_token_locks(
+ bool deallocate) {
+ static Kokkos::View<uint32_t *, Kokkos::CudaSpace> locks =
+ Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
+ if (!deallocate && locks.extent(0) == 0)
+ locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>(
+ "Kokkos::UniqueToken<Cuda>::m_locks", Kokkos::Cuda().concurrency());
+ if (deallocate) locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
+ return locks;
+}
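+// The lock view is created lazily on first request and sized to the device
+// concurrency; calling with deallocate == true resets it to an empty view,
+// which is how the locks are released (typically during finalization).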
+
+void cuda_device_synchronize(const std::string &name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+#if defined(KOKKOS_COMPILER_CLANG)
+ // annotating with __host__ silences a clang warning about using
+ // cudaDeviceSynchronize in device code
+ [] __host__()
+#else
+ []()
+#endif
+ {
+ for (int cuda_device : Kokkos::Impl::CudaInternal::cuda_devices) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(cuda_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+ }
+ });
+}
+
+void cuda_stream_synchronize(const cudaStream_t stream, const CudaInternal *ptr,
+ const std::string &name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+ name,
+ Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+ ptr->impl_get_instance_id()},
+ [&]() {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (ptr->cuda_stream_synchronize_wrapper(stream)));
+ });
+}
+
+void cuda_internal_error_throw(cudaError e, const char *name, const char *file,
+ const int line) {
+ std::ostringstream out;
+ out << name << " error( " << cudaGetErrorName(e)
+ << "): " << cudaGetErrorString(e);
+ if (file) {
+ out << " " << file << ":" << line;
+ }
+ throw_runtime_exception(out.str());
+}
+
+void cuda_internal_error_abort(cudaError e, const char *name, const char *file,
+ const int line) {
+ std::ostringstream out;
+ out << name << " error( " << cudaGetErrorName(e)
+ << "): " << cudaGetErrorString(e);
+ if (file) {
+ out << " " << file << ":" << line;
+ }
+ // FIXME Call Kokkos::Impl::host_abort instead of Kokkos::abort to avoid a
+ // warning about Kokkos::abort returning in some cases.
+ host_abort(out.str().c_str());
+}
+
+//----------------------------------------------------------------------------
+
+int Impl::CudaInternal::concurrency() {
+ static int const concurrency = m_deviceProp.maxThreadsPerMultiProcessor *
+ m_deviceProp.multiProcessorCount;
+ return concurrency;
+}
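+// Example (hypothetical device): with 2048 resident threads per SM and 108
+// multiprocessors, concurrency() == 2048 * 108 == 221184.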
+
+void CudaInternal::print_configuration(std::ostream &s) const {
+#if defined(KOKKOS_ENABLE_CUDA)
+ s << "macro KOKKOS_ENABLE_CUDA : defined\n";
+#endif
+#if defined(CUDA_VERSION)
+ s << "macro CUDA_VERSION = " << CUDA_VERSION << " = version "
+ << CUDA_VERSION / 1000 << "." << (CUDA_VERSION % 1000) / 10 << '\n';
+#endif
+
+ for (int i : get_visible_devices()) {
+ cudaDeviceProp prop;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop, i));
+ s << "Kokkos::Cuda[ " << i << " ] " << prop.name << " capability "
+ << prop.major << "." << prop.minor
+ << ", Total Global Memory: " << human_memory_size(prop.totalGlobalMem)
+ << ", Shared Memory per Block: "
+ << human_memory_size(prop.sharedMemPerBlock);
+ if (m_cudaDev == i) s << " : Selected";
+ s << '\n';
+ }
+}
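+
+// Example output line (illustrative only; the device name and sizes are
+// hypothetical, and the exact size formatting comes from human_memory_size):
+//   Kokkos::Cuda[ 0 ] NVIDIA A100 capability 8.0, Total Global Memory: 40G,
+//   Shared Memory per Block: 48K : Selected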
+
+//----------------------------------------------------------------------------
+
+CudaInternal::~CudaInternal() {
+ if (m_scratchSpace || m_scratchFlags || m_scratchUnified) {
+ std::cerr << "Kokkos::Cuda ERROR: Failed to call Kokkos::Cuda::finalize()"
+ << std::endl;
+ }
+
+ m_scratchSpaceCount = 0;
+ m_scratchFlagsCount = 0;
+ m_scratchUnifiedCount = 0;
+ m_scratchSpace = nullptr;
+ m_scratchFlags = nullptr;
+ m_scratchUnified = nullptr;
+ m_stream = nullptr;
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ }
+}
+
+int CudaInternal::verify_is_initialized(const char *const label) const {
+ if (m_cudaDev < 0) {
+ Kokkos::abort((std::string("Kokkos::Cuda::") + label +
+ " : ERROR device not initialized\n")
+ .c_str());
+ }
+ return 0 <= m_cudaDev;
+}
+uint32_t CudaInternal::impl_get_instance_id() const { return m_instance_id; }
+CudaInternal &CudaInternal::singleton() {
+ static CudaInternal self;
+ return self;
+}
+void CudaInternal::fence(const std::string &name) const {
+ Impl::cuda_stream_synchronize(get_stream(), this, name);
+}
+void CudaInternal::fence() const {
+ fence("Kokkos::CudaInternal::fence(): Unnamed Instance Fence");
+}
+
+void CudaInternal::initialize(cudaStream_t stream) {
+ KOKKOS_EXPECTS(!is_initialized());
+
+ if (was_finalized)
+ Kokkos::abort("Calling Cuda::initialize after Cuda::finalize is illegal\n");
+ was_initialized = true;
+
+ // Derive the device associated with the stream and make it the current
+ // device
+ CUcontext context;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaError_t(cuStreamGetCtx(stream, &context)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaError_t(cuCtxPushCurrent(context)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaError_t(cuCtxGetDevice(&m_cudaDev)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev));
+
+ m_stream = stream;
+ CudaInternal::cuda_devices.insert(m_cudaDev);
+
+ // Allocate a staging buffer for constant memory in pinned host memory,
+ // and an event used to avoid overwriting that buffer while a previous
+ // kernel launch may still be reading from it
+ if (!constantMemHostStagingPerDevice[m_cudaDev])
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_malloc_host_wrapper(
+ reinterpret_cast<void **>(&constantMemHostStagingPerDevice[m_cudaDev]),
+ CudaTraits::ConstantMemoryUsage)));
+
+ if (!constantMemReusablePerDevice[m_cudaDev])
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_event_create_wrapper(&constantMemReusablePerDevice[m_cudaDev])));
+
+ //----------------------------------
+ // Multiblock reduction uses scratch flags for counters
+ // and scratch space for partial reduction values.
+ // Allocate some initial space. This will grow as needed.
+
+ {
+ // Maximum number of warps: the final reduction step runs in a single
+ // warp, one thread per contributing warp, so cap the count at WarpSize.
+ auto const maxWarpCount = std::min<unsigned>(
+ m_deviceProp.maxThreadsPerBlock / CudaTraits::WarpSize,
+ CudaTraits::WarpSize);
+ unsigned const reduce_block_count =
+ maxWarpCount * Impl::CudaTraits::WarpSize;
+
+ (void)scratch_unified(16 * sizeof(size_type));
+ (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
+ (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
+ }
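+
+ // Worked example (a sketch assuming maxThreadsPerBlock == 1024, 32-wide
+ // warps, and a 4-byte size_type): maxWarpCount = min(1024/32, 32) = 32 and
+ // reduce_block_count = 32*32 = 1024, so the initial requests are 64 B of
+ // unified scratch, 8 KiB of flags, and 64 KiB of reduction scratch, each
+ // rounded up to whole 128-byte grains by scratch_count().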
+
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ }
+
+ m_num_scratch_locks = concurrency();
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_malloc_wrapper(reinterpret_cast<void **>(&m_scratch_locks),
+ sizeof(int32_t) * m_num_scratch_locks)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_memset_wrapper(
+ m_scratch_locks, 0, sizeof(int32_t) * m_num_scratch_locks)));
+}
+
+//----------------------------------------------------------------------------
+
+Cuda::size_type *CudaInternal::scratch_flags(const std::size_t size) const {
+ if (verify_is_initialized("scratch_flags") &&
+ m_scratchFlagsCount < scratch_count(size)) {
+ auto mem_space = Kokkos::CudaSpace::impl_create(m_cudaDev, m_stream);
+
+ if (m_scratchFlags) {
+ mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+ }
+
+ m_scratchFlagsCount = scratch_count(size);
+
+ std::size_t alloc_size =
+ multiply_overflow_abort(m_scratchFlagsCount, sizeScratchGrain);
+ m_scratchFlags = static_cast<size_type *>(
+ mem_space.allocate("Kokkos::InternalScratchFlags", alloc_size));
+
+ // We only zero-initialize the allocation when we actually allocate.
+ // It's the responsibility of the features using scratch_flags,
+ // namely parallel_reduce and parallel_scan, to reset the used values to 0.
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_memset_wrapper(m_scratchFlags, 0, alloc_size)));
+ }
+
+ return m_scratchFlags;
+}
+
+Cuda::size_type *CudaInternal::scratch_space(const std::size_t size) const {
+ if (verify_is_initialized("scratch_space") &&
+ m_scratchSpaceCount < scratch_count(size)) {
+ auto mem_space = Kokkos::CudaSpace::impl_create(m_cudaDev, m_stream);
+
+ if (m_scratchSpace) {
+ mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+ }
+
+ m_scratchSpaceCount = scratch_count(size);
+
+ std::size_t alloc_size =
+ multiply_overflow_abort(m_scratchSpaceCount, sizeScratchGrain);
+ m_scratchSpace = static_cast<size_type *>(
+ mem_space.allocate("Kokkos::InternalScratchSpace", alloc_size));
+ }
+
+ return m_scratchSpace;
+}
+
+Cuda::size_type *CudaInternal::scratch_unified(const std::size_t size) const {
+ if (verify_is_initialized("scratch_unified") &&
+ m_scratchUnifiedCount < scratch_count(size)) {
+ auto mem_space =
+ Kokkos::CudaHostPinnedSpace::impl_create(m_cudaDev, m_stream);
+
+ if (m_scratchUnified) {
+ mem_space.deallocate(m_scratchUnified,
+ m_scratchUnifiedCount * sizeScratchGrain);
+ }
+
+ m_scratchUnifiedCount = scratch_count(size);
+
+ std::size_t alloc_size =
+ multiply_overflow_abort(m_scratchUnifiedCount, sizeScratchGrain);
+ m_scratchUnified = static_cast<size_type *>(
+ mem_space.allocate("Kokkos::InternalScratchUnified", alloc_size));
+ }
+
+ return m_scratchUnified;
+}
+
+Cuda::size_type *CudaInternal::scratch_functor(const std::size_t size) const {
+ if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) {
+ auto mem_space = Kokkos::CudaSpace::impl_create(m_cudaDev, m_stream);
+
+ if (m_scratchFunctor) {
+ mem_space.deallocate(m_scratchFunctor, m_scratchFunctorSize);
+ }
+
+ m_scratchFunctorSize = size;
+
+ m_scratchFunctor = static_cast<size_type *>(mem_space.allocate(
+ "Kokkos::InternalScratchFunctor", m_scratchFunctorSize));
+ }
+
+ return m_scratchFunctor;
+}
+
+int CudaInternal::acquire_team_scratch_space() {
+ int current_team_scratch = 0;
+ int zero = 0;
+ while (!m_team_scratch_pool[current_team_scratch].compare_exchange_weak(
+ zero, 1, std::memory_order_release, std::memory_order_relaxed)) {
+ current_team_scratch = (current_team_scratch + 1) % m_n_team_scratch;
+ }
+
+ return current_team_scratch;
+}
+
+void *CudaInternal::resize_team_scratch_space(int scratch_pool_id,
+ std::int64_t bytes,
+ bool force_shrink) {
+ // Multiple ParallelFor/Reduce teams can call this function concurrently
+ // and would otherwise invalidate m_team_scratch_ptr. We use a pool of
+ // allocations to avoid any race condition.
+ auto mem_space = Kokkos::CudaSpace::impl_create(m_cudaDev, m_stream);
+ if (m_team_scratch_current_size[scratch_pool_id] == 0) {
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ mem_space.allocate("Kokkos::CudaSpace::TeamScratchMemory",
+ m_team_scratch_current_size[scratch_pool_id]);
+ }
+ if ((bytes > m_team_scratch_current_size[scratch_pool_id]) ||
+ ((bytes < m_team_scratch_current_size[scratch_pool_id]) &&
+ (force_shrink))) {
+ mem_space.deallocate(m_team_scratch_ptr[scratch_pool_id],
+ m_team_scratch_current_size[scratch_pool_id]);
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ mem_space.allocate("Kokkos::CudaSpace::TeamScratchMemory", bytes);
+ }
+ return m_team_scratch_ptr[scratch_pool_id];
+}
+
+void CudaInternal::release_team_scratch_space(int scratch_pool_id) {
+ m_team_scratch_pool[scratch_pool_id] = 0;
+}
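+
+// Typical sequence in a team-level parallel construct (an illustrative
+// sketch, not the literal call sites):
+//   int pool_id = instance->acquire_team_scratch_space(); // spin on pool
+//   void* scratch = instance->resize_team_scratch_space(pool_id, bytes);
+//   /* ... launch the kernel that uses scratch ... */
+//   instance->release_team_scratch_space(pool_id); // mark the slot free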
+
+//----------------------------------------------------------------------------
+
+void CudaInternal::finalize() {
+ // skip if finalize() has already been called
+ if (was_finalized) return;
+
+ was_finalized = true;
+
+ auto cuda_mem_space = Kokkos::CudaSpace::impl_create(m_cudaDev, m_stream);
+ if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
+ auto host_mem_space =
+ Kokkos::CudaHostPinnedSpace::impl_create(m_cudaDev, m_stream);
+ cuda_mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+ cuda_mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+ host_mem_space.deallocate(m_scratchUnified,
+ m_scratchUnifiedCount * sizeScratchGrain);
+ if (m_scratchFunctorSize > 0) {
+ cuda_mem_space.deallocate(m_scratchFunctor, m_scratchFunctorSize);
+ }
+ }
+
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ if (m_team_scratch_current_size[i] > 0)
+ cuda_mem_space.deallocate(m_team_scratch_ptr[i],
+ m_team_scratch_current_size[i]);
+ }
+
+ m_scratchSpaceCount = 0;
+ m_scratchFlagsCount = 0;
+ m_scratchUnifiedCount = 0;
+ m_scratchSpace = nullptr;
+ m_scratchFlags = nullptr;
+ m_scratchUnified = nullptr;
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ }
+
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_free_wrapper(m_scratch_locks)));
+ m_scratch_locks = nullptr;
+ m_num_scratch_locks = 0;
+}
+
+//----------------------------------------------------------------------------
+
+Cuda::size_type *cuda_internal_scratch_space(const Cuda &instance,
+ const std::size_t size) {
+ return instance.impl_internal_space_instance()->scratch_space(size);
+}
+
+Cuda::size_type *cuda_internal_scratch_flags(const Cuda &instance,
+ const std::size_t size) {
+ return instance.impl_internal_space_instance()->scratch_flags(size);
+}
+
+Cuda::size_type *cuda_internal_scratch_unified(const Cuda &instance,
+ const std::size_t size) {
+ return instance.impl_internal_space_instance()->scratch_unified(size);
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int Cuda::concurrency() {
+#else
+int Cuda::concurrency() const {
+#endif
+ return Impl::CudaInternal::concurrency();
+}
+
+int Cuda::impl_is_initialized() {
+ return Impl::CudaInternal::singleton().is_initialized();
+}
+
+void Cuda::impl_initialize(InitializationSettings const &settings) {
+ const std::vector<int> &visible_devices = Impl::get_visible_devices();
+ const int cuda_device_id =
+ Impl::get_gpu(settings).value_or(visible_devices[0]);
+
+ cudaDeviceProp cudaProp;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaGetDeviceProperties(&cudaProp, cuda_device_id));
+ Impl::CudaInternal::m_deviceProp = cudaProp;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(cuda_device_id));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+
+ // Query the compute capability for which the kernels were actually compiled:
+ Impl::CudaInternal::m_cudaArch = Impl::cuda_kernel_arch(cuda_device_id);
+
+ if (Impl::CudaInternal::m_cudaArch == 0) {
+ Kokkos::abort(
+ "Kokkos::Cuda::initialize ERROR: likely mismatch of architecture\n");
+ }
+
+ int compiled_major = Impl::CudaInternal::m_cudaArch / 100;
+ int compiled_minor = (Impl::CudaInternal::m_cudaArch % 100) / 10;
+
+ if ((compiled_major > cudaProp.major) ||
+ ((compiled_major == cudaProp.major) &&
+ (compiled_minor > cudaProp.minor))) {
+ std::stringstream ss;
+ ss << "Kokkos::Cuda::initialize ERROR: running kernels compiled for "
+ "compute capability "
+ << compiled_major << "." << compiled_minor
+ << " on device with compute capability " << cudaProp.major << "."
+ << cudaProp.minor << " is not supported by CUDA!\n";
+ std::string msg = ss.str();
+ Kokkos::abort(msg.c_str());
+ }
+ if (Kokkos::show_warnings() &&
+ (compiled_major != cudaProp.major || compiled_minor != cudaProp.minor)) {
+ std::cerr << "Kokkos::Cuda::initialize WARNING: running kernels compiled "
+ "for compute capability "
+ << compiled_major << "." << compiled_minor
+ << " on device with compute capability " << cudaProp.major << "."
+ << cudaProp.minor
+ << " , this will likely reduce potential performance."
+ << std::endl;
+ }
+
+ //----------------------------------
+
+#ifdef KOKKOS_ENABLE_CUDA_UVM
+ const char *env_force_device_alloc =
+ getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC");
+ bool force_device_alloc;
+ if (env_force_device_alloc == nullptr)
+ force_device_alloc = false;
+ else
+ force_device_alloc = std::stoi(env_force_device_alloc) != 0;
+
+ const char *env_visible_devices = getenv("CUDA_VISIBLE_DEVICES");
+ bool visible_devices_one = true;
+ if (env_visible_devices == nullptr) visible_devices_one = false;
+
+ if (Kokkos::show_warnings() &&
+ (!visible_devices_one && !force_device_alloc)) {
+ std::cerr << R"warning(
+Kokkos::Cuda::initialize WARNING: Cuda is allocating into UVMSpace by default
+ without setting CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or
+ setting CUDA_VISIBLE_DEVICES.
+ This could on multi GPU systems lead to severe performance"
+ penalties.)warning"
+ << std::endl;
+ }
+#endif
+
+ //----------------------------------
+
+#ifdef KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY
+ // Check if unified memory is available
+ int cuda_result;
+ cudaDeviceGetAttribute(&cuda_result, cudaDevAttrConcurrentManagedAccess,
+ cuda_device_id);
+ if (cuda_result == 0) {
+ Kokkos::abort(
+ "Kokkos::Cuda::initialize ERROR: Unified memory is not available on "
+ "this device\n"
+ "Please recompile Kokkos with "
+ "-DKokkos_ENABLE_IMPL_CUDA_UNIFIED_MEMORY=OFF\n");
+ }
+#endif
+
+ //----------------------------------
+
+ cudaStream_t singleton_stream;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(cuda_device_id));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamCreate(&singleton_stream));
+
+ // Init the lock array used for arbitrarily sized atomics
+ desul::Impl::init_lock_arrays(); // FIXME
+
+ Impl::CudaInternal::singleton().initialize(singleton_stream);
+}
+
+void Cuda::impl_finalize() {
+ (void)Impl::cuda_global_unique_token_locks(true);
+ desul::Impl::finalize_lock_arrays(); // FIXME
+
+ for (const auto cuda_device : Kokkos::Impl::CudaInternal::cuda_devices) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(cuda_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaFreeHost(Kokkos::Impl::CudaInternal::constantMemHostStagingPerDevice
+ [cuda_device]));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaEventDestroy(
+ Kokkos::Impl::CudaInternal::constantMemReusablePerDevice[cuda_device]));
+ }
+
+ auto &deep_copy_space = Impl::cuda_get_deep_copy_space(/*initialize*/ false);
+ if (deep_copy_space)
+ deep_copy_space->impl_internal_space_instance()->finalize();
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaStreamDestroy(Impl::cuda_get_deep_copy_stream()));
+
+ Impl::CudaInternal::singleton().finalize();
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ cudaStreamDestroy(Impl::CudaInternal::singleton().m_stream));
+}
+
+Cuda::Cuda()
+ : m_space_instance(&Impl::CudaInternal::singleton(),
+ [](Impl::CudaInternal *) {}) {
+ Impl::CudaInternal::singleton().verify_is_initialized(
+ "Cuda instance constructor");
+}
+
+KOKKOS_DEPRECATED Cuda::Cuda(cudaStream_t stream, bool manage_stream)
+ : Cuda(stream,
+ manage_stream ? Impl::ManageStream::yes : Impl::ManageStream::no) {}
+
+Cuda::Cuda(cudaStream_t stream, Impl::ManageStream manage_stream)
+ : m_space_instance(
+ new Impl::CudaInternal, [manage_stream](Impl::CudaInternal *ptr) {
+ ptr->finalize();
+ if (static_cast<bool>(manage_stream)) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamDestroy(ptr->m_stream));
+ }
+ delete ptr;
+ }) {
+ Impl::CudaInternal::singleton().verify_is_initialized(
+ "Cuda instance constructor");
+ m_space_instance->initialize(stream);
+}
+
+void Cuda::print_configuration(std::ostream &os, bool /*verbose*/) const {
+ os << "Device Execution Space:\n";
+ os << " KOKKOS_ENABLE_CUDA: yes\n";
+
+ os << "Cuda Options:\n";
+ os << " KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE: ";
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+ os << " KOKKOS_ENABLE_CUDA_UVM: ";
+#ifdef KOKKOS_ENABLE_CUDA_UVM
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+ os << " KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC: ";
+#ifdef KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+#ifdef KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY
+ os << " KOKKOS_ENABLE_IMPL_CUDA_UNIFIED_MEMORY: ";
+ os << "yes\n";
+#endif
+
+ os << "\nCuda Runtime Configuration:\n";
+
+ m_space_instance->print_configuration(os);
+}
+
+void Cuda::impl_static_fence(const std::string &name) {
+ Kokkos::Impl::cuda_device_synchronize(name);
+}
+
+void Cuda::fence(const std::string &name) const {
+ m_space_instance->fence(name);
+}
+
+const char *Cuda::name() { return "Cuda"; }
+uint32_t Cuda::impl_instance_id() const noexcept {
+ return m_space_instance->impl_get_instance_id();
+}
+
+cudaStream_t Cuda::cuda_stream() const {
+ return m_space_instance->get_stream();
+}
+int Cuda::cuda_device() const { return m_space_instance->m_cudaDev; }
+const cudaDeviceProp &Cuda::cuda_device_prop() const {
+ return m_space_instance->m_deviceProp;
+}
+
+namespace Impl {
+
+int g_cuda_space_factory_initialized =
+ initialize_space_factory<Cuda>("150_Cuda");
+
+int CudaInternal::m_cudaArch = -1;
+cudaDeviceProp CudaInternal::m_deviceProp;
+std::set<int> CudaInternal::cuda_devices = {};
+std::map<int, unsigned long *> CudaInternal::constantMemHostStagingPerDevice =
+ {};
+std::map<int, cudaEvent_t> CudaInternal::constantMemReusablePerDevice = {};
+std::map<int, std::mutex> CudaInternal::constantMemMutexPerDevice = {};
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+void Kokkos::Impl::create_Cuda_instances(std::vector<Cuda> &instances) {
+ for (int s = 0; s < int(instances.size()); s++) {
+ cudaStream_t stream;
+ KOKKOS_IMPL_CUDA_SAFE_CALL((
+ instances[s].impl_internal_space_instance()->cuda_stream_create_wrapper(
+ &stream)));
+ instances[s] = Cuda(stream, ManageStream::yes);
+ }
+}
+
+#else
+
+void KOKKOS_CORE_SRC_CUDA_IMPL_PREVENT_LINK_ERROR() {}
+
+#endif // KOKKOS_ENABLE_CUDA
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_INSTANCE_HPP_
+#define KOKKOS_CUDA_INSTANCE_HPP_
+
+#include <vector>
+#include <impl/Kokkos_Tools.hpp>
+#include <atomic>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+#include <cuda_runtime_api.h>
+#include "Kokkos_CudaSpace.hpp"
+
+#include <set>
+#include <map>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// These functions exist to allow working around a suspected system software
+// issue, or to check for race conditions. They are not currently an
+// officially supported capability.
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+extern "C" void kokkos_impl_cuda_set_serial_execution(bool);
+extern "C" bool kokkos_impl_cuda_use_serial_execution();
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+struct CudaTraits {
+ static constexpr CudaSpace::size_type WarpSize = 32 /* 0x0020 */;
+ static constexpr CudaSpace::size_type WarpIndexMask =
+ 0x001f; /* Mask for warp index */
+ static constexpr CudaSpace::size_type WarpIndexShift =
+ 5; /* WarpSize == 1 << WarpIndexShift */
+
+ static constexpr CudaSpace::size_type ConstantMemoryUsage =
+ 0x008000; /* 32k bytes */
+ static constexpr CudaSpace::size_type ConstantMemoryCache =
+ 0x002000; /* 8k bytes */
+ static constexpr CudaSpace::size_type KernelArgumentLimit =
+ 0x001000; /* 4k bytes */
+ static constexpr CudaSpace::size_type MaxHierarchicalParallelism =
+ 1024; /* team_size * vector_length */
+ using ConstantGlobalBufferType =
+ unsigned long[ConstantMemoryUsage / sizeof(unsigned long)];
+
+ static constexpr int ConstantMemoryUseThreshold = 0x000200 /* 512 bytes */;
+};
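+
+// A sketch of how these constants interact at launch (an interpretation,
+// not a literal quote of the dispatch code): a functor larger than
+// KernelArgumentLimit (4 KiB) cannot be passed as a kernel argument, and
+// one larger than ConstantMemoryUseThreshold (512 B) but within
+// ConstantMemoryUsage (32 KiB) may instead be staged through the constant
+// memory buffer declared via ConstantGlobalBufferType.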
+
+//----------------------------------------------------------------------------
+
+CudaSpace::size_type* cuda_internal_scratch_flags(const Cuda&,
+ const std::size_t size);
+CudaSpace::size_type* cuda_internal_scratch_space(const Cuda&,
+ const std::size_t size);
+CudaSpace::size_type* cuda_internal_scratch_unified(const Cuda&,
+ const std::size_t size);
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+class CudaInternal {
+ private:
+ CudaInternal(const CudaInternal&);
+ CudaInternal& operator=(const CudaInternal&);
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+ static bool kokkos_impl_cuda_use_serial_execution_v;
+#endif
+
+ public:
+ using size_type = Cuda::size_type;
+
+ int m_cudaDev = -1;
+
+ // Device Properties
+ static int m_cudaArch;
+ static int concurrency();
+
+ static cudaDeviceProp m_deviceProp;
+
+ // Scratch Spaces for Reductions
+ mutable std::size_t m_scratchSpaceCount;
+ mutable std::size_t m_scratchFlagsCount;
+ mutable std::size_t m_scratchUnifiedCount;
+ mutable std::size_t m_scratchFunctorSize;
+
+ mutable size_type* m_scratchSpace;
+ mutable size_type* m_scratchFlags;
+ mutable size_type* m_scratchUnified;
+ mutable size_type* m_scratchFunctor;
+ cudaStream_t m_stream;
+ uint32_t m_instance_id;
+
+ // Team Scratch Level 1 Space
+ int m_n_team_scratch = 10;
+ mutable int64_t m_team_scratch_current_size[10];
+ mutable void* m_team_scratch_ptr[10];
+ mutable std::atomic_int m_team_scratch_pool[10];
+ int32_t* m_scratch_locks;
+ size_t m_num_scratch_locks;
+
+ bool was_initialized = false;
+ bool was_finalized = false;
+
+ static std::set<int> cuda_devices;
+ static std::map<int, unsigned long*> constantMemHostStagingPerDevice;
+ static std::map<int, cudaEvent_t> constantMemReusablePerDevice;
+ static std::map<int, std::mutex> constantMemMutexPerDevice;
+
+ static CudaInternal& singleton();
+
+ int verify_is_initialized(const char* const label) const;
+
+ int is_initialized() const {
+ return nullptr != m_scratchSpace && nullptr != m_scratchFlags;
+ }
+
+ void initialize(cudaStream_t stream);
+ void finalize();
+
+ void print_configuration(std::ostream&) const;
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+ static bool cuda_use_serial_execution();
+ static void cuda_set_serial_execution(bool);
+#endif
+
+ void fence(const std::string&) const;
+ void fence() const;
+
+ ~CudaInternal();
+
+ CudaInternal()
+ : m_scratchSpaceCount(0),
+ m_scratchFlagsCount(0),
+ m_scratchUnifiedCount(0),
+ m_scratchFunctorSize(0),
+ m_scratchSpace(nullptr),
+ m_scratchFlags(nullptr),
+ m_scratchUnified(nullptr),
+ m_scratchFunctor(nullptr),
+ m_stream(nullptr),
+ m_instance_id(
+ Kokkos::Tools::Experimental::Impl::idForInstance<Kokkos::Cuda>(
+ reinterpret_cast<uintptr_t>(this))) {
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ m_team_scratch_pool[i] = 0;
+ }
+ }
+
+ // CUDA API functions and objects operate with respect to device 0 unless
+ // cudaSetDevice(device_id) is called with the correct device_id. The
+ // correct device_id is stored in the variable CudaInternal::m_cudaDev,
+ // set in Cuda::impl_initialize(). It is not sufficient to call
+ // cudaSetDevice(m_cudaDev) only during initialization, however: if a user
+ // creates a new thread, that thread is given the default CUDA context
+ // with device_id=0, causing errors when device_id!=0 is requested by the
+ // user. To guard against this, almost all CUDA API calls, as well as all
+ // uses of cudaStream_t variables, must be preceded by
+ // cudaSetDevice(device_id).
+
+ // This function sets the CUDA API's active device to the device requested
+ // at runtime (stored in m_cudaDev).
+ void set_cuda_device() const {
+ verify_is_initialized("set_cuda_device");
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev));
+ }
+
+ // Return the class stream, optionally setting the device id.
+ template <bool setCudaDevice = true>
+ cudaStream_t get_stream() const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return m_stream;
+ }
+
+ // The following are wrappers for CUDA API functions (C and C++ routines)
+ // that set the correct device id directly before the API call (unless
+ // explicitly disabled via the setCudaDevice=false template argument).
+ // setCudaDevice=true should be used for all API calls that take a stream,
+ // unless the call is guaranteed to come from a Cuda instance with the
+ // correct device already set (e.g., back-to-back CUDA API calls in a
+ // single function). For CUDA API functions that take a stream, an
+ // optional input stream is available. If no stream is given, the stream
+ // of this CudaInternal instance is used. All CUDA API calls should be
+ // wrapped in these interface functions to ensure thread safety.
+
+ // Helper function for selecting the correct input stream
+ cudaStream_t get_input_stream(cudaStream_t s) const {
+ return s == nullptr ? get_stream<false>() : s;
+ }
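+
+ // Example (an illustrative sketch; dst, src, nbytes, and s are
+ // hypothetical): the same wrapper serves the instance's own stream and an
+ // explicit one:
+ //   KOKKOS_IMPL_CUDA_SAFE_CALL((internal.cuda_memcpy_async_wrapper(
+ //       dst, src, nbytes, cudaMemcpyDefault))); // uses m_stream
+ //   KOKKOS_IMPL_CUDA_SAFE_CALL((internal.cuda_memcpy_async_wrapper(
+ //       dst, src, nbytes, cudaMemcpyDefault, s))); // uses stream s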
+
+ // C API routines
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_device_get_limit_wrapper(size_t* pValue,
+ cudaLimit limit) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaDeviceGetLimit(pValue, limit);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_device_set_limit_wrapper(cudaLimit limit,
+ size_t value) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaDeviceSetLimit(limit, value);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_event_create_wrapper(cudaEvent_t* event) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaEventCreate(event);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_event_destroy_wrapper(cudaEvent_t event) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaEventDestroy(event);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_event_record_wrapper(cudaEvent_t event,
+ cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaEventRecord(event, get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_event_synchronize_wrapper(cudaEvent_t event) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaEventSynchronize(event);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_free_wrapper(void* devPtr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaFree(devPtr);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_free_host_wrapper(void* ptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaFreeHost(ptr);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_add_dependencies_wrapper(
+ cudaGraph_t graph, const cudaGraphNode_t* from, const cudaGraphNode_t* to,
+ size_t numDependencies) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphAddDependencies(graph, from, to, numDependencies);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_add_empty_node_wrapper(
+ cudaGraphNode_t* pGraphNode, cudaGraph_t graph,
+ const cudaGraphNode_t* pDependencies, size_t numDependencies) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphAddEmptyNode(pGraphNode, graph, pDependencies,
+ numDependencies);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_add_kernel_node_wrapper(
+ cudaGraphNode_t* pGraphNode, cudaGraph_t graph,
+ const cudaGraphNode_t* pDependencies, size_t numDependencies,
+ const cudaKernelNodeParams* pNodeParams) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphAddKernelNode(pGraphNode, graph, pDependencies,
+ numDependencies, pNodeParams);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_create_wrapper(cudaGraph_t* pGraph,
+ unsigned int flags) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphCreate(pGraph, flags);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_destroy_wrapper(cudaGraph_t graph) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphDestroy(graph);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_exec_destroy_wrapper(cudaGraphExec_t graphExec) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphExecDestroy(graphExec);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_launch_wrapper(cudaGraphExec_t graphExec,
+ cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphLaunch(graphExec, get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_host_alloc_wrapper(void** pHost, size_t size,
+ unsigned int flags) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaHostAlloc(pHost, size, flags);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_malloc_wrapper(void** devPtr, size_t size) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMalloc(devPtr, size);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_malloc_host_wrapper(void** ptr, size_t size) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMallocHost(ptr, size);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_malloc_managed_wrapper(
+ void** devPtr, size_t size,
+ unsigned int flags = cudaMemAttachGlobal) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMallocManaged(devPtr, size, flags);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_mem_advise_wrapper(const void* devPtr, size_t count,
+ cudaMemoryAdvise advice,
+ int device) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemAdvise(devPtr, count, advice, device);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_mem_prefetch_async_wrapper(
+ const void* devPtr, size_t count, int dstDevice,
+ cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemPrefetchAsync(devPtr, count, dstDevice,
+ get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_memcpy_wrapper(void* dst, const void* src, size_t count,
+ cudaMemcpyKind kind) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemcpy(dst, src, count, kind);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_memcpy_async_wrapper(void* dst, const void* src,
+ size_t count, cudaMemcpyKind kind,
+ cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemcpyAsync(dst, src, count, kind, get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_memcpy_to_symbol_async_wrapper(
+ const void* symbol, const void* src, size_t count, size_t offset,
+ cudaMemcpyKind kind, cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemcpyToSymbolAsync(symbol, src, count, offset, kind,
+ get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_memset_wrapper(void* devPtr, int value, size_t count) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemset(devPtr, value, count);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_memset_async_wrapper(void* devPtr, int value, size_t count,
+ cudaStream_t stream = nullptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaMemsetAsync(devPtr, value, count, get_input_stream(stream));
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_pointer_get_attributes_wrapper(
+ cudaPointerAttributes* attributes, const void* ptr) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaPointerGetAttributes(attributes, ptr);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_stream_create_wrapper(cudaStream_t* pStream) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaStreamCreate(pStream);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_stream_destroy_wrapper(cudaStream_t stream) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaStreamDestroy(stream);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_stream_synchronize_wrapper(cudaStream_t stream) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaStreamSynchronize(stream);
+ }
+
+ // C++ API routines
+ template <typename T, bool setCudaDevice = true>
+ cudaError_t cuda_func_get_attributes_wrapper(cudaFuncAttributes* attr,
+ T* entry) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaFuncGetAttributes(attr, entry);
+ }
+
+ template <typename T, bool setCudaDevice = true>
+ cudaError_t cuda_func_set_attribute_wrapper(T* entry, cudaFuncAttribute attr,
+ int value) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaFuncSetAttribute(entry, attr, value);
+ }
+
+ template <bool setCudaDevice = true>
+ cudaError_t cuda_graph_instantiate_wrapper(cudaGraphExec_t* pGraphExec,
+ cudaGraph_t graph,
+ cudaGraphNode_t* pErrorNode,
+ char* pLogBuffer,
+ size_t bufferSize) const {
+ if constexpr (setCudaDevice) set_cuda_device();
+ return cudaGraphInstantiate(pGraphExec, graph, pErrorNode, pLogBuffer,
+ bufferSize);
+ }
+
+ // Resizing of reduction related scratch spaces
+ size_type* scratch_space(const std::size_t size) const;
+ size_type* scratch_flags(const std::size_t size) const;
+ size_type* scratch_unified(const std::size_t size) const;
+ size_type* scratch_functor(const std::size_t size) const;
+ uint32_t impl_get_instance_id() const;
+ int acquire_team_scratch_space();
+ // Resizing of team level 1 scratch
+ void* resize_team_scratch_space(int scratch_pool_id, std::int64_t bytes,
+ bool force_shrink = false);
+ void release_team_scratch_space(int scratch_pool_id);
+};
+
+void create_Cuda_instances(std::vector<Cuda>& instances);
+} // namespace Impl
+
+namespace Experimental {
+// Partitioning an Execution Space: expects the space plus one arithmetic
+// argument per partition, giving its relative weight.
+// This is the Cuda specialization of the backend customization point
+// (the generic default returns the passed-in instance unchanged).
+
+template <class... Args>
+std::vector<Cuda> partition_space(const Cuda&, Args...) {
+ static_assert(
+ (... && std::is_arithmetic_v<Args>),
+ "Kokkos Error: partitioning arguments must be integers or floats");
+ std::vector<Cuda> instances(sizeof...(Args));
+ Kokkos::Impl::create_Cuda_instances(instances);
+ return instances;
+}
+
+template <class T>
+std::vector<Cuda> partition_space(const Cuda&, std::vector<T> const& weights) {
+ static_assert(
+ std::is_arithmetic<T>::value,
+ "Kokkos Error: partitioning arguments must be integers or floats");
+
+ // We only care about the number of instances to create and ignore weights
+ // otherwise.
+ std::vector<Cuda> instances(weights.size());
+ Kokkos::Impl::create_Cuda_instances(instances);
+ return instances;
+}
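+
+// Usage sketch (illustrative): create two independent execution space
+// instances, each backed by its own CUDA stream, with equal weights:
+//   auto inst = Kokkos::Experimental::partition_space(Kokkos::Cuda(), 1, 1);
+//   // inst[0] and inst[1] can now launch kernels concurrently.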
+} // namespace Experimental
+
+} // namespace Kokkos
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDAEXEC_HPP
#define KOKKOS_CUDAEXEC_HPP
#ifdef KOKKOS_ENABLE_CUDA
#include <mutex>
-#include <string>
#include <cstdint>
#include <cmath>
#include <Kokkos_Parallel.hpp>
#include <impl/Kokkos_Error.hpp>
#include <Cuda/Kokkos_Cuda_abort.hpp>
#include <Cuda/Kokkos_Cuda_Error.hpp>
-#include <Cuda/Kokkos_Cuda_Locks.hpp>
#include <Cuda/Kokkos_Cuda_Instance.hpp>
#include <impl/Kokkos_GraphImpl_fwd.hpp>
#include <Cuda/Kokkos_Cuda_GraphNodeKernel.hpp>
// __launch_bounds__(maxThreadsPerBlock,minBlocksPerMultiprocessor)
// function qualifier which could be used to improve performance.
//----------------------------------------------------------------------------
-// Maximize L1 cache and minimize shared memory:
-// cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferL1 );
-// For 2.0 capability: 48 KB L1 and 16 KB shared
-//----------------------------------------------------------------------------
template <class DriverType>
__global__ static void cuda_parallel_launch_constant_memory() {
}
inline void check_shmem_request(CudaInternal const* cuda_instance, int shmem) {
- if (cuda_instance->m_maxShmemPerBlock < shmem) {
+ int const maxShmemPerBlock = cuda_instance->m_deviceProp.sharedMemPerBlock;
+ if (maxShmemPerBlock < shmem) {
Kokkos::Impl::throw_runtime_exception(
- std::string("CudaParallelLaunch (or graph node creation) FAILED: shared"
- " memory request is too large"));
+ "CudaParallelLaunch (or graph node creation) FAILED: shared memory "
+ "request is too large");
}
}
-// This function needs to be template on DriverType and LaunchBounds
+// These functions need to be templated on DriverType and LaunchBounds
// so that the static variables are unique for each type combo
// KernelFuncPtr does not necessarily contain that type information.
template <class DriverType, class LaunchBounds, class KernelFuncPtr>
-inline void configure_shmem_preference(KernelFuncPtr const& func,
- bool prefer_shmem) {
+const cudaFuncAttributes& get_cuda_kernel_func_attributes(
+ int cuda_device, const KernelFuncPtr& func) {
+ // Only call cudaFuncGetAttributes once per unique kernel and device by
+ // caching the result in a function-local static map
+ static std::map<int, cudaFuncAttributes> func_attr;
+ if (func_attr.find(cuda_device) == func_attr.end()) {
+ cudaFuncAttributes attr;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(cuda_device));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFuncGetAttributes(&attr, func));
+ func_attr.emplace(cuda_device, attr);
+ }
+ return func_attr[cuda_device];
+}
+
+template <class DriverType, class LaunchBounds, class KernelFuncPtr>
+inline void configure_shmem_preference(const int cuda_device,
+ const KernelFuncPtr& func,
+ const cudaDeviceProp& device_props,
+ const size_t block_size, int& shmem,
+ const size_t occupancy) {
#ifndef KOKKOS_ARCH_KEPLER
- // On Kepler the L1 has no benefit since it doesn't cache reads
+
+ const auto& func_attr =
+ get_cuda_kernel_func_attributes<DriverType, LaunchBounds>(cuda_device,
+ func);
+
+ // Compute limits for number of blocks due to registers/SM
+ const size_t regs_per_sm = device_props.regsPerMultiprocessor;
+ const size_t regs_per_thread = func_attr.numRegs;
+ // The granularity of register allocation is chunks of 256 registers per warp
+ // -> 8 registers per thread
+ const size_t allocated_regs_per_thread = 8 * ((regs_per_thread + 8 - 1) / 8);
+ size_t max_blocks_regs =
+ regs_per_sm / (allocated_regs_per_thread * block_size);
+
+ // Compute the maximum number of warps as a function of the number of
+ // registers
+ const size_t max_warps_per_sm_registers =
+ cuda_max_warps_per_sm_registers(device_props, func_attr);
+
+ // Correct the number of blocks to respect the maximum number of warps per
+ // SM, which is constrained to be a multiple of the warp allocation
+ // granularity defined in `cuda_warp_per_sm_allocation_granularity`.
+ while ((max_blocks_regs * block_size / device_props.warpSize) >
+ max_warps_per_sm_registers)
+ max_blocks_regs--;
+
+ // Compute how many threads per sm we actually want
+ const size_t max_threads_per_sm = device_props.maxThreadsPerMultiProcessor;
+ // only allocate multiples of warp size
+ const size_t num_threads_desired =
+ ((max_threads_per_sm * occupancy / 100 + 31) / 32) * 32;
+ // Get close to the desired occupancy:
+ // don't undershoot by much, but also don't allocate a whole extra block
+ // just because we are a few threads over.
+ size_t num_blocks_desired =
+ (num_threads_desired + block_size * 0.8) / block_size;
+ num_blocks_desired = ::std::min(max_blocks_regs, num_blocks_desired);
+ if (num_blocks_desired == 0) num_blocks_desired = 1;
+
+ // Calculate how much shared memory we need per block
+ size_t shmem_per_block = shmem + func_attr.sharedSizeBytes;
+
+ // The minimum shared memory allocation we can have in total per SM is 8kB.
+ // If we want to lower occupancy we have to make sure we request at least that
+ // much in aggregate over all blocks, so that shared memory actually becomes a
+ // limiting factor for occupancy
+ constexpr size_t min_shmem_size_per_sm = 8192;
+ if ((occupancy < 100) &&
+ (shmem_per_block * num_blocks_desired < min_shmem_size_per_sm)) {
+ shmem_per_block = min_shmem_size_per_sm / num_blocks_desired;
+ // Need to set the caller's shmem variable so that the
+ // kernel launch uses the correct dynamic shared memory request
+ shmem = shmem_per_block - func_attr.sharedSizeBytes;
+ }
+
+ // Compute the carveout fraction we need based on occupancy
+ // Use multiples of 8kB
+ const size_t max_shmem_per_sm = device_props.sharedMemPerMultiprocessor;
+ size_t carveout = shmem_per_block == 0
+ ? 0
+ : 100 *
+ (((num_blocks_desired * shmem_per_block +
+ min_shmem_size_per_sm - 1) /
+ min_shmem_size_per_sm) *
+ min_shmem_size_per_sm) /
+ max_shmem_per_sm;
+ if (carveout > 100) carveout = 100;
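+
+ // Worked example (a sketch with hypothetical numbers): for
+ // num_blocks_desired = 3, shmem_per_block = 5000 B, and a 64 KiB
+ // max_shmem_per_sm, the aggregate 15000 B rounds up to two 8 KiB units
+ // (16384 B), giving carveout = 100 * 16384 / 65536 = 25 percent.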
+
+ // Set the carveout, but only call it once per kernel or when it changes
+ // FIXME_CUDA_MULTIPLE_DEVICES
auto set_cache_config = [&] {
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFuncSetCacheConfig(
- func,
- (prefer_shmem ? cudaFuncCachePreferShared : cudaFuncCachePreferL1)));
- return prefer_shmem;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (CudaInternal::singleton().cuda_func_set_attribute_wrapper(
+ func, cudaFuncAttributePreferredSharedMemoryCarveout, carveout)));
+ return carveout;
};
- static bool cache_config_preference_cached = set_cache_config();
- if (cache_config_preference_cached != prefer_shmem) {
+ // Store the value in a static variable so we only reset if needed
+ static size_t cache_config_preference_cached = set_cache_config();
+ if (cache_config_preference_cached != carveout) {
cache_config_preference_cached = set_cache_config();
}
#else
// Use the parameters so we don't get a warning
(void)func;
- (void)prefer_shmem;
+ (void)cuda_device;
+ (void)device_props;
+ (void)block_size;
+ (void)shmem;
+ (void)occupancy;
#endif
}
-template <class Policy>
-std::enable_if_t<Policy::experimental_contains_desired_occupancy>
-modify_launch_configuration_if_desired_occupancy_is_specified(
- Policy const& policy, cudaDeviceProp const& properties,
- cudaFuncAttributes const& attributes, dim3 const& block, int& shmem,
- bool& prefer_shmem) {
- int const block_size = block.x * block.y * block.z;
- int const desired_occupancy = policy.impl_get_desired_occupancy().value();
-
- size_t const shmem_per_sm_prefer_l1 = get_shmem_per_sm_prefer_l1(properties);
- size_t const static_shmem = attributes.sharedSizeBytes;
-
- // round to nearest integer and avoid division by zero
- int active_blocks = std::max(
- 1, static_cast<int>(std::round(
- static_cast<double>(properties.maxThreadsPerMultiProcessor) /
- block_size * desired_occupancy / 100)));
- int const dynamic_shmem =
- shmem_per_sm_prefer_l1 / active_blocks - static_shmem;
-
- if (dynamic_shmem > shmem) {
- shmem = dynamic_shmem;
- prefer_shmem = false;
- }
-}
-
-template <class Policy>
-std::enable_if_t<!Policy::experimental_contains_desired_occupancy>
-modify_launch_configuration_if_desired_occupancy_is_specified(
- Policy const&, cudaDeviceProp const&, cudaFuncAttributes const&,
- dim3 const& /*block*/, int& /*shmem*/, bool& /*prefer_shmem*/) {}
-
// </editor-fold> end Some helper functions for launch code readability }}}1
//==============================================================================
static void invoke_kernel(DriverType const& driver, dim3 const& grid,
dim3 const& block, int shmem,
CudaInternal const* cuda_instance) {
- (base_t::
- get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>(
- driver);
+ (base_t::get_kernel_func())<<<grid, block, shmem,
+ cuda_instance->get_stream()>>>(driver);
}
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
inline static void create_parallel_launch_graph_node(
DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
- CudaInternal const* cuda_instance, bool prefer_shmem) {
+ CudaInternal const* cuda_instance) {
//----------------------------------------
auto const& graph = Impl::get_cuda_graph_from_kernel(driver);
KOKKOS_EXPECTS(bool(graph));
if (!Impl::is_empty_launch(grid, block)) {
Impl::check_shmem_request(cuda_instance, shmem);
- Impl::configure_shmem_preference<DriverType, LaunchBounds>(
- base_t::get_kernel_func(), prefer_shmem);
+ if constexpr (DriverType::Policy::
+ experimental_contains_desired_occupancy) {
+ int desired_occupancy =
+ driver.get_policy().impl_get_desired_occupancy().value();
+ size_t block_size = block.x * block.y * block.z;
+ Impl::configure_shmem_preference<DriverType, LaunchBounds>(
+ cuda_instance->m_cudaDev, base_t::get_kernel_func(),
+ cuda_instance->m_deviceProp, block_size, shmem, desired_occupancy);
+ }
void const* args[] = {&driver};
params.kernelParams = (void**)args;
params.extra = nullptr;
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphAddKernelNode(
- &graph_node, graph, /* dependencies = */ nullptr,
- /* numDependencies = */ 0, ¶ms));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_instance->cuda_graph_add_kernel_node_wrapper(
+ &graph_node, graph, /* dependencies = */ nullptr,
+ /* numDependencies = */ 0, ¶ms)));
} else {
// We still need an empty node for the dependency structure
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphAddEmptyNode(&graph_node, graph,
- /* dependencies = */ nullptr,
- /* numDependencies = */ 0));
+ (cuda_instance->cuda_graph_add_empty_node_wrapper(
+ &graph_node, graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0)));
}
KOKKOS_ENSURES(bool(graph_node))
}
-#endif
};
// </editor-fold> end local memory }}}2
DriverType* driver_ptr = reinterpret_cast<DriverType*>(
cuda_instance->scratch_functor(sizeof(DriverType)));
- cudaMemcpyAsync(driver_ptr, &driver, sizeof(DriverType), cudaMemcpyDefault,
- cuda_instance->m_stream);
- (base_t::
- get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>(
- driver_ptr);
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_instance->cuda_memcpy_async_wrapper(
+ driver_ptr, &driver, sizeof(DriverType), cudaMemcpyDefault)));
+ (base_t::get_kernel_func())<<<grid, block, shmem,
+ cuda_instance->get_stream()>>>(driver_ptr);
}
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
inline static void create_parallel_launch_graph_node(
DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
- CudaInternal const* cuda_instance, bool prefer_shmem) {
+ CudaInternal const* cuda_instance) {
//----------------------------------------
auto const& graph = Impl::get_cuda_graph_from_kernel(driver);
KOKKOS_EXPECTS(bool(graph));
if (!Impl::is_empty_launch(grid, block)) {
Impl::check_shmem_request(cuda_instance, shmem);
- Impl::configure_shmem_preference<DriverType, LaunchBounds>(
- base_t::get_kernel_func(), prefer_shmem);
-
- auto* driver_ptr = Impl::allocate_driver_storage_for_kernel(driver);
+ if constexpr (DriverType::Policy::
+ experimental_contains_desired_occupancy) {
+ int desired_occupancy =
+ driver.get_policy().impl_get_desired_occupancy().value();
+ size_t block_size = block.x * block.y * block.z;
+ Impl::configure_shmem_preference<DriverType, LaunchBounds>(
+ cuda_instance->m_cudaDev, base_t::get_kernel_func(),
+ cuda_instance->m_deviceProp, block_size, shmem, desired_occupancy);
+ }
+
+ auto* driver_ptr = Impl::allocate_driver_storage_for_kernel(
+ CudaSpace::impl_create(cuda_instance->m_cudaDev,
+ cuda_instance->m_stream),
+ driver);
// Unlike in the non-graph case, we can get away with doing an async copy
// here because the `DriverType` instance is held in the GraphNodeImpl
// which is guaranteed to be alive until the graph instance itself is
// destroyed, where there should be a fence ensuring that the allocation
// associated with this kernel on the device side isn't deleted.
- cudaMemcpyAsync(driver_ptr, &driver, sizeof(DriverType),
- cudaMemcpyDefault, cuda_instance->m_stream);
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_instance->cuda_memcpy_async_wrapper(
+ driver_ptr, &driver, sizeof(DriverType), cudaMemcpyDefault)));
void const* args[] = {&driver_ptr};
params.kernelParams = (void**)args;
params.extra = nullptr;
- KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphAddKernelNode(
- &graph_node, graph, /* dependencies = */ nullptr,
- /* numDependencies = */ 0, ¶ms));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_instance->cuda_graph_add_kernel_node_wrapper(
+ &graph_node, graph, /* dependencies = */ nullptr,
+ /* numDependencies = */ 0, ¶ms)));
} else {
// We still need an empty node for the dependency structure
KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaGraphAddEmptyNode(&graph_node, graph,
- /* dependencies = */ nullptr,
- /* numDependencies = */ 0));
+ (cuda_instance->cuda_graph_add_empty_node_wrapper(
+ &graph_node, graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0)));
}
KOKKOS_ENSURES(bool(graph_node))
}
-#endif
};
// </editor-fold> end Global Memory }}}2
static void invoke_kernel(DriverType const& driver, dim3 const& grid,
dim3 const& block, int shmem,
CudaInternal const* cuda_instance) {
+ int cuda_device = cuda_instance->m_cudaDev;
// Wait until the previous kernel that uses the constant buffer is done
- std::lock_guard<std::mutex> lock(CudaInternal::constantMemMutex);
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaEventSynchronize(CudaInternal::constantMemReusable));
+ std::lock_guard<std::mutex> lock(
+ CudaInternal::constantMemMutexPerDevice[cuda_device]);
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_instance->cuda_event_synchronize_wrapper(
+ CudaInternal::constantMemReusablePerDevice[cuda_device])));
// Copy functor (synchronously) to staging buffer in pinned host memory
- unsigned long* staging = cuda_instance->constantMemHostStaging;
+ unsigned long* staging =
+ cuda_instance->constantMemHostStagingPerDevice[cuda_device];
memcpy(staging, &driver, sizeof(DriverType));
// Copy functor asynchronously from there to constant memory on the device
- cudaMemcpyToSymbolAsync(kokkos_impl_cuda_constant_memory_buffer, staging,
- sizeof(DriverType), 0, cudaMemcpyHostToDevice,
- cudaStream_t(cuda_instance->m_stream));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (cuda_instance->cuda_memcpy_to_symbol_async_wrapper(
+ kokkos_impl_cuda_constant_memory_buffer, staging,
+ sizeof(DriverType), 0, cudaMemcpyHostToDevice)));
// Invoke the driver function on the device
- (base_t::
- get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>();
+ (base_t::get_kernel_func())<<<grid, block, shmem,
+ cuda_instance->get_stream()>>>();
// Record an event that says when the constant buffer can be reused
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaEventRecord(CudaInternal::constantMemReusable,
- cudaStream_t(cuda_instance->m_stream)));
+ KOKKOS_IMPL_CUDA_SAFE_CALL((cuda_instance->cuda_event_record_wrapper(
+ CudaInternal::constantMemReusablePerDevice[cuda_device])));
}
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
inline static void create_parallel_launch_graph_node(
DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
- CudaInternal const* cuda_instance, bool prefer_shmem) {
+ CudaInternal const* cuda_instance) {
// Just use global memory; coordinating through events to share constant
// memory with the non-graph interface is not really reasonable since
// events don't work with Graphs directly, and this would anyway require
DriverType, LaunchBounds,
Experimental::CudaLaunchMechanism::GlobalMemory>;
global_launch_impl_t::create_parallel_launch_graph_node(
- driver, grid, block, shmem, cuda_instance, prefer_shmem);
+ driver, grid, block, shmem, cuda_instance);
}
-#endif
};
// </editor-fold> end Constant Memory }}}2
inline static void launch_kernel(const DriverType& driver, const dim3& grid,
const dim3& block, int shmem,
- const CudaInternal* cuda_instance,
- bool prefer_shmem) {
+ const CudaInternal* cuda_instance) {
if (!Impl::is_empty_launch(grid, block)) {
// Prevent multiple threads to simultaneously set the cache configuration
// preference and launch the same kernel
Impl::check_shmem_request(cuda_instance, shmem);
- // If a desired occupancy is specified, we compute how much shared memory
- // to ask for to achieve that occupancy, assuming that the cache
- // configuration is `cudaFuncCachePreferL1`. If the amount of dynamic
- // shared memory computed is actually smaller than `shmem` we overwrite
- // `shmem` and set `prefer_shmem` to `false`.
- modify_launch_configuration_if_desired_occupancy_is_specified(
- driver.get_policy(), cuda_instance->m_deviceProp,
- get_cuda_func_attributes(), block, shmem, prefer_shmem);
+ if constexpr (DriverType::Policy::
+ experimental_contains_desired_occupancy) {
+ int desired_occupancy =
+ driver.get_policy().impl_get_desired_occupancy().value();
+ size_t block_size = block.x * block.y * block.z;
+ Impl::configure_shmem_preference<
+ DriverType,
+ Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>>(
+ cuda_instance->m_cudaDev, base_t::get_kernel_func(),
+ cuda_instance->m_deviceProp, block_size, shmem, desired_occupancy);
+ }
- Impl::configure_shmem_preference<
- DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>>(
- base_t::get_kernel_func(), prefer_shmem);
-
- ensure_cuda_lock_arrays_on_device();
+ desul::ensure_cuda_lock_arrays_on_device();
// Invoke the driver function on the device
base_t::invoke_kernel(driver, grid, block, shmem, cuda_instance);
}
}
- static cudaFuncAttributes get_cuda_func_attributes() {
- // Race condition inside of cudaFuncGetAttributes if the same address is
- // given requires using a local variable as input instead of a static Rely
- // on static variable initialization to make sure only one thread executes
- // the code and the result is visible.
- auto wrap_get_attributes = []() -> cudaFuncAttributes {
- cudaFuncAttributes attr_tmp;
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaFuncGetAttributes(&attr_tmp, base_t::get_kernel_func()));
- return attr_tmp;
- };
- static cudaFuncAttributes attr = wrap_get_attributes();
- return attr;
+ static cudaFuncAttributes get_cuda_func_attributes(int cuda_device) {
+ return get_cuda_kernel_func_attributes<
+ DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>>(
+ cuda_device, base_t::get_kernel_func());
}
};
template <class DriverType, class LaunchBounds = Kokkos::LaunchBounds<>,
Experimental::CudaLaunchMechanism LaunchMechanism =
DeduceCudaLaunchMechanism<DriverType>::launch_mechanism,
- bool DoGraph = DriverType::Policy::is_graph_kernel::value
-#ifndef KOKKOS_CUDA_ENABLE_GRAPHS
- && false
-#endif
- >
+ bool DoGraph = DriverType::Policy::is_graph_kernel::value>
struct CudaParallelLaunch;
// General launch mechanism
CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism>;
template <class... Args>
CudaParallelLaunch(Args&&... args) {
- base_t::launch_kernel((Args &&) args...);
+ base_t::launch_kernel((Args&&)args...);
}
};
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
// Launch mechanism for creating graph nodes
template <class DriverType, class LaunchBounds,
Experimental::CudaLaunchMechanism LaunchMechanism>
CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism>;
template <class... Args>
CudaParallelLaunch(Args&&... args) {
- base_t::create_parallel_launch_graph_node((Args &&) args...);
+ base_t::create_parallel_launch_graph_node((Args&&)args...);
}
};
-#endif
// </editor-fold> end CudaParallelLaunch }}}1
//==============================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_MDRANGEPOLICY_HPP_
+#define KOKKOS_CUDA_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<Kokkos::Cuda> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<Kokkos::Cuda> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<Kokkos::Cuda>(
+ const Kokkos::Cuda& space) {
+ TileSizeProperties properties;
+ properties.max_threads = space.impl_internal_space_instance()
+ ->m_deviceProp.maxThreadsPerMultiProcessor;
+ properties.default_largest_tile_size = 16;
+ properties.default_tile_size = 2;
+ properties.max_total_tile_size = 512;
+ return properties;
+}
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Cuda, ThreadAndVector>
+ : AcceleratorBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // Namespace Impl
+} // Namespace Kokkos
+#endif
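// Usage sketch for the defaults defined above: explicit tile sizes (the
// 16x16 choice here is illustrative) override default_tile_size and friends;
// Kokkos must already be initialized.
#include <Kokkos_Core.hpp>

void scale_matrix(Kokkos::View<double**, Kokkos::CudaSpace> a, double s) {
  Kokkos::MDRangePolicy<Kokkos::Cuda, Kokkos::Rank<2>, Kokkos::IndexType<int>>
      policy({0, 0}, {int(a.extent(0)), int(a.extent(1))},
             {16, 16} /* explicit tile sizes */);
  Kokkos::parallel_for(
      "scale", policy, KOKKOS_LAMBDA(int i, int j) { a(i, j) *= s; });
}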
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_PARALLEL_MD_RANGE_HPP
#define KOKKOS_CUDA_PARALLEL_MD_RANGE_HPP
#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
-#include <Kokkos_MinMaxClamp.hpp>
#include <impl/Kokkos_Tools.hpp>
#include <typeinfo>
namespace Kokkos {
namespace Impl {
+template <typename ParallelType, typename Policy, typename LaunchBounds>
+int max_tile_size_product_helper(const Policy& pol, const LaunchBounds&) {
+ cudaFuncAttributes attr =
+ CudaParallelLaunch<ParallelType, LaunchBounds>::get_cuda_func_attributes(
+ pol.space().cuda_device());
+ auto const& prop = pol.space().cuda_device_prop();
+
+  // Limits due to registers/SM; MDRange doesn't have
+  // shared memory constraints
+ int const optimal_block_size =
+ cuda_get_opt_block_size_no_shmem(prop, attr, LaunchBounds{});
+
+ // Compute how many blocks of this size we can launch, based on warp
+ // constraints
+ int const max_warps_per_sm_registers =
+ Kokkos::Impl::cuda_max_warps_per_sm_registers(prop, attr);
+ int const max_num_threads_from_warps =
+ max_warps_per_sm_registers * prop.warpSize;
+ int const max_num_blocks = max_num_threads_from_warps / optimal_block_size;
+
+ // Compute the total number of threads
+ int const max_threads_per_sm = optimal_block_size * max_num_blocks;
+
+ return std::min(
+ max_threads_per_sm,
+ static_cast<int>(Kokkos::Impl::CudaTraits::MaxHierarchicalParallelism));
+}
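// Worked example of the limit computed above, with assumed (hypothetical)
// numbers; the 512 cap stands in for CudaTraits::MaxHierarchicalParallelism:
#include <algorithm>

constexpr int regs_per_sm = 65536;   // registers per SM (typical)
constexpr int regs_per_thread = 64;  // assumed kernel register footprint
constexpr int warp_size = 32;
constexpr int max_warps = regs_per_sm / (regs_per_thread * warp_size);  // 32
constexpr int opt_block = 256;  // assumed occupancy-optimal block size
constexpr int max_blocks = max_warps * warp_size / opt_block;  // 4
constexpr int cap = 512;        // assumed hierarchical-parallelism cap
constexpr int tile_product = std::min(opt_block * max_blocks, cap);
static_assert(tile_product == 512, "register limit allows the full cap here");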
+
template <class FunctorType, class... Traits>
class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>, Kokkos::Cuda> {
public:
public:
template <typename Policy, typename Functor>
static int max_tile_size_product(const Policy& pol, const Functor&) {
- cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelFor,
- LaunchBounds>::get_cuda_func_attributes();
- auto const& prop = pol.space().cuda_device_prop();
- // Limits due to registers/SM, MDRange doesn't have
- // shared memory constraints
- int const regs_per_sm = prop.regsPerMultiprocessor;
- int const regs_per_thread = attr.numRegs;
- int const max_threads_per_sm = regs_per_sm / regs_per_thread;
- return std::min(
- max_threads_per_sm,
- static_cast<int>(Kokkos::Impl::CudaTraits::MaxHierarchicalParallelism));
+ return max_tile_size_product_helper<ParallelFor>(pol, LaunchBounds{});
}
Policy const& get_policy() const { return m_rp; }
inline __device__ void operator()() const {
inline void execute() const {
if (m_rp.m_num_tiles == 0) return;
- const auto maxblocks = cuda_internal_maximum_grid_count();
+ const auto maxblocks = m_rp.space().cuda_device_prop().maxGridSize;
+ const auto maxthreads = m_rp.space().cuda_device_prop().maxThreadsDim;
+ [[maybe_unused]] const auto maxThreadsPerBlock =
+ m_rp.space().cuda_device_prop().maxThreadsPerBlock;
+  // make sure the Z block dimension (whose limit is lower than the x,y
+  // limits) isn't exceeded
+ const auto clampZ = [&](const int input) {
+ return (input > maxthreads[2] ? maxthreads[2] : input);
+ };
+ // make sure the block dimensions don't exceed the max number of threads
+ // allowed
+ const auto check_block_sizes = [&]([[maybe_unused]] const dim3& block) {
+ KOKKOS_ASSERT(block.x > 0 &&
+ block.x <= static_cast<unsigned int>(maxthreads[0]));
+ KOKKOS_ASSERT(block.y > 0 &&
+ block.y <= static_cast<unsigned int>(maxthreads[1]));
+ KOKKOS_ASSERT(block.z > 0 &&
+ block.z <= static_cast<unsigned int>(maxthreads[2]));
+ KOKKOS_ASSERT(block.x * block.y * block.z <=
+ static_cast<unsigned int>(maxThreadsPerBlock));
+ };
+ // make sure the grid dimensions don't exceed the max number of blocks
+ // allowed
+ const auto check_grid_sizes = [&]([[maybe_unused]] const dim3& grid) {
+ KOKKOS_ASSERT(grid.x > 0 &&
+ grid.x <= static_cast<unsigned int>(maxblocks[0]));
+ KOKKOS_ASSERT(grid.y > 0 &&
+ grid.y <= static_cast<unsigned int>(maxblocks[1]));
+ KOKKOS_ASSERT(grid.z > 0 &&
+ grid.z <= static_cast<unsigned int>(maxblocks[2]));
+ };
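// For context, the limits consulted above are, on current CUDA devices,
// maxThreadsDim = {1024, 1024, 64} with maxThreadsPerBlock = 1024, which is
// why only the z extent needs clamping. A standalone sketch of the clamp:
#include <cuda_runtime.h>

inline unsigned clamp_block_z(unsigned requested, const cudaDeviceProp& prop) {
  unsigned const max_z = static_cast<unsigned>(prop.maxThreadsDim[2]);  // 64
  return requested > max_z ? max_z : requested;
}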
if (RP::rank == 2) {
const dim3 block(m_rp.m_tile[0], m_rp.m_tile[1], 1);
- KOKKOS_ASSERT(block.x > 0);
- KOKKOS_ASSERT(block.y > 0);
+ check_block_sizes(block);
const dim3 grid(
std::min<array_index_type>(
(m_rp.m_upper[0] - m_rp.m_lower[0] + block.x - 1) / block.x,
(m_rp.m_upper[1] - m_rp.m_lower[1] + block.y - 1) / block.y,
maxblocks[1]),
1);
+ check_grid_sizes(grid);
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_rp.space().impl_internal_space_instance());
} else if (RP::rank == 3) {
- const dim3 block(m_rp.m_tile[0], m_rp.m_tile[1], m_rp.m_tile[2]);
- KOKKOS_ASSERT(block.x > 0);
- KOKKOS_ASSERT(block.y > 0);
- KOKKOS_ASSERT(block.z > 0);
+ const dim3 block(m_rp.m_tile[0], m_rp.m_tile[1], clampZ(m_rp.m_tile[2]));
+ check_block_sizes(block);
const dim3 grid(
std::min<array_index_type>(
(m_rp.m_upper[0] - m_rp.m_lower[0] + block.x - 1) / block.x,
std::min<array_index_type>(
(m_rp.m_upper[2] - m_rp.m_lower[2] + block.z - 1) / block.z,
maxblocks[2]));
+ // ensure we don't exceed the capability of the device
+ check_grid_sizes(grid);
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_rp.space().impl_internal_space_instance());
} else if (RP::rank == 4) {
// id0,id1 encoded within threadIdx.x; id2 to threadIdx.y; id3 to
// threadIdx.z
const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1], m_rp.m_tile[2],
- m_rp.m_tile[3]);
- KOKKOS_ASSERT(block.y > 0);
- KOKKOS_ASSERT(block.z > 0);
+ clampZ(m_rp.m_tile[3]));
+ check_block_sizes(block);
const dim3 grid(
std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
maxblocks[0]),
std::min<array_index_type>(
(m_rp.m_upper[3] - m_rp.m_lower[3] + block.z - 1) / block.z,
maxblocks[2]));
+ check_grid_sizes(grid);
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_rp.space().impl_internal_space_instance());
} else if (RP::rank == 5) {
// id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4 to
// threadIdx.z
const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1],
- m_rp.m_tile[2] * m_rp.m_tile[3], m_rp.m_tile[4]);
- KOKKOS_ASSERT(block.z > 0);
+ m_rp.m_tile[2] * m_rp.m_tile[3], clampZ(m_rp.m_tile[4]));
+ check_block_sizes(block);
const dim3 grid(
std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
maxblocks[0]),
std::min<array_index_type>(
(m_rp.m_upper[4] - m_rp.m_lower[4] + block.z - 1) / block.z,
maxblocks[2]));
+ check_grid_sizes(grid);
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_rp.space().impl_internal_space_instance());
} else if (RP::rank == 6) {
// id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4,id5 to
// threadIdx.z
const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1],
m_rp.m_tile[2] * m_rp.m_tile[3],
- m_rp.m_tile[4] * m_rp.m_tile[5]);
+ clampZ(m_rp.m_tile[4] * m_rp.m_tile[5]));
+ check_block_sizes(block);
const dim3 grid(
std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
maxblocks[0]),
maxblocks[1]),
std::min<array_index_type>(m_rp.m_tile_end[4] * m_rp.m_tile_end[5],
maxblocks[2]));
+ check_grid_sizes(grid);
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_rp.space().impl_internal_space_instance());
} else {
Kokkos::abort("Kokkos::MDRange Error: Exceeded rank bounds with Cuda\n");
}
: m_functor(arg_functor), m_rp(arg_policy) {}
};
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
- Kokkos::Cuda> {
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, Kokkos::Cuda> {
public:
- using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
private:
using array_index_type = typename Policy::array_index_type;
using Member = typename Policy::member_type;
using LaunchBounds = typename Policy::launch_bounds;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis =
- Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
- ReducerTypeFwd>;
-
public:
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
using functor_type = FunctorType;
using size_type = Cuda::size_type;
using reducer_type = ReducerType;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::Cuda::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the reduction is performed.
+ // Within the reduction, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the reduction, word_size_type is used again.
+  // For scalars > 4 bytes in size, indexing into shared/global memory relies
+  // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that, when the join is
+  // performed, we have the correct data that was copied over in chunks of 4
+  // bytes.
+ static_assert(sizeof(size_type) == 4);
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < 4,
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
+
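// Compile-time illustration of the selection above; the alias template is
// ours, and unsigned int stands in for the 32-bit Kokkos::Cuda::size_type
// asserted above.
#include <cstdint>
#include <type_traits>

template <class ValueType>
using word_size_for = std::conditional_t<
    sizeof(ValueType) < 4,
    std::conditional_t<sizeof(ValueType) == 2, std::int16_t, std::int8_t>,
    unsigned int>;

static_assert(std::is_same_v<word_size_for<char>, std::int8_t>);
static_assert(std::is_same_v<word_size_for<short>, std::int16_t>);
static_assert(std::is_same_v<word_size_for<double>, unsigned int>);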
// Algorithmic constraints: blockSize is a power of two AND blockDim.y ==
// blockDim.z == 1
- const FunctorType m_functor;
+ const CombinedFunctorReducerType m_functor_reducer;
const Policy m_policy; // used for workrange and nwork
- const ReducerType m_reducer;
const pointer_type m_result_ptr;
const bool m_result_ptr_device_accessible;
- size_type* m_scratch_space;
+ word_size_type* m_scratch_space;
size_type* m_scratch_flags;
- size_type* m_unified_space;
+ word_size_type* m_unified_space;
using DeviceIteratePattern = typename Kokkos::Impl::Reduce::DeviceIterateTile<
Policy::rank, Policy, FunctorType, typename Policy::work_tag,
// Shall we use the shfl based reduction or not (only use it for static sized
// types of more than 128bit
static constexpr bool UseShflReduction = false;
- //((sizeof(value_type)>2*sizeof(double)) && Analysis::StaticValueSize)
+ //((sizeof(value_type)>2*sizeof(double)) && ReducerType::static_value_size())
// Some crutch to do function overloading
public:
template <typename Policy, typename Functor>
static int max_tile_size_product(const Policy& pol, const Functor&) {
- cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelReduce,
- LaunchBounds>::get_cuda_func_attributes();
- auto const& prop = pol.space().cuda_device_prop();
- // Limits due do registers/SM
- int const regs_per_sm = prop.regsPerMultiprocessor;
- int const regs_per_thread = attr.numRegs;
- int const max_threads_per_sm = regs_per_sm / regs_per_thread;
- return std::min(
- max_threads_per_sm,
- static_cast<int>(Kokkos::Impl::CudaTraits::MaxHierarchicalParallelism));
+ return max_tile_size_product_helper<ParallelReduce>(pol, LaunchBounds{});
}
Policy const& get_policy() const { return m_policy; }
inline __device__ void exec_range(reference_type update) const {
Kokkos::Impl::Reduce::DeviceIterateTile<Policy::rank, Policy, FunctorType,
typename Policy::work_tag,
- reference_type>(m_policy, m_functor,
- update)
+ reference_type>(
+ m_policy, m_functor_reducer.get_functor(), update)
.exec_range();
}
inline __device__ void operator()() const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) /
- sizeof(size_type));
+ const integral_nonzero_constant<word_size_type,
+ ReducerType::static_value_size() /
+ sizeof(word_size_type)>
+ word_count(m_functor_reducer.get_reducer().value_size() /
+ sizeof(word_size_type));
{
- reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
- kokkos_impl_cuda_shared_memory<size_type>() +
- threadIdx.y * word_count.value));
+ reference_type value =
+ m_functor_reducer.get_reducer().init(reinterpret_cast<pointer_type>(
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
// Number of blocks is bounded so that the reduction can be limited to two
// passes. Each thread block is given an approximately equal amount of
// work to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmatically
+ // ordering does not match the final pass, but is arithmetically
// equivalent.
this->exec_range(value);
// Reduce with final value at blockDim.y - 1 location.
// Problem: non power-of-two blockDim
if (cuda_single_inter_block_reduce_scan<false>(
- final_reducer, blockIdx.x, gridDim.x,
- kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+ m_functor_reducer.get_reducer(), blockIdx.x, gridDim.x,
+ kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
m_scratch_flags)) {
// This is the final block with the final result at the final threads'
// location
- size_type* const shared = kokkos_impl_cuda_shared_memory<size_type>() +
- (blockDim.y - 1) * word_count.value;
- size_type* const global =
+ word_size_type* const shared =
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
+ (blockDim.y - 1) * word_count.value;
+ word_size_type* const global =
m_result_ptr_device_accessible
- ? reinterpret_cast<size_type*>(m_result_ptr)
+ ? reinterpret_cast<word_size_type*>(m_result_ptr)
: (m_unified_space ? m_unified_space : m_scratch_space);
if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
+ m_functor_reducer.get_reducer().final(
+ reinterpret_cast<value_type*>(shared));
}
if (CudaTraits::WarpSize < word_count.value) {
__syncthreads();
+ } else {
+ // In the above call to final(), shared might have been updated by a
+ // single thread within a warp without synchronization. Synchronize
+ // threads within warp to avoid potential race condition.
+ __syncwarp(0xffffffff);
}
for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
// Determine block size constrained by shared memory:
inline unsigned local_block_size(const FunctorType& f) {
unsigned n = CudaTraits::WarpSize * 8;
+ int const maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
int shmem_size =
- cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+ cuda_single_inter_block_reduce_scan_shmem<false, WorkTag, value_type>(
f, n);
- using closure_type = Impl::ParallelReduce<FunctorType, Policy, ReducerType>;
- cudaFuncAttributes attr =
- CudaParallelLaunch<closure_type,
- LaunchBounds>::get_cuda_func_attributes();
+ using closure_type =
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ Policy, Kokkos::Cuda>;
+ cudaFuncAttributes attr = CudaParallelLaunch<closure_type, LaunchBounds>::
+ get_cuda_func_attributes(m_policy.space().cuda_device());
while (
- (n &&
- (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size)) ||
+ (n && (maxShmemPerBlock < shmem_size)) ||
(n >
static_cast<unsigned>(
Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
m_policy.space().impl_internal_space_instance(), attr, f, 1,
shmem_size, 0)))) {
n >>= 1;
- shmem_size = cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(f, n);
+ shmem_size =
+ cuda_single_inter_block_reduce_scan_shmem<false, WorkTag, value_type>(
+ f, n);
}
return n;
}
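// Minimal sketch of the strategy in local_block_size above: halve the
// candidate block size until its reduction scratch fits the per-block
// shared-memory limit. `shmem_needed` is a stand-in for
// cuda_single_inter_block_reduce_scan_shmem.
inline unsigned fit_block_size(unsigned n, int max_shmem_per_block,
                               int (*shmem_needed)(unsigned)) {
  while (n && max_shmem_per_block < shmem_needed(n)) n >>= 1;
  return n;  // 0 means no power-of-two block size fits
}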
inline void execute() {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
const auto nwork = m_policy.m_num_tiles;
if (nwork) {
int block_size = m_policy.m_prod_tile_dims;
// CONSTRAINT: Algorithm requires block_size >= product of tile dimensions
// Nearest power of two
- int exponent_pow_two = std::ceil(std::log2(block_size));
- block_size = std::pow(2, exponent_pow_two);
- int suggested_blocksize = local_block_size(m_functor);
+ int exponent_pow_two = std::ceil(std::log2(block_size));
+ block_size = std::pow(2, exponent_pow_two);
+ int suggested_blocksize =
+ local_block_size(m_functor_reducer.get_functor());
block_size = (block_size > suggested_blocksize)
? block_size
: suggested_blocksize; // Note: block_size must be less
// than or equal to 512
- m_scratch_space = cuda_internal_scratch_space(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer)) *
- block_size /* block_size == max block_count */);
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_space(
+ m_policy.space(),
+ m_functor_reducer.get_reducer().value_size() *
+ block_size /* block_size == max block_count */));
m_scratch_flags =
cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type));
- m_unified_space = cuda_internal_scratch_unified(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer)));
+ m_unified_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_unified(
+ m_policy.space(), m_functor_reducer.get_reducer().value_size()));
// REQUIRED ( 1 , N , 1 )
const dim3 block(1, block_size, 1);
const int shmem =
UseShflReduction
? 0
- : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(m_functor,
- block.y);
+ : cuda_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(
+ m_functor_reducer.get_functor(), block.y);
CudaParallelLaunch<ParallelReduce, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
if (!m_result_ptr_device_accessible) {
if (m_result_ptr) {
"Kokkos::Impl::ParallelReduce<Cuda, MDRangePolicy>::execute: "
"Result Not Device Accessible");
- const int count = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
+ const int count = m_functor_reducer.get_reducer().value_count();
for (int i = 0; i < count; ++i) {
m_result_ptr[i] = pointer_type(m_unified_space)[i];
}
} else {
- const int size = Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer));
+ const int size = m_functor_reducer.get_reducer().value_size();
DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), m_result_ptr,
m_scratch_space, size);
}
} else {
if (m_result_ptr) {
// TODO @graph We need to effectively insert this in to the graph
- final_reducer.init(m_result_ptr);
+ m_functor_reducer.get_reducer().init(m_result_ptr);
}
}
}
template <class ViewType>
- ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
m_policy(arg_policy),
- m_reducer(InvalidType()),
m_result_ptr(arg_result.data()),
m_result_ptr_device_accessible(
MemorySpaceAccess<Kokkos::CudaSpace,
m_scratch_space(nullptr),
m_scratch_flags(nullptr),
m_unified_space(nullptr) {
- check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
- }
-
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::CudaSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_unified_space(nullptr) {
- check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+ check_reduced_view_shmem_size<WorkTag, value_type>(
+ m_policy, m_functor_reducer.get_functor());
}
};
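// Usage sketch for the interface implemented above: a 2D sum reduction into
// a host scalar, which exercises the DeepCopy-back path. Names are
// illustrative; the calls are standard Kokkos.
#include <Kokkos_Core.hpp>

double sum_matrix(Kokkos::View<const double**, Kokkos::CudaSpace> a) {
  double total = 0.0;
  Kokkos::parallel_reduce(
      "sum",
      Kokkos::MDRangePolicy<Kokkos::Cuda, Kokkos::Rank<2>,
                            Kokkos::IndexType<int>>(
          {0, 0}, {int(a.extent(0)), int(a.extent(1))}),
      KOKKOS_LAMBDA(int i, int j, double& update) { update += a(i, j); },
      total);
  return total;
}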
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_PARALLEL_RANGE_HPP
#define KOKKOS_CUDA_PARALLEL_RANGE_HPP
#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
-#include <Kokkos_MinMaxClamp.hpp>
#include <impl/Kokkos_Tools.hpp>
#include <typeinfo>
const FunctorType m_functor;
const Policy m_policy;
- ParallelFor() = delete;
+ ParallelFor() = delete;
ParallelFor& operator=(const ParallelFor&) = delete;
template <class TagType>
const typename Policy::index_type nwork = m_policy.end() - m_policy.begin();
cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelFor,
- LaunchBounds>::get_cuda_func_attributes();
+ CudaParallelLaunch<ParallelFor, LaunchBounds>::get_cuda_func_attributes(
+ m_policy.space().cuda_device());
const int block_size =
Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
m_policy.space().impl_internal_space_instance(), attr, m_functor, 1,
0, 0);
KOKKOS_ASSERT(block_size > 0);
dim3 block(1, block_size, 1);
+ const int maxGridSizeX = m_policy.space().cuda_device_prop().maxGridSize[0];
dim3 grid(
- std::min(
- typename Policy::index_type((nwork + block.y - 1) / block.y),
- typename Policy::index_type(cuda_internal_maximum_grid_count()[0])),
+ std::min(typename Policy::index_type((nwork + block.y - 1) / block.y),
+ typename Policy::index_type(maxGridSizeX)),
1, 1);
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
if (Kokkos::Impl::CudaInternal::cuda_use_serial_execution()) {
#endif
CudaParallelLaunch<ParallelFor, LaunchBounds>(
- *this, grid, block, 0, m_policy.space().impl_internal_space_instance(),
- false);
+ *this, grid, block, 0, m_policy.space().impl_internal_space_instance());
}
ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
: m_functor(arg_functor), m_policy(arg_policy) {}
};
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
Kokkos::Cuda> {
public:
- using Policy = Kokkos::RangePolicy<Traits...>;
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
private:
using WorkRange = typename Policy::WorkRange;
using Member = typename Policy::member_type;
using LaunchBounds = typename Policy::launch_bounds;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis =
- Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
- ReducerTypeFwd>;
-
public:
- using pointer_type = typename Analysis::pointer_type;
- using value_type = typename Analysis::value_type;
- using reference_type = typename Analysis::reference_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
using functor_type = FunctorType;
// Conditionally set word_size_type to int16_t or int8_t if value_type is
// smaller than int32_t (Kokkos::Cuda::size_type)
// Algorithmic constraints: blockSize is a power of two AND blockDim.y ==
// blockDim.z == 1
- const FunctorType m_functor;
+ const CombinedFunctorReducerType m_functor_reducer;
const Policy m_policy;
- const ReducerType m_reducer;
const pointer_type m_result_ptr;
const bool m_result_ptr_device_accessible;
const bool m_result_ptr_host_accessible;
// FIXME_CUDA Shall we use the shfl based reduction or not (only use it for
// static sized types of more than 128bit:
- // sizeof(value_type)>2*sizeof(double)) && Analysis::StaticValueSize)
+ // sizeof(value_type)>2*sizeof(double)) && ReducerType::static_value_size())
static constexpr bool UseShflReduction = false;
public:
template <class TagType>
__device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update) const {
- m_functor(i, update);
+ m_functor_reducer.get_functor()(i, update);
}
template <class TagType>
__device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update) const {
- m_functor(TagType(), i, update);
+ m_functor_reducer.get_functor()(TagType(), i, update);
}
__device__ inline void operator()() const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
- sizeof(word_size_type)>
- word_count(Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) /
+ const integral_nonzero_constant<word_size_type,
+ ReducerType::static_value_size() /
+ sizeof(word_size_type)>
+ word_count(m_functor_reducer.get_reducer().value_size() /
sizeof(word_size_type));
{
- reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
- kokkos_impl_cuda_shared_memory<word_size_type>() +
- threadIdx.y * word_count.value));
+ reference_type value =
+ m_functor_reducer.get_reducer().init(reinterpret_cast<pointer_type>(
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
// Number of blocks is bounded so that the reduction can be limited to two
// passes. Each thread block is given an approximately equal amount of
// work to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmatically
+ // ordering does not match the final pass, but is arithmetically
// equivalent.
const WorkRange range(m_policy, blockIdx.x, gridDim.x);
bool do_final_reduction = true;
if (!zero_length)
do_final_reduction = cuda_single_inter_block_reduce_scan<false>(
- final_reducer, blockIdx.x, gridDim.x,
+ m_functor_reducer.get_reducer(), blockIdx.x, gridDim.x,
kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
m_scratch_flags);
: (m_unified_space ? m_unified_space : m_scratch_space);
if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
+ m_functor_reducer.get_reducer().final(
+ reinterpret_cast<value_type*>(shared));
}
if (CudaTraits::WarpSize < word_count.value) {
__syncthreads();
+ } else if (word_count.value > 1) {
+ // Inside cuda_single_inter_block_reduce_scan() and final() above,
+ // shared[i] below might have been updated by a single thread within a
+ // warp without synchronization afterwards. Synchronize threads within
+ // warp to avoid potential race condition.
+ __syncwarp(0xffffffff);
}
for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
// Determine block size constrained by shared memory:
inline unsigned local_block_size(const FunctorType& f) {
unsigned n = CudaTraits::WarpSize * 8;
+ const int maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
int shmem_size =
- cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+ cuda_single_inter_block_reduce_scan_shmem<false, WorkTag, value_type>(
f, n);
- using closure_type = Impl::ParallelReduce<FunctorType, Policy, ReducerType>;
- cudaFuncAttributes attr =
- CudaParallelLaunch<closure_type,
- LaunchBounds>::get_cuda_func_attributes();
+ using closure_type =
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ Policy, Kokkos::Cuda>;
+ cudaFuncAttributes attr = CudaParallelLaunch<closure_type, LaunchBounds>::
+ get_cuda_func_attributes(m_policy.space().cuda_device());
while (
- (n &&
- (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size)) ||
+ (n && (maxShmemPerBlock < shmem_size)) ||
(n >
static_cast<unsigned>(
Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
m_policy.space().impl_internal_space_instance(), attr, f, 1,
shmem_size, 0)))) {
n >>= 1;
- shmem_size = cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(f, n);
+ shmem_size =
+ cuda_single_inter_block_reduce_scan_shmem<false, WorkTag, value_type>(
+ f, n);
}
return n;
}
inline void execute() {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
const index_type nwork = m_policy.end() - m_policy.begin();
- const bool need_device_set = Analysis::has_init_member_function ||
- Analysis::has_final_member_function ||
+ const bool need_device_set = ReducerType::has_init_member_function() ||
+ ReducerType::has_final_member_function() ||
!m_result_ptr_host_accessible ||
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
Policy::is_graph_kernel::value ||
-#endif
!std::is_same<ReducerType, InvalidType>::value;
if ((nwork > 0) || need_device_set) {
- const int block_size = local_block_size(m_functor);
+ const int block_size = local_block_size(m_functor_reducer.get_functor());
KOKKOS_ASSERT(block_size > 0);
// TODO: down casting these uses more space than required?
m_scratch_space = (word_size_type*)cuda_internal_scratch_space(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer)) *
+ m_policy.space(), m_functor_reducer.get_reducer().value_size() *
block_size /* block_size == max block_count */);
// Intentionally do not downcast to word_size_type since we use Cuda
sizeof(Cuda::size_type));
m_unified_space =
reinterpret_cast<word_size_type*>(cuda_internal_scratch_unified(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer))));
+ m_policy.space(), m_functor_reducer.get_reducer().value_size()));
// REQUIRED ( 1 , N , 1 )
dim3 block(1, block_size, 1);
// Required grid.x <= block.y
- dim3 grid(std::min(int(block.y), int((nwork + block.y - 1) / block.y)), 1,
- 1);
+ dim3 grid(std::min(index_type(block.y),
+ index_type((nwork + block.y - 1) / block.y)),
+ 1, 1);
// TODO @graph We need to effectively insert this in to the graph
const int shmem =
UseShflReduction
? 0
- : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(m_functor,
- block.y);
+ : cuda_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(
+ m_functor_reducer.get_functor(), block.y);
if ((nwork == 0)
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
CudaParallelLaunch<ParallelReduce, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
if (!m_result_ptr_device_accessible) {
if (m_result_ptr) {
"Kokkos::Impl::ParallelReduce<Cuda, RangePolicy>::execute: "
"Result "
"Not Device Accessible");
- const int count = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
+ const int count = m_functor_reducer.get_reducer().value_count();
for (int i = 0; i < count; ++i) {
m_result_ptr[i] = pointer_type(m_unified_space)[i];
}
} else {
- const int size = Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer));
+ const int size = m_functor_reducer.get_reducer().value_size();
DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), m_result_ptr,
m_scratch_space, size);
}
} else {
if (m_result_ptr) {
// TODO @graph We need to effectively insert this in to the graph
- final_reducer.init(m_result_ptr);
+ m_functor_reducer.get_reducer().init(m_result_ptr);
}
}
}
template <class ViewType>
- ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
m_policy(arg_policy),
- m_reducer(InvalidType()),
m_result_ptr(arg_result.data()),
m_result_ptr_device_accessible(
MemorySpaceAccess<Kokkos::CudaSpace,
m_scratch_space(nullptr),
m_scratch_flags(nullptr),
m_unified_space(nullptr) {
- check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
- }
-
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::CudaSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_unified_space(nullptr) {
- check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+ check_reduced_view_shmem_size<WorkTag, value_type>(
+ m_policy, m_functor_reducer.get_functor());
}
};
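// Usage sketch: reducing into a device-resident rank-0 View exercises the
// m_result_ptr_device_accessible branch above (no copy back to the host).
// Names are illustrative.
#include <Kokkos_Core.hpp>

void dot_into_view(Kokkos::View<const double*, Kokkos::CudaSpace> x,
                   Kokkos::View<const double*, Kokkos::CudaSpace> y,
                   Kokkos::View<double, Kokkos::CudaSpace> result) {
  Kokkos::parallel_reduce(
      "dot", Kokkos::RangePolicy<Kokkos::Cuda>(0, x.extent(0)),
      KOKKOS_LAMBDA(int i, double& update) { update += x(i) * y(i); },
      result);  // rank-0 device View: the result stays on the device
}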
using LaunchBounds = typename Policy::launch_bounds;
using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
+ Policy, FunctorType, void>;
public:
using pointer_type = typename Analysis::pointer_type;
using reference_type = typename Analysis::reference_type;
+ using value_type = typename Analysis::value_type;
using functor_type = FunctorType;
using size_type = Cuda::size_type;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::Cuda::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the scan is performed.
+ // Within the scan, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the scan, word_size_type is used again.
+  // For scalars > 4 bytes in size, indexing into shared/global memory relies
+  // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that, when the join is
+  // performed, we have the correct data that was copied over in chunks of 4
+  // bytes.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
private:
// Algorithmic constraints:
// (c) gridDim.x <= blockDim.y * blockDim.y
// (d) gridDim.y == gridDim.z == 1
- const FunctorType m_functor;
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
const Policy m_policy;
- size_type* m_scratch_space;
+ word_size_type* m_scratch_space;
size_type* m_scratch_flags;
size_type m_final;
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
template <class TagType>
__device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update, const bool final_result) const {
- m_functor(i, update, final_result);
+ m_functor_reducer.get_functor()(i, update, final_result);
}
template <class TagType>
__device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update, const bool final_result) const {
- m_functor(TagType(), i, update, final_result);
+ m_functor_reducer.get_functor()(TagType(), i, update, final_result);
}
//----------------------------------------
__device__ inline void initial() const {
- typename Analysis::Reducer final_reducer(&m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(Analysis::value_size(m_functor_reducer.get_functor()) /
+ sizeof(word_size_type));
- size_type* const shared_value =
- kokkos_impl_cuda_shared_memory<size_type>() +
+ word_size_type* const shared_value =
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
word_count.value * threadIdx.y;
final_reducer.init(reinterpret_cast<pointer_type>(shared_value));
// Number of blocks is bounded so that the reduction can be limited to two
// passes. Each thread block is given an approximately equal amount of work
// to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmatically equivalent.
+ // ordering does not match the final pass, but is arithmetically equivalent.
const WorkRange range(m_policy, blockIdx.x, gridDim.x);
// gridDim.x
cuda_single_inter_block_reduce_scan<true>(
final_reducer, blockIdx.x, gridDim.x,
- kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+ kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
m_scratch_flags);
}
//----------------------------------------
__device__ inline void final() const {
- typename Analysis::Reducer final_reducer(&m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(Analysis::value_size(m_functor_reducer.get_functor()) /
+ sizeof(word_size_type));
// Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
// value[2] , ... }
- size_type* const shared_data = kokkos_impl_cuda_shared_memory<size_type>();
- size_type* const shared_prefix =
+ word_size_type* const shared_data =
+ kokkos_impl_cuda_shared_memory<word_size_type>();
+ word_size_type* const shared_prefix =
shared_data + word_count.value * threadIdx.y;
- size_type* const shared_accum =
+ word_size_type* const shared_accum =
shared_data + word_count.value * (blockDim.y + 1);
// Starting value for this thread block is the previous block's total.
if (blockIdx.x) {
- size_type* const block_total =
+ word_size_type* const block_total =
m_scratch_space + word_count.value * (blockIdx.x - 1);
for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
shared_accum[i] = block_total[i];
typename Analysis::pointer_type(shared_data + word_count.value));
{
- size_type* const block_total =
+ word_size_type* const block_total =
shared_data + word_count.value * blockDim.y;
for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
shared_accum[i] = block_total[i];
// 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit
// testing
+ const int maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
unsigned n = CudaTraits::WarpSize * 4;
- while (n && unsigned(m_policy.space()
- .impl_internal_space_instance()
- ->m_maxShmemPerBlock) <
- cuda_single_inter_block_reduce_scan_shmem<true, FunctorType,
- WorkTag>(f, n)) {
+ while (n &&
+ unsigned(maxShmemPerBlock) <
+ cuda_single_inter_block_reduce_scan_shmem<true, WorkTag,
+ value_type>(f, n)) {
n >>= 1;
}
return n;
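// Reference semantics of the device-side scan above, as a host sketch: an
// exclusive scan stores at position i the sum of all elements before i,
// i.e. { 0, v[0], v[0]+v[1], ... }, and yields the grand total.
#include <cstddef>

inline long exclusive_scan_reference(const long* v, long* out, std::size_t n) {
  long running = 0;
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = running;  // prefix *excluding* element i
    running += v[i];
  }
  return running;  // analogue of the per-block "block total"
}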
if (nwork) {
constexpr int GridMaxComputeCapability_2x = 0x0ffff;
- const int block_size = local_block_size(m_functor);
+ const int block_size = local_block_size(m_functor_reducer.get_functor());
KOKKOS_ASSERT(block_size > 0);
const int grid_max =
// How many blocks are really needed for this much work:
const int grid_x = (nwork + work_per_block - 1) / work_per_block;
- m_scratch_space = cuda_internal_scratch_space(
- m_policy.space(), Analysis::value_size(m_functor) * grid_x);
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_space(
+ m_policy.space(),
+ Analysis::value_size(m_functor_reducer.get_functor()) * grid_x));
m_scratch_flags =
cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type) * 1);
dim3 grid(grid_x, 1, 1);
dim3 block(1, block_size, 1); // REQUIRED DIMENSIONS ( 1 , N , 1 )
- const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
+ const int shmem = Analysis::value_size(m_functor_reducer.get_functor()) *
+ (block_size + 2);
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
if (m_run_serial) {
m_final = false;
CudaParallelLaunch<ParallelScan, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
}
#endif
m_final = true;
CudaParallelLaunch<ParallelScan, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
}
}
ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor),
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
m_policy(arg_policy),
m_scratch_space(nullptr),
m_scratch_flags(nullptr),
using WorkRange = typename Policy::WorkRange;
using LaunchBounds = typename Policy::launch_bounds;
- using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
- Policy, FunctorType>;
+ using Analysis =
+ Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ReturnType>;
public:
+ using value_type = typename Analysis::value_type;
using pointer_type = typename Analysis::pointer_type;
using reference_type = typename Analysis::reference_type;
using functor_type = FunctorType;
using size_type = Cuda::size_type;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::Cuda::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the scan is performed.
+ // Within the scan, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the scan, word_size_type is used again.
+  // For scalars > 4 bytes in size, indexing into shared/global memory relies
+  // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that, when the join is
+  // performed, we have the correct data that was copied over in chunks of 4
+  // bytes.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
private:
// Algorithmic constraints:
// (c) gridDim.x <= blockDim.y * blockDim.y
// (d) gridDim.y == gridDim.z == 1
- const FunctorType m_functor;
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
const Policy m_policy;
- size_type* m_scratch_space;
+ word_size_type* m_scratch_space;
size_type* m_scratch_flags;
size_type m_final;
- ReturnType& m_returnvalue;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
bool m_run_serial;
#endif
template <class TagType>
__device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update, const bool final_result) const {
- m_functor(i, update, final_result);
+ m_functor_reducer.get_functor()(i, update, final_result);
}
template <class TagType>
__device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
const Member& i, reference_type update, const bool final_result) const {
- m_functor(TagType(), i, update, final_result);
+ m_functor_reducer.get_functor()(TagType(), i, update, final_result);
}
//----------------------------------------
__device__ inline void initial() const {
- typename Analysis::Reducer final_reducer(&m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(Analysis::value_size(m_functor_reducer.get_functor()) /
+ sizeof(word_size_type));
- size_type* const shared_value =
- kokkos_impl_cuda_shared_memory<size_type>() +
+ word_size_type* const shared_value =
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
word_count.value * threadIdx.y;
final_reducer.init(reinterpret_cast<pointer_type>(shared_value));
// Number of blocks is bounded so that the reduction can be limited to two
// passes. Each thread block is given an approximately equal amount of work
// to perform. Accumulate the values for this block. The accumulation
- // ordering does not match the final pass, but is arithmatically equivalent.
+ // ordering does not match the final pass, but is arithmetically equivalent.
const WorkRange range(m_policy, blockIdx.x, gridDim.x);
// gridDim.x
cuda_single_inter_block_reduce_scan<true>(
final_reducer, blockIdx.x, gridDim.x,
- kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+ kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
m_scratch_flags);
}
//----------------------------------------
__device__ inline void final() const {
- typename Analysis::Reducer final_reducer(&m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(final_reducer.value_size() / sizeof(word_size_type));
// Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
// value[2] , ... }
- size_type* const shared_data = kokkos_impl_cuda_shared_memory<size_type>();
- size_type* const shared_prefix =
+ word_size_type* const shared_data =
+ kokkos_impl_cuda_shared_memory<word_size_type>();
+ word_size_type* const shared_prefix =
shared_data + word_count.value * threadIdx.y;
- size_type* const shared_accum =
+ word_size_type* const shared_accum =
shared_data + word_count.value * (blockDim.y + 1);
// Starting value for this thread block is the previous block's total.
if (blockIdx.x) {
- size_type* const block_total =
+ word_size_type* const block_total =
m_scratch_space + word_count.value * (blockIdx.x - 1);
for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
shared_accum[i] = block_total[i];
typename Analysis::pointer_type(shared_data + word_count.value));
{
- size_type* const block_total =
+ word_size_type* const block_total =
shared_data + word_count.value * blockDim.y;
for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
shared_accum[i] = block_total[i];
reinterpret_cast<pointer_type>(shared_prefix)),
true);
}
+ if (iwork + 1 == m_policy.end() && m_policy.end() == range.end() &&
+ m_result_ptr_device_accessible)
+ *m_result_ptr = *reinterpret_cast<pointer_type>(shared_prefix);
}
}
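// Usage sketch for the total-returning scan implemented here: the final
// argument may be a host scalar (copied back, as before) or, with this
// change, a device-accessible View written directly from the kernel. Names
// are illustrative.
#include <Kokkos_Core.hpp>

long prefix_sum(Kokkos::View<const int*, Kokkos::CudaSpace> in,
                Kokkos::View<long*, Kokkos::CudaSpace> out) {
  long total = 0;
  Kokkos::parallel_scan(
      "prefix", Kokkos::RangePolicy<Kokkos::Cuda>(0, in.extent(0)),
      KOKKOS_LAMBDA(int i, long& update, bool final_pass) {
        if (final_pass) out(i) = update;  // exclusive prefix
        update += in(i);
      },
      total);
  return total;
}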
// 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit
// testing
+ const int maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
unsigned n = CudaTraits::WarpSize * 4;
- while (n && unsigned(m_policy.space()
- .impl_internal_space_instance()
- ->m_maxShmemPerBlock) <
- cuda_single_inter_block_reduce_scan_shmem<true, FunctorType,
- WorkTag>(f, n)) {
+ while (n &&
+ unsigned(maxShmemPerBlock) <
+ cuda_single_inter_block_reduce_scan_shmem<true, WorkTag,
+ value_type>(f, n)) {
n >>= 1;
}
return n;
if (nwork) {
enum { GridMaxComputeCapability_2x = 0x0ffff };
- const int block_size = local_block_size(m_functor);
+ const int block_size = local_block_size(m_functor_reducer.get_functor());
KOKKOS_ASSERT(block_size > 0);
const int grid_max =
// How many blocks are really needed for this much work:
const int grid_x = (nwork + work_per_block - 1) / work_per_block;
- m_scratch_space = cuda_internal_scratch_space(
- m_policy.space(), Analysis::value_size(m_functor) * grid_x);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_space(
+ m_policy.space(), final_reducer.value_size() * grid_x));
m_scratch_flags =
cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type) * 1);
dim3 grid(grid_x, 1, 1);
dim3 block(1, block_size, 1); // REQUIRED DIMENSIONS ( 1 , N , 1 )
- const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
+ const int shmem = final_reducer.value_size() * (block_size + 2);
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
if (m_run_serial) {
block = dim3(1, 1, 1);
grid = dim3(1, 1, 1);
- } else {
+ } else
#endif
-
+ {
m_final = false;
CudaParallelLaunch<ParallelScanWithTotal, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
-#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
}
-#endif
m_final = true;
CudaParallelLaunch<ParallelScanWithTotal, LaunchBounds>(
*this, grid, block, shmem,
- m_policy.space().impl_internal_space_instance(),
- false); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
- const int size = Analysis::value_size(m_functor);
+ const int size = final_reducer.value_size();
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
if (m_run_serial)
DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), &m_returnvalue,
m_scratch_space, size);
else
#endif
- DeepCopy<HostSpace, CudaSpace, Cuda>(
- m_policy.space(), &m_returnvalue,
- m_scratch_space + (grid_x - 1) * size / sizeof(int), size);
+ {
+ if (!m_result_ptr_device_accessible)
+ DeepCopy<HostSpace, CudaSpace, Cuda>(
+ m_policy.space(), m_result_ptr,
+ m_scratch_space + (grid_x - 1) * size / sizeof(word_size_type),
+ size);
+ }
}
}
+ template <class ViewType>
ParallelScanWithTotal(const FunctorType& arg_functor,
- const Policy& arg_policy, ReturnType& arg_returnvalue)
- : m_functor(arg_functor),
+ const Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
m_policy(arg_policy),
m_scratch_space(nullptr),
m_scratch_flags(nullptr),
m_final(false),
- m_returnvalue(arg_returnvalue)
+ m_result_ptr(arg_result_view.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<Kokkos::CudaSpace,
+ typename ViewType::memory_space>::accessible)
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
,
m_run_serial(Kokkos::Impl::CudaInternal::cuda_use_serial_execution())
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_PARALLEL_TEAM_HPP
#define KOKKOS_CUDA_PARALLEL_TEAM_HPP
#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
-#include <Cuda/Kokkos_Cuda_Locks.hpp>
#include <Cuda/Kokkos_Cuda_Team.hpp>
-#include <Kokkos_MinMaxClamp.hpp>
+#include <Kokkos_MinMax.hpp>
#include <Kokkos_Vectorization.hpp>
#include <impl/Kokkos_Tools.hpp>
Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
cudaFuncAttributes attr =
CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
- get_cuda_func_attributes();
+ get_cuda_func_attributes(space().cuda_device());
int block_size =
Kokkos::Impl::cuda_get_max_block_size<FunctorType,
typename traits::launch_bounds>(
const ParallelReduceTag&) const {
using functor_analysis_type =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- TeamPolicyInternal, FunctorType>;
- using reducer_type = typename Impl::ParallelReduceReturnValue<
- void, typename functor_analysis_type::value_type,
- FunctorType>::reducer_type;
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- reducer_type>;
+ TeamPolicyInternal, FunctorType, void>;
+ using closure_type = Impl::ParallelReduce<
+ CombinedFunctorReducer<FunctorType,
+ typename functor_analysis_type::Reducer>,
+ TeamPolicy<Properties...>, Kokkos::Cuda>;
return internal_team_size_max<closure_type>(f);
}
inline int team_size_max(const FunctorType& f, const ReducerType& /*r*/,
const ParallelReduceTag&) const {
using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- ReducerType>;
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ TeamPolicy<Properties...>, Kokkos::Cuda>;
return internal_team_size_max<closure_type>(f);
}
Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
cudaFuncAttributes attr =
CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
- get_cuda_func_attributes();
+ get_cuda_func_attributes(space().cuda_device());
const int block_size =
Kokkos::Impl::cuda_get_opt_block_size<FunctorType,
typename traits::launch_bounds>(
const ParallelReduceTag&) const {
using functor_analysis_type =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- TeamPolicyInternal, FunctorType>;
- using reducer_type = typename Impl::ParallelReduceReturnValue<
- void, typename functor_analysis_type::value_type,
- FunctorType>::reducer_type;
- using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- reducer_type>;
+ TeamPolicyInternal, FunctorType, void>;
+ using closure_type = Impl::ParallelReduce<
+ CombinedFunctorReducer<FunctorType,
+ typename functor_analysis_type::Reducer>,
+ TeamPolicy<Properties...>, Kokkos::Cuda>;
return internal_team_size_recommended<closure_type>(f);
}
int team_size_recommended(const FunctorType& f, const ReducerType&,
const ParallelReduceTag&) const {
using closure_type =
- Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
- ReducerType>;
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ TeamPolicy<Properties...>, Kokkos::Cuda>;
return internal_team_size_recommended<closure_type>(f);
}
}
inline static int scratch_size_max(int level) {
- return (
- level == 0 ? 1024 * 40 : // 48kB is the max for CUDA, but we need some
- // for team_member.reduce etc.
- 20 * 1024 *
- 1024); // arbitrarily setting this to 20MB, for a Volta V100
- // that would give us about 3.2GB for 2 teams per SM
+    // Cuda Teams use (team_size + 2)*sizeof(double) bytes of shared memory
+    // for team reductions. They also use one int64_t in static shared memory
+    // for a shared ID. Furthermore, they use additional scratch memory in
+    // some reduction scenarios, which depends on the size of the value_type
+    // and is NOT captured here.
+ constexpr size_t max_possible_team_size = 1024;
+ constexpr size_t max_reserved_shared_mem_per_team =
+ (max_possible_team_size + 2) * sizeof(double) + sizeof(int64_t);
+    // arbitrarily setting the level 1 scratch limit to 20 MB; for a
+    // Volta V100 that would give us about 3.2 GB for 2 teams per SM
+ constexpr size_t max_l1_scratch_size = 20 * 1024 * 1024;
+
+ size_t max_shmem = Cuda().cuda_device_prop().sharedMemPerBlock;
+ return (level == 0 ? max_shmem - max_reserved_shared_mem_per_team
+ : max_l1_scratch_size);
}
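  // Worked example under an assumed 48 KiB sharedMemPerBlock (49152 bytes):
  // the reservation is (1024 + 2) * sizeof(double) + sizeof(int64_t)
  // = 8208 + 8 = 8216 bytes, so scratch_size_max(0) would return
  // 49152 - 8216 = 40936 bytes, while scratch_size_max(1) stays at the
  // fixed 20 MB limit.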
//----------------------------------------
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_DEPRECATED inline int vector_length() const {
- return impl_vector_length();
- }
-#endif
inline int impl_vector_length() const { return m_vector_length; }
inline int team_size() const { return m_team_size; }
inline int league_size() const { return m_league_size; }
m_tune_team(bool(team_size_request <= 0)),
m_tune_vector(bool(vector_length_request <= 0)) {
// Make sure league size is permissible
- if (league_size_ >= int(Impl::cuda_internal_maximum_grid_count()[0]))
+ const int maxGridSizeX = m_space.cuda_device_prop().maxGridSize[0];
+ if (league_size_ >= maxGridSizeX)
Impl::throw_runtime_exception(
"Requested too large league_size for TeamPolicy on Cuda execution "
"space.");
typename Impl::DeduceFunctorPatternInterface<ClosureType>::type;
using Analysis =
Impl::FunctorAnalysis<Interface, typename ClosureType::Policy,
- FunctorType>;
+ FunctorType, void>;
cudaFuncAttributes attr =
CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
- get_cuda_func_attributes();
+ get_cuda_func_attributes(space().cuda_device());
const int block_size = std::forward<BlockSizeCallable>(block_size_callable)(
space().impl_internal_space_instance(), attr, f,
(size_t)impl_vector_length(),
};
__device__ inline int64_t cuda_get_scratch_index(Cuda::size_type league_size,
- int32_t* scratch_locks) {
+ int32_t* scratch_locks,
+ size_t num_scratch_locks) {
int64_t threadid = 0;
__shared__ int64_t base_thread_id;
if (threadIdx.x == 0 && threadIdx.y == 0) {
int64_t const wraparound_len = Kokkos::max(
- int64_t(1), Kokkos::min(int64_t(league_size),
- (int64_t(g_device_cuda_lock_arrays.n)) /
- (blockDim.x * blockDim.y)));
+ int64_t(1),
+ Kokkos::min(int64_t(league_size),
+ int64_t(num_scratch_locks) / (blockDim.x * blockDim.y)));
threadid = (blockIdx.x * blockDim.z + threadIdx.z) % wraparound_len;
threadid *= blockDim.x * blockDim.y;
int done = 0;
size_t m_scratch_size[2];
int m_scratch_pool_id = -1;
int32_t* m_scratch_locks;
+ size_t m_num_scratch_locks;
template <class TagType>
__device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
// Iterate this block through the league
int64_t threadid = 0;
if (m_scratch_size[1] > 0) {
- threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks);
+ threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks,
+ m_num_scratch_locks);
}
const int int_league_size = (int)m_league_size;
CudaParallelLaunch<ParallelFor, LaunchBounds>(
*this, grid, block, shmem_size_total,
- m_policy.space().impl_internal_space_instance(),
- true); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
}
ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
m_league_size(arg_policy.league_size()),
m_team_size(arg_policy.team_size()),
m_vector_size(arg_policy.impl_vector_length()) {
- cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelFor,
- LaunchBounds>::get_cuda_func_attributes();
- m_team_size =
- m_team_size >= 0
- ? m_team_size
- : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
- m_policy.space().impl_internal_space_instance(), attr,
- m_functor, m_vector_size, m_policy.team_scratch_size(0),
- m_policy.thread_scratch_size(0)) /
- m_vector_size;
+ auto internal_space_instance =
+ m_policy.space().impl_internal_space_instance();
+ if (m_team_size < 0) {
+ m_team_size =
+ arg_policy.team_size_recommended(arg_functor, ParallelForTag());
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<Cuda, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
m_shmem_begin = (sizeof(double) * (m_team_size + 2));
m_shmem_size =
(m_policy.scratch_size(0, m_team_size) +
FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
- m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+ m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+ m_scratch_locks = internal_space_instance->m_scratch_locks;
+ m_num_scratch_locks = internal_space_instance->m_num_scratch_locks;
// Functor's reduce memory, team scan memory, and team shared memory depend
// upon team size.
if (m_team_size <= 0) {
m_scratch_ptr[1] = nullptr;
} else {
- auto scratch_ptr_id =
- m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(
- static_cast<std::int64_t>(Cuda::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
- m_scratch_ptr[1] = scratch_ptr_id.first;
- m_scratch_pool_id = scratch_ptr_id.second;
+ m_scratch_pool_id = internal_space_instance->acquire_team_scratch_space();
+ m_scratch_ptr[1] = internal_space_instance->resize_team_scratch_space(
+ m_scratch_pool_id,
+ static_cast<std::int64_t>(m_scratch_size[1]) *
+ (std::min(
+ static_cast<std::int64_t>(Cuda().concurrency() /
+ (m_team_size * m_vector_size)),
+ static_cast<std::int64_t>(m_league_size))));
}
+ const int maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
const int shmem_size_total = m_shmem_begin + m_shmem_size;
- if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
- printf(
- "%i %i\n",
- m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock,
- shmem_size_total);
+ if (maxShmemPerBlock < shmem_size_total) {
+ printf("%i %i\n", maxShmemPerBlock, shmem_size_total);
Kokkos::Impl::throw_runtime_exception(std::string(
"Kokkos::Impl::ParallelFor< Cuda > insufficient shared memory"));
}
- if (int(m_team_size) >
- int(Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
- m_policy.space().impl_internal_space_instance(), attr,
- arg_functor, arg_policy.impl_vector_length(),
- arg_policy.team_scratch_size(0),
- arg_policy.thread_scratch_size(0)) /
- arg_policy.impl_vector_length())) {
+ if (m_team_size > arg_policy.team_size_max(arg_functor, ParallelForTag())) {
Kokkos::Impl::throw_runtime_exception(std::string(
"Kokkos::Impl::ParallelFor< Cuda > requested too large team size."));
}
if (m_scratch_pool_id >= 0) {
m_policy.space()
.impl_internal_space_instance()
- ->m_team_scratch_pool[m_scratch_pool_id] = 0;
+ ->release_team_scratch_space(m_scratch_pool_id);
}
}
};
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Cuda> {
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>, Kokkos::Cuda> {
public:
- using Policy = TeamPolicy<Properties...>;
+ using Policy = TeamPolicy<Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
private:
using Member = typename Policy::member_type;
using WorkTag = typename Policy::work_tag;
using LaunchBounds = typename Policy::launch_bounds;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- WorkTag, void>::type;
-
- using Analysis =
- Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
- ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
- using value_type = typename Analysis::value_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+ using value_type = typename ReducerType::value_type;
public:
using functor_type = FunctorType;
+  // Conditionally set word_size_type to int16_t or int8_t if value_type is
+  // smaller than int32_t (Kokkos::Cuda::size_type). word_size_type is used
+  // to compute the word count and the shared/global memory buffer sizes
+  // before the reduction is performed. Within the reduction, the word count
+  // is recomputed from word_size_type, and word_size_type is used again when
+  // calculating indices into the shared/global memory buffers. For scalars
+  // larger than 4 bytes, indexing into shared/global memory relies on the
+  // block and grid dimensions to index at the correct offset rather than at
+  // every 4-byte word, so that the join sees the data exactly as it was
+  // copied over in 4-byte chunks.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(Kokkos::Cuda::size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>,
+ Kokkos::Cuda::size_type>;
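  // How the alias resolves, assuming the usual 4-byte Kokkos::Cuda::size_type:
  //   sizeof(value_type) == 2        -> int16_t
  //   any other sizeof(value_type)<4 -> int8_t
  //   sizeof(value_type) >= 4        -> Kokkos::Cuda::size_type
  // e.g. a hypothetical 16-byte value_type keeps the 4-byte word and gives
  // word_count = 16 / 4 = 4 words per reduction value.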
using size_type = Cuda::size_type;
using reducer_type = ReducerType;
static constexpr bool UseShflReduction =
- (true && (Analysis::StaticValueSize != 0));
+ ReducerType::static_value_size() != 0;
private:
struct ShflReductionTag {};
// [ team shared space ]
//
- const FunctorType m_functor;
+ const CombinedFunctorReducerType m_functor_reducer;
const Policy m_policy;
- const ReducerType m_reducer;
const pointer_type m_result_ptr;
const bool m_result_ptr_device_accessible;
const bool m_result_ptr_host_accessible;
- size_type* m_scratch_space;
- size_type* m_scratch_flags;
- size_type* m_unified_space;
+ word_size_type* m_scratch_space;
+  // m_scratch_flags must be of type Cuda::size_type due to the use of
+  // atomics for tracking metadata in Kokkos_Cuda_ReduceScan.hpp
+ Cuda::size_type* m_scratch_flags;
+ word_size_type* m_unified_space;
size_type m_team_begin;
size_type m_shmem_begin;
size_type m_shmem_size;
size_t m_scratch_size[2];
int m_scratch_pool_id = -1;
int32_t* m_scratch_locks;
+ size_t m_num_scratch_locks;
const size_type m_league_size;
int m_team_size;
const size_type m_vector_size;
template <class TagType>
__device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
const Member& member, reference_type update) const {
- m_functor(member, update);
+ m_functor_reducer.get_functor()(member, update);
}
template <class TagType>
__device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
const Member& member, reference_type update) const {
- m_functor(TagType(), member, update);
+ m_functor_reducer.get_functor()(TagType(), member, update);
}
public:
__device__ inline void operator()() const {
int64_t threadid = 0;
if (m_scratch_size[1] > 0) {
- threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks);
+ threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks,
+ m_num_scratch_locks);
}
using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
}
}
- __device__ inline void run(SHMEMReductionTag&, const int& threadid) const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
- sizeof(size_type)>
- word_count(Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer)) /
- sizeof(size_type));
+ __device__ inline void run(SHMEMReductionTag, const int& threadid) const {
+ const integral_nonzero_constant<word_size_type,
+ ReducerType::static_value_size() /
+ sizeof(word_size_type)>
+ word_count(m_functor_reducer.get_reducer().value_size() /
+ sizeof(word_size_type));
reference_type value =
- final_reducer.init(kokkos_impl_cuda_shared_memory<size_type>() +
- threadIdx.y * word_count.value);
+ m_functor_reducer.get_reducer().init(reinterpret_cast<pointer_type>(
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
// Iterate this block through the league
const int int_league_size = (int)m_league_size;
bool do_final_reduction = true;
if (!zero_length)
do_final_reduction = cuda_single_inter_block_reduce_scan<false>(
- final_reducer, blockIdx.x, gridDim.x,
- kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+ m_functor_reducer.get_reducer(), blockIdx.x, gridDim.x,
+ kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
m_scratch_flags);
if (do_final_reduction) {
// This is the final block with the final result at the final threads'
// location
- size_type* const shared = kokkos_impl_cuda_shared_memory<size_type>() +
- (blockDim.y - 1) * word_count.value;
+ word_size_type* const shared =
+ kokkos_impl_cuda_shared_memory<word_size_type>() +
+ (blockDim.y - 1) * word_count.value;
size_type* const global =
m_result_ptr_device_accessible
- ? reinterpret_cast<size_type*>(m_result_ptr)
+ ? reinterpret_cast<word_size_type*>(m_result_ptr)
: (m_unified_space ? m_unified_space : m_scratch_space);
if (threadIdx.y == 0) {
- final_reducer.final(reinterpret_cast<value_type*>(shared));
+ m_functor_reducer.get_reducer().final(
+ reinterpret_cast<value_type*>(shared));
}
if (CudaTraits::WarpSize < word_count.value) {
__syncthreads();
+ } else {
+      // In the above call to final(), shared might have been updated by a
+      // single thread within a warp without synchronization. Synchronize the
+      // threads within the warp to avoid a potential race condition.
+ __syncwarp(0xffffffff);
}
for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
}
__device__ inline void run(ShflReductionTag, const int& threadid) const {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
value_type value;
- final_reducer.init(&value);
+ m_functor_reducer.get_reducer().init(&value);
// Iterate this block through the league
const int int_league_size = (int)m_league_size;
: m_scratch_space);
value_type init;
- final_reducer.init(&init);
+ m_functor_reducer.get_reducer().init(&init);
if (int_league_size == 0) {
- final_reducer.final(&value);
+ m_functor_reducer.get_reducer().final(&value);
*result = value;
- } else if (Impl::cuda_inter_block_reduction(value, init, final_reducer,
- m_scratch_space, result,
- m_scratch_flags, blockDim.y)) {
+ } else if (Impl::cuda_inter_block_reduction(
+ value, init, m_functor_reducer.get_reducer(),
+ reinterpret_cast<pointer_type>(m_scratch_space), result,
+ m_scratch_flags, blockDim.y)) {
const unsigned id = threadIdx.y * blockDim.x + threadIdx.x;
if (id == 0) {
- final_reducer.final(&value);
+ m_functor_reducer.get_reducer().final(&value);
*result = value;
}
}
}
inline void execute() {
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
const bool is_empty_range = m_league_size == 0 || m_team_size == 0;
- const bool need_device_set = Analysis::has_init_member_function ||
- Analysis::has_final_member_function ||
+ const bool need_device_set = ReducerType::has_init_member_function() ||
+ ReducerType::has_final_member_function() ||
!m_result_ptr_host_accessible ||
-#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
Policy::is_graph_kernel::value ||
-#endif
!std::is_same<ReducerType, InvalidType>::value;
if (!is_empty_range || need_device_set) {
const int block_count = std::max(
1u, UseShflReduction ? std::min(m_league_size, size_type(1024 * 32))
: std::min(int(m_league_size), m_team_size));
- m_scratch_space = cuda_internal_scratch_space(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer)) *
- block_count);
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_space(
+ m_policy.space(),
+ m_functor_reducer.get_reducer().value_size() * block_count));
m_scratch_flags =
cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type));
- m_unified_space = cuda_internal_scratch_unified(
- m_policy.space(), Analysis::value_size(ReducerConditional::select(
- m_functor, m_reducer)));
+ m_unified_space =
+ reinterpret_cast<word_size_type*>(cuda_internal_scratch_unified(
+ m_policy.space(), m_functor_reducer.get_reducer().value_size()));
dim3 block(m_vector_size, m_team_size, 1);
dim3 grid(block_count, 1, 1);
CudaParallelLaunch<ParallelReduce, LaunchBounds>(
*this, grid, block, shmem_size_total,
- m_policy.space().impl_internal_space_instance(),
- true); // copy to device and execute
+ m_policy.space()
+ .impl_internal_space_instance()); // copy to device and execute
if (!m_result_ptr_device_accessible) {
m_policy.space().fence(
if (m_result_ptr) {
if (m_unified_space) {
- const int count = Analysis::value_count(
- ReducerConditional::select(m_functor, m_reducer));
+ const int count = m_functor_reducer.get_reducer().value_count();
for (int i = 0; i < count; ++i) {
m_result_ptr[i] = pointer_type(m_unified_space)[i];
}
} else {
- const int size = Analysis::value_size(
- ReducerConditional::select(m_functor, m_reducer));
- DeepCopy<HostSpace, CudaSpace>(m_result_ptr, m_scratch_space, size);
+ const int size = m_functor_reducer.get_reducer().value_size();
+ DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), m_result_ptr,
+ m_scratch_space, size);
}
}
}
} else {
if (m_result_ptr) {
// TODO @graph We need to effectively insert this in to the graph
- final_reducer.init(m_result_ptr);
+ m_functor_reducer.get_reducer().init(m_result_ptr);
}
}
}
template <class ViewType>
- ParallelReduce(
- const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
- : m_functor(arg_functor),
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
m_policy(arg_policy),
- m_reducer(InvalidType()),
m_result_ptr(arg_result.data()),
m_result_ptr_device_accessible(
MemorySpaceAccess<Kokkos::CudaSpace,
m_league_size(arg_policy.league_size()),
m_team_size(arg_policy.team_size()),
m_vector_size(arg_policy.impl_vector_length()) {
- cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelReduce,
- LaunchBounds>::get_cuda_func_attributes();
- m_team_size =
- m_team_size >= 0
- ? m_team_size
- : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
- m_policy.space().impl_internal_space_instance(), attr,
- m_functor, m_vector_size, m_policy.team_scratch_size(0),
- m_policy.thread_scratch_size(0)) /
- m_vector_size;
+ auto internal_space_instance =
+ m_policy.space().impl_internal_space_instance();
+
+ if (m_team_size < 0) {
+ m_team_size = arg_policy.team_size_recommended(
+ arg_functor_reducer.get_functor(), arg_functor_reducer.get_reducer(),
+ ParallelReduceTag());
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelReduce<Cuda, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
m_team_begin =
UseShflReduction
? 0
- : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(arg_functor,
- m_team_size);
+ : cuda_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(
+ arg_functor_reducer.get_functor(), m_team_size);
m_shmem_begin = sizeof(double) * (m_team_size + 2);
- m_shmem_size =
- m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+ m_shmem_size = m_policy.scratch_size(0, m_team_size) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor_reducer.get_functor(), m_team_size);
+ m_scratch_size[0] = m_shmem_size;
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+ m_scratch_locks = internal_space_instance->m_scratch_locks;
+ m_num_scratch_locks = internal_space_instance->m_num_scratch_locks;
if (m_team_size <= 0) {
m_scratch_ptr[1] = nullptr;
} else {
- auto scratch_ptr_id =
- m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(
- static_cast<std::int64_t>(Cuda::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
- m_scratch_ptr[1] = scratch_ptr_id.first;
- m_scratch_pool_id = scratch_ptr_id.second;
+ m_scratch_pool_id = internal_space_instance->acquire_team_scratch_space();
+ m_scratch_ptr[1] = internal_space_instance->resize_team_scratch_space(
+ m_scratch_pool_id,
+ static_cast<std::int64_t>(m_scratch_size[1]) *
+ (std::min(
+ static_cast<std::int64_t>(Cuda().concurrency() /
+ (m_team_size * m_vector_size)),
+ static_cast<std::int64_t>(m_league_size))));
}
// The global parallel_reduce does not support vector_length other than 1 at
// Functor's reduce memory, team scan memory, and team shared memory depend
// upon team size.
+ const int maxShmemPerBlock =
+ m_policy.space().cuda_device_prop().sharedMemPerBlock;
const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
if (!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
std::string("Kokkos::Impl::ParallelReduce< Cuda > bad team size"));
}
- if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
+ if (maxShmemPerBlock < shmem_size_total) {
Kokkos::Impl::throw_runtime_exception(
std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too much "
"L0 scratch memory"));
}
if (int(m_team_size) >
- arg_policy.team_size_max(m_functor, m_reducer, ParallelReduceTag())) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too "
- "large team size."));
- }
- }
-
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_result_ptr_device_accessible(
- MemorySpaceAccess<Kokkos::CudaSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_result_ptr_host_accessible(
- MemorySpaceAccess<Kokkos::HostSpace,
- typename ReducerType::result_view_type::
- memory_space>::accessible),
- m_scratch_space(nullptr),
- m_scratch_flags(nullptr),
- m_unified_space(nullptr),
- m_team_begin(0),
- m_shmem_begin(0),
- m_shmem_size(0),
- m_scratch_ptr{nullptr, nullptr},
- m_league_size(arg_policy.league_size()),
- m_team_size(arg_policy.team_size()),
- m_vector_size(arg_policy.impl_vector_length()) {
- cudaFuncAttributes attr =
- CudaParallelLaunch<ParallelReduce,
- LaunchBounds>::get_cuda_func_attributes();
-
- // Valid team size not provided, deduce team size
- m_team_size =
- m_team_size >= 0
- ? m_team_size
- : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
- m_policy.space().impl_internal_space_instance(), attr,
- m_functor, m_vector_size, m_policy.team_scratch_size(0),
- m_policy.thread_scratch_size(0)) /
- m_vector_size;
-
- m_team_begin =
- UseShflReduction
- ? 0
- : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
- WorkTag>(arg_functor,
- m_team_size);
- m_shmem_begin = sizeof(double) * (m_team_size + 2);
- m_shmem_size =
- m_policy.scratch_size(0, m_team_size) +
- FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
- m_scratch_size[0] = m_shmem_size;
- m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
- m_scratch_locks =
- m_policy.space().impl_internal_space_instance()->m_scratch_locks;
- if (m_team_size <= 0) {
- m_scratch_ptr[1] = nullptr;
- } else {
- auto scratch_ptr_id =
- m_policy.space()
- .impl_internal_space_instance()
- ->resize_team_scratch_space(
- static_cast<std::int64_t>(m_scratch_size[1]) *
- (std::min(
- static_cast<std::int64_t>(Cuda::concurrency() /
- (m_team_size * m_vector_size)),
- static_cast<std::int64_t>(m_league_size))));
- m_scratch_ptr[1] = scratch_ptr_id.first;
- m_scratch_pool_id = scratch_ptr_id.second;
- }
-
- // The global parallel_reduce does not support vector_length other than 1 at
- // the moment
- if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
- "greater than 1 is not currently supported for CUDA for dynamic "
- "sized reduction types.");
-
- if ((m_team_size < 32) && !UseShflReduction)
- Impl::throw_runtime_exception(
- "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
- "than 32 is not currently supported with CUDA for dynamic sized "
- "reduction types.");
-
- // Functor's reduce memory, team scan memory, and team shared memory depend
- // upon team size.
-
- const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
-
- if ((!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
- !UseShflReduction) ||
- m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
- shmem_size_total) {
- Kokkos::Impl::throw_runtime_exception(
- std::string("Kokkos::Impl::ParallelReduce< Cuda > bad team size"));
- }
-
- size_type team_size_max =
- Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
- m_policy.space().impl_internal_space_instance(), attr, m_functor,
- m_vector_size, m_policy.team_scratch_size(0),
- m_policy.thread_scratch_size(0)) /
- m_vector_size;
-
- if ((int)m_team_size > (int)team_size_max) {
+ arg_policy.team_size_max(m_functor_reducer.get_functor(),
+ m_functor_reducer.get_reducer(),
+ ParallelReduceTag())) {
Kokkos::Impl::throw_runtime_exception(
std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too "
"large team size."));
if (m_scratch_pool_id >= 0) {
m_policy.space()
.impl_internal_space_instance()
- ->m_team_scratch_pool[m_scratch_pool_id] = 0;
+ ->release_team_scratch_space(m_scratch_pool_id);
}
}
};
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_REDUCESCAN_HPP
#define KOKKOS_CUDA_REDUCESCAN_HPP
__device__ inline void cuda_inter_warp_reduction(
ValueType& value, const ReducerType& reducer,
const int max_active_thread = blockDim.y) {
-#define STEP_WIDTH 4
- // Depending on the ValueType _shared__ memory must be aligned up to 8byte
- // boundaries The reason not to use ValueType directly is that for types with
+ constexpr int step_width = 4;
+ // Depending on the ValueType, __shared__ memory must be aligned up to 8-byte
+ // boundaries. The reason not to use ValueType directly is that for types with
// constructors it could lead to race conditions
alignas(alignof(ValueType) > alignof(double) ? alignof(ValueType)
: alignof(double))
- __shared__ double sh_result[(sizeof(ValueType) + 7) / 8 * STEP_WIDTH];
+ __shared__ double sh_result[(sizeof(ValueType) + 7) / 8 * step_width];
ValueType* result = (ValueType*)&sh_result;
const int step = 32 / blockDim.x;
- int shift = STEP_WIDTH;
+ int shift = step_width;
const int id = threadIdx.y % step == 0 ? threadIdx.y / step : 65000;
- if (id < STEP_WIDTH) {
+ if (id < step_width) {
result[id] = value;
}
__syncthreads();
while (shift <= max_active_thread / step) {
- if (shift <= id && shift + STEP_WIDTH > id && threadIdx.x == 0) {
- reducer.join(&result[id % STEP_WIDTH], &value);
+ if (shift <= id && shift + step_width > id && threadIdx.x == 0) {
+ reducer.join(&result[id % step_width], &value);
}
__syncthreads();
- shift += STEP_WIDTH;
+ shift += step_width;
}
value = result[0];
- for (int i = 1; (i * step < max_active_thread) && i < STEP_WIDTH; i++)
+ for (int i = 1; (i * step < max_active_thread) && i < step_width; i++)
reducer.join(&value, &result[i]);
+ __syncthreads();
}
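// Sizing sketch for the sh_result staging buffer above, assuming a
// hypothetical 12-byte ValueType: each slot is padded to
// (12 + 7) / 8 = 2 doubles, so step_width = 4 slots occupy 8 doubles
// (64 bytes) of shared memory.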
template <class ValueType, class ReducerType>
__device__ bool cuda_inter_block_reduction(
typename FunctorType::reference_type value,
typename FunctorType::reference_type neutral, const FunctorType& reducer,
- Cuda::size_type* const m_scratch_space,
+ typename FunctorType::pointer_type const m_scratch_space,
typename FunctorType::pointer_type const /*result*/,
Cuda::size_type* const m_scratch_flags,
const int max_active_thread = blockDim.y) {
// One thread in the block writes block result to global scratch_memory
if (id == 0) {
- pointer_type global = ((pointer_type)m_scratch_space) + blockIdx.x;
+ pointer_type global = m_scratch_space + blockIdx.x;
*global = value;
}
last_block = true;
value = neutral;
- pointer_type const volatile global = (pointer_type)m_scratch_space;
+ pointer_type const volatile global = m_scratch_space;
// Reduce all global values with splitting work over threads in one warp
const int step_size =
__syncwarp(mask);
for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
- if (lane_id + delta < 32) {
+ if ((lane_id + delta < 32) && (lane_id % (delta * 2) == 0)) {
functor.join(value, value + delta);
}
__syncwarp(mask);
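// A short trace of the fixed join condition above, assuming width = 8 and
// skip_vector == false: delta = 1 joins lanes 0<-1, 2<-3, 4<-5, 6<-7;
// delta = 2 joins 0<-2 and 4<-6; delta = 4 joins 0<-4, leaving the warp's
// partial result in lane 0.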
// __launch_bounds__(maxThreadsPerBlock,minBlocksPerMultiprocessor)
// function qualifier which could be used to improve performance.
//----------------------------------------------------------------------------
-// Maximize shared memory and minimize L1 cache:
-// cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferShared );
-// For 2.0 capability: 48 KB shared and 16 KB L1
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
/*
* Algorithmic constraints:
* (a) blockDim.y <= 1024
}
// Size in bytes required for inter block reduce or scan
-template <bool DoScan, class FunctorType, class ArgTag>
+template <bool DoScan, class ArgTag, class ValueType, class FunctorType>
inline std::enable_if_t<DoScan, unsigned>
cuda_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
const unsigned BlockSize) {
using Analysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- RangePolicy<Cuda, ArgTag>, FunctorType>;
+ RangePolicy<Cuda, ArgTag>, FunctorType, ValueType>;
return (BlockSize + 2) * Analysis::value_size(functor);
}
-template <bool DoScan, class FunctorType, class ArgTag>
+template <bool DoScan, class ArgTag, class ValueType, class FunctorType>
inline std::enable_if_t<!DoScan, unsigned>
cuda_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
const unsigned BlockSize) {
using Analysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- RangePolicy<Cuda, ArgTag>, FunctorType>;
+ RangePolicy<Cuda, ArgTag>, FunctorType, ValueType>;
return (BlockSize + 2) * Analysis::value_size(functor);
}
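// e.g., assuming a double-valued reduction with BlockSize = 256, either
// overload requests (256 + 2) * sizeof(double) = 2064 bytes for the
// inter-block reduce/scan staging area.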
-template <typename WorkTag, typename Policy, typename FunctorType>
+template <typename WorkTag, typename ValueType, typename Policy,
+ typename FunctorType>
inline void check_reduced_view_shmem_size(const Policy& policy,
const FunctorType& functor) {
size_t minBlockSize = CudaTraits::WarpSize * 1;
unsigned reqShmemSize =
- cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+ cuda_single_inter_block_reduce_scan_shmem<false, WorkTag, ValueType>(
functor, minBlockSize);
- size_t maxShmemPerBlock =
- policy.space().impl_internal_space_instance()->m_maxShmemPerBlock;
+ size_t maxShmemPerBlock = policy.space().cuda_device_prop().sharedMemPerBlock;
if (reqShmemSize > maxShmemPerBlock) {
Kokkos::Impl::throw_runtime_exception(
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<
+ Kokkos::Cuda,
+ Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
+template class TaskQueueMultiple<
+ Kokkos::Cuda,
+ Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+#else
+void KOKKOS_CORE_SRC_CUDA_KOKKOS_CUDA_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) && defined( KOKKOS_ENABLE_TASKDAG \
+ ) */
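// A minimal sketch of the explicit-instantiation pattern used above, with
// hypothetical names: compiling
//   template class TaskQueue<SomeExecSpace, SomeMemSpace>;
// once in a single translation unit lets every other TU use the template
// through its declaration alone, avoiding duplicate instantiations.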
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_CUDA_TASK_HPP
#define KOKKOS_IMPL_CUDA_TASK_HPP
//----------------------------------------------------------------------------
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+
#if defined(__CUDA_ARCH__)
#define KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN(MSG) \
{ \
KOKKOS_INLINE_FUNCTION
static void iff_single_thread_recursive_execute(scheduler_type const&) {}
- static int get_max_team_count(execution_space const&) {
- return Kokkos::Impl::cuda_internal_multiprocessor_count() * warps_per_block;
+ static int get_max_team_count(execution_space const& space) {
+ return space.cuda_device_prop().multiProcessorCount * warps_per_block;
}
__device__ static void driver(scheduler_type scheduler,
}
}
+ // FIXME_CUDA_MULTIPLE_DEVICES
static void execute(scheduler_type const& scheduler) {
const int shared_per_warp = 2048;
- const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+ const Kokkos::Cuda& exec = scheduler.get_execution_space();
+ const auto& impl_instance = exec.impl_internal_space_instance();
+ const int multi_processor_count =
+ exec.cuda_device_prop().multiProcessorCount;
+ const dim3 grid(multi_processor_count, 1, 1);
const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
const int shared_total = shared_per_warp * warps_per_block;
const cudaStream_t stream = nullptr;
// Query the stack size, in bytes:
size_t previous_stack_size = 0;
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceGetLimit(&previous_stack_size, cudaLimitStackSize));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_get_limit_wrapper(
+ &previous_stack_size, cudaLimitStackSize));
// If not large enough then set the stack size, in bytes:
const size_t larger_stack_size = 1 << 11;
if (previous_stack_size < larger_stack_size) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceSetLimit(cudaLimitStackSize, larger_stack_size));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_set_limit_wrapper(
+ cudaLimitStackSize, larger_stack_size));
}
cuda_task_queue_execute<<<grid, block, shared_total, stream>>>(
"Cuda>::execute: Post Task Execution");
if (previous_stack_size < larger_stack_size) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceSetLimit(cudaLimitStackSize, previous_stack_size));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_set_limit_wrapper(
+ cudaLimitStackSize, previous_stack_size));
}
}
<<<1, 1>>>(ptr_ptr, dtor_ptr);
KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+
Impl::cuda_device_synchronize(
"Kokkos::Impl::TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::"
"Cuda>::execute: Post Get Function Pointer for Tasks");
} while (1);
}
+ // FIXME_CUDA_MULTIPLE_DEVICES
static void execute(scheduler_type const& scheduler) {
const int shared_per_warp = 2048;
const int warps_per_block = 4;
- const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+ const Kokkos::Cuda exec = Cuda(); // FIXME_CUDA_MULTIPLE_DEVICES
+ const auto& impl_instance = exec.impl_internal_space_instance();
+ const int multi_processor_count =
+ // FIXME not sure why this didn't work
+ // exec.cuda_device_prop().multiProcessorCount;
+ impl_instance->m_deviceProp.multiProcessorCount;
+ const dim3 grid(multi_processor_count, 1, 1);
// const dim3 grid( 1 , 1 , 1 );
const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
const int shared_total = shared_per_warp * warps_per_block;
// Query the stack size, in bytes:
size_t previous_stack_size = 0;
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceGetLimit(&previous_stack_size, cudaLimitStackSize));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_get_limit_wrapper(
+ &previous_stack_size, cudaLimitStackSize));
// If not large enough then set the stack size, in bytes:
const size_t larger_stack_size = 2048;
if (previous_stack_size < larger_stack_size) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceSetLimit(cudaLimitStackSize, larger_stack_size));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_set_limit_wrapper(
+ cudaLimitStackSize, larger_stack_size));
}
cuda_task_queue_execute<<<grid, block, shared_total, stream>>>(
"Kokkos::Cuda>::execute: Post Execute Task");
if (previous_stack_size < larger_stack_size) {
- KOKKOS_IMPL_CUDA_SAFE_CALL(
- cudaDeviceSetLimit(cudaLimitStackSize, previous_stack_size));
+ KOKKOS_IMPL_CUDA_SAFE_CALL(impl_instance->cuda_device_set_limit_wrapper(
+ cudaLimitStackSize, previous_stack_size));
}
}
private:
enum : int { WarpSize = Kokkos::Impl::CudaTraits::WarpSize };
- TaskExec(TaskExec&&) = delete;
- TaskExec(TaskExec const&) = delete;
- TaskExec& operator=(TaskExec&&) = delete;
+ TaskExec(TaskExec&&) = delete;
+ TaskExec(TaskExec const&) = delete;
+ TaskExec& operator=(TaskExec&&) = delete;
TaskExec& operator=(TaskExec const&) = delete;
friend class Kokkos::Impl::TaskQueue<
// Extract value_type from closure
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
if (1 < loop_boundaries.thread.team_size()) {
// make sure all threads perform all loop iterations
// Extract value_type from closure
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
if (1 < loop_boundaries.thread.team_size()) {
// make sure all threads perform all loop iterations
#undef KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+
#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
#endif /* #ifndef KOKKOS_IMPL_CUDA_TASK_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_TEAM_HPP
#define KOKKOS_CUDA_TEAM_HPP
public:
using execution_space = Kokkos::Cuda;
using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = CudaTeamMember;
private:
mutable void* m_team_reduce;
* ( 1 == blockDim.z )
*/
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<ReducerType>>
team_reduce(ReducerType const& reducer) const noexcept {
team_reduce(reducer, reducer.reference());
}
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<ReducerType>>
team_reduce(ReducerType const& reducer,
typename ReducerType::value_type& value) const noexcept {
(void)reducer;
(void)value;
+
+ KOKKOS_IF_ON_DEVICE((
+ typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, TeamPolicy<Cuda>,
+ ReducerType, typename ReducerType::value_type>::Reducer
+ wrapped_reducer(reducer);
+
+ impl_team_reduce(wrapped_reducer, value); reducer.reference() = value;))
+ }
+
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<WrappedReducerType>>
+ impl_team_reduce(
+ WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) const noexcept {
+ (void)wrapped_reducer;
+ (void)value;
+
KOKKOS_IF_ON_DEVICE(
- (typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- TeamPolicy<Cuda>, ReducerType>::Reducer
- wrapped_reducer(&reducer);
- cuda_intra_block_reduction(value, wrapped_reducer, blockDim.y);
- reducer.reference() = value;))
+ (cuda_intra_block_reduction(value, wrapped_reducer, blockDim.y);))
}
//--------------------------------------------------------------------------
Impl::CudaJoinFunctor<Type> cuda_join_functor;
typename Impl::FunctorAnalysis<
Impl::FunctorPatternInterface::SCAN, TeamPolicy<Cuda>,
- Impl::CudaJoinFunctor<Type>>::Reducer reducer(&cuda_join_functor);
+ Impl::CudaJoinFunctor<Type>, Type>::Reducer
+ reducer(cuda_join_functor);
Impl::cuda_intra_block_reduce_scan<true>(reducer, base_data + 1);
if (global_accum) {
//----------------------------------------
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer_v<ReducerType>>
vector_reduce(ReducerType const& reducer) {
vector_reduce(reducer, reducer.reference());
}
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer_v<ReducerType>>
vector_reduce(ReducerType const& reducer,
typename ReducerType::value_type& value) {
(void)reducer;
(void)value;
+
+ KOKKOS_IF_ON_DEVICE(
+ (typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, TeamPolicy<Cuda>,
+ ReducerType, typename ReducerType::value_type>::Reducer
+ wrapped_reducer(reducer);
+
+ impl_vector_reduce(wrapped_reducer, value);
+ reducer.reference() = value;))
+ }
+
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ is_reducer_v<WrappedReducerType>>
+ impl_vector_reduce(WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) {
+ (void)wrapped_reducer;
+ (void)value;
+
KOKKOS_IF_ON_DEVICE(
(if (blockDim.x == 1) return;
// Intra vector lane shuffle reduction:
- typename ReducerType::value_type tmp(value);
- typename ReducerType::value_type tmp2 = tmp;
+ typename WrappedReducerType::value_type tmp(value);
+ typename WrappedReducerType::value_type tmp2 = tmp;
unsigned mask =
blockDim.x == 32
for (int i = blockDim.x; (i >>= 1);) {
Impl::in_place_shfl_down(tmp2, tmp, i, blockDim.x, mask);
if ((int)threadIdx.x < i) {
- reducer.join(tmp, tmp2);
+ wrapped_reducer.join(&tmp, &tmp2);
}
}
// and thus different threads could have different results.
Impl::in_place_shfl(tmp2, tmp, 0, blockDim.x, mask);
- value = tmp2; reducer.reference() = tmp2;))
+ value = tmp2;))
}
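  // A hedged sketch of the user-facing pattern that reaches
  // impl_vector_reduce: a ThreadVectorRange reduction inside a team lambda.
  // After the shuffle broadcast, the result is identical on every vector
  // lane. The vector length of 32 and extents are placeholders.
  //
  //   Kokkos::parallel_for(
  //       Kokkos::TeamPolicy<Kokkos::Cuda>(16, Kokkos::AUTO, 32),
  //       KOKKOS_LAMBDA(
  //           const Kokkos::TeamPolicy<Kokkos::Cuda>::member_type& t) {
  //         double lane_sum = 0.0;
  //         Kokkos::parallel_reduce(
  //             Kokkos::ThreadVectorRange(t, 256),
  //             [=](int i, double& v) { v += i; }, lane_sum);
  //       });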
//----------------------------------------
ThreadVectorRangeBoundariesStruct(const CudaTeamMember, index_type count)
: start(static_cast<index_type>(0)), end(count) {}
- KOKKOS_INLINE_FUNCTION
- ThreadVectorRangeBoundariesStruct(index_type count)
- : start(static_cast<index_type>(0)), end(count) {}
-
KOKKOS_INLINE_FUNCTION
ThreadVectorRangeBoundariesStruct(const CudaTeamMember, index_type arg_begin,
index_type arg_end)
: start(arg_begin), end(arg_end) {}
-
- KOKKOS_INLINE_FUNCTION
- ThreadVectorRangeBoundariesStruct(index_type arg_begin, index_type arg_end)
- : start(arg_begin), end(arg_end) {}
};
} // namespace Impl
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::CudaTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
- (void)loop_boundaries;
- (void)closure;
- (void)reducer;
KOKKOS_IF_ON_DEVICE(
- (typename ReducerType::value_type value;
+ (using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
- reducer.init(value);
+ wrapped_reducer_type wrapped_reducer(reducer); value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.y;
i < loop_boundaries.end; i += blockDim.y) { closure(i, value); }
- loop_boundaries.member.team_reduce(reducer, value);))
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value); reducer.reference() = value;))
+ // Avoid bogus warning about reducer value being uninitialized with combined
+ // reducers
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure;
+ reducer.init(reducer.reference());
+ Kokkos::abort("Should only run on the device!");));
}
/** \brief Inter-thread parallel_reduce assuming summation.
(void)loop_boundaries;
(void)closure;
(void)result;
+
KOKKOS_IF_ON_DEVICE(
- (ValueType val; Kokkos::Sum<ValueType> reducer(val);
+ (using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- reducer.init(reducer.reference());
+ wrapped_reducer_type wrapped_reducer(closure); value_type value{};
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.y;
- i < loop_boundaries.end; i += blockDim.y) { closure(i, val); }
+ i < loop_boundaries.end; i += blockDim.y) { closure(i, value); }
- loop_boundaries.member.team_reduce(reducer, val);
- result = reducer.reference();))
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value); result = value;
+
+ ))
}
template <typename iType, class Closure>
parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
iType, Impl::CudaTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
- (void)loop_boundaries;
- (void)closure;
- (void)reducer;
- KOKKOS_IF_ON_DEVICE((typename ReducerType::value_type value;
- reducer.init(value);
-
- for (iType i = loop_boundaries.start +
- threadIdx.y * blockDim.x + threadIdx.x;
- i < loop_boundaries.end;
- i += blockDim.y * blockDim.x) { closure(i, value); }
-
- loop_boundaries.member.vector_reduce(reducer, value);
- loop_boundaries.member.team_reduce(reducer, value);))
+ KOKKOS_IF_ON_DEVICE(
+ (using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer); value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i =
+ loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+ i < loop_boundaries.end;
+ i += blockDim.y * blockDim.x) { closure(i, value); }
+
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value); reducer.reference() = value;))
+
+ // Avoid bogus warning about reducer value being uninitialized with combined
+ // reducers
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure;
+ reducer.init(reducer.reference());
+ Kokkos::abort("Should only run on the device!");));
}
template <typename iType, class Closure, typename ValueType>
(void)loop_boundaries;
(void)closure;
(void)result;
- KOKKOS_IF_ON_DEVICE((ValueType val; Kokkos::Sum<ValueType> reducer(val);
- reducer.init(reducer.reference());
+ KOKKOS_IF_ON_DEVICE(
+ (using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- for (iType i = loop_boundaries.start +
- threadIdx.y * blockDim.x + threadIdx.x;
- i < loop_boundaries.end;
- i += blockDim.y * blockDim.x) { closure(i, val); }
+ wrapped_reducer_type wrapped_reducer(closure); value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i =
+ loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+ i < loop_boundaries.end;
+ i += blockDim.y * blockDim.x) { closure(i, value); }
+
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
- loop_boundaries.member.vector_reduce(reducer);
- loop_boundaries.member.team_reduce(reducer);
- result = reducer.reference();))
+ wrapped_reducer.final(&value); result = value;))
}
//----------------------------------------------------------------------------
parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::CudaTeamMember> const& loop_boundaries,
Closure const& closure, ReducerType const& reducer) {
- (void)loop_boundaries;
- (void)closure;
- (void)reducer;
KOKKOS_IF_ON_DEVICE((
- reducer.init(reducer.reference());
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer); value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.x;
- i < loop_boundaries.end;
- i += blockDim.x) { closure(i, reducer.reference()); }
+ i < loop_boundaries.end; i += blockDim.x) { closure(i, value); }
- Impl::CudaTeamMember::vector_reduce(reducer);
+ Impl::CudaTeamMember::impl_vector_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value); reducer.reference() = value;
))
+ // Avoid bogus warning about reducer value being uninitialized with combined
+ // reducers
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure;
+ reducer.init(reducer.reference());
+ Kokkos::abort("Should only run on the device!");));
}
/** \brief Intra-thread vector parallel_reduce.
(void)loop_boundaries;
(void)closure;
(void)result;
- KOKKOS_IF_ON_DEVICE(
- (result = ValueType();
- for (iType i = loop_boundaries.start + threadIdx.x;
- i < loop_boundaries.end; i += blockDim.x) { closure(i, result); }
+ KOKKOS_IF_ON_DEVICE((
- Impl::CudaTeamMember::vector_reduce(Kokkos::Sum<ValueType>(result));
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::CudaTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- ))
+ wrapped_reducer_type wrapped_reducer(closure); value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i = loop_boundaries.start + threadIdx.x;
+ i < loop_boundaries.end; i += blockDim.x) { closure(i, value); }
+
+ Impl::CudaTeamMember::impl_vector_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value); result = value;
+
+ ))
}
//----------------------------------------------------------------------------
* final == true.
*/
// This is the same code as in HIP and largely the same as in OpenMPTarget
-template <typename iType, typename FunctorType>
+template <typename iType, typename FunctorType, typename ValueType>
KOKKOS_INLINE_FUNCTION void parallel_scan(
const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
loop_bounds,
- const FunctorType& lambda) {
- // Extract value_type from lambda
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void,
- FunctorType>::value_type;
+ const FunctorType& lambda, ValueType& return_val) {
+ // Extract ValueType from the Functor
+ using functor_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same<functor_value_type, ValueType>::value,
+ "Non-matching value types of functor and return type");
const auto start = loop_bounds.start;
const auto end = loop_bounds.end;
const auto team_size = member.team_size();
const auto team_rank = member.team_rank();
const auto nchunk = (end - start + team_size - 1) / team_size;
- value_type accum = 0;
+ ValueType accum = 0;
// each team has to process one or more chunks of the prefix scan
for (iType i = 0; i < nchunk; ++i) {
auto ii = start + i * team_size + team_rank;
// local accumulation for this chunk
- value_type local_accum = 0;
+ ValueType local_accum = 0;
// user updates value with prefix value
if (ii < loop_bounds.end) lambda(ii, local_accum, false);
// perform team scan
// broadcast last value to rest of the team
member.team_broadcast(accum, team_size - 1);
}
+ return_val = accum;
+}
+
+/** \brief Inter-thread parallel exclusive prefix sum.
+ *
+ * Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to each rank in the team (whose global rank is
+ * less than N) and a scan operation is performed. The last call to closure has
+ * final == true.
+ */
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+ loop_bounds,
+ const FunctorType& lambda) {
+ // Extract value_type from functor
+ using value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+
+ value_type dummy;
+ parallel_scan(loop_bounds, lambda, dummy);
}
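// A hedged sketch of the overload above that returns the total: every thread
// of the team receives the same grand total. Extents are placeholders.
//
//   long total = 0;
//   Kokkos::parallel_scan(
//       Kokkos::TeamThreadRange(team, 100),
//       [=](int i, long& partial, bool final) {
//         // in the final pass, partial holds the exclusive prefix at i
//         partial += i;
//       },
//       total);  // total == 0 + 1 + ... + 99 on every team thread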
//----------------------------------------------------------------------------
// exclusive scan -- the final accumulation
// of i's val will be included in the second
// closure call later.
- if (i < loop_boundaries.end && threadIdx.x > 0) {
+ if (i - 1 < loop_boundaries.end && threadIdx.x > 0) {
closure(i - 1, val, false);
}
Impl::in_place_shfl(accum, val, mask, blockDim.x, active_mask);
}
+ reducer.reference() = accum;
+
))
}
loop_boundaries,
const Closure& closure) {
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
value_type dummy;
parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>(dummy));
}
+/** \brief Intra-thread vector parallel exclusive prefix sum.
+ *
+ * Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes in the
+ * thread and a scan operation is performed.
+ * The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+ loop_boundaries,
+ const Closure& closure, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ ValueType>::value_type;
+ static_assert(std::is_same<closure_value_type, ValueType>::value,
+ "Non-matching value types of closure and return type");
+
+ ValueType accum;
+ parallel_scan(loop_boundaries, closure, Kokkos::Sum<ValueType>(accum));
+
+ return_val = accum;
+}
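// A hedged sketch of the vector-level scan-with-total overload above; the
// extent is a placeholder.
//
//   int total = 0;
//   Kokkos::parallel_scan(
//       Kokkos::ThreadVectorRange(team, 64),
//       [=](int i, int& partial, bool final) { partial += i; },
//       total);  // identical on every vector lane afterwards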
+
} // namespace Kokkos
namespace Kokkos {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CUDA_UNIQUE_TOKEN_HPP
#define KOKKOS_CUDA_UNIQUE_TOKEN_HPP
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_CUDA
-#include <Kokkos_CudaSpace.hpp>
+#include <Cuda/Kokkos_CudaSpace.hpp>
#include <Kokkos_UniqueToken.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
namespace Kokkos {
idx = idx % size();
}
#endif
-// Make sure that all writes in the previous lock owner are visible to me
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+ // Make sure that all writes in the previous lock owner are visible to me
desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
return idx;
}
/// \brief release an acquired value
KOKKOS_INLINE_FUNCTION
void release(size_type idx) const noexcept {
-// Make sure my writes are visible to the next lock owner
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+ // Make sure my writes are visible to the next lock owner
desul::atomic_thread_fence(desul::MemoryOrderRelease(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
(void)Kokkos::atomic_exchange(&m_locks(idx), 0);
}
};
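// A hedged usage sketch of the acquire/release protocol the fences above
// protect; the token pool and extent `n` are illustrative.
//
//   Kokkos::Experimental::UniqueToken<Kokkos::Cuda> token;
//   Kokkos::parallel_for(
//       Kokkos::RangePolicy<Kokkos::Cuda>(0, n), KOKKOS_LAMBDA(int) {
//         const int id = token.acquire();  // acquire: see prior owner's writes
//         /* exclusive use of per-token slot id */
//         token.release(id);  // release: publish writes to the next owner
//       });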
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_CUDA_VECTORIZATION_HPP
#define KOKKOS_CUDA_VECTORIZATION_HPP
struct in_place_shfl_fn : in_place_shfl_op<in_place_shfl_fn> {
template <class T>
__device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
- int lane, int width) const
- noexcept {
+ int lane,
+ int width) const noexcept {
(void)mask;
(void)val;
(void)lane;
};
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl(Args&&... args) noexcept {
- in_place_shfl_fn{}((Args &&) args...);
+ in_place_shfl_fn{}((Args&&)args...);
}
struct in_place_shfl_up_fn : in_place_shfl_op<in_place_shfl_up_fn> {
template <class T>
__device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
- int lane, int width) const
- noexcept {
+ int lane,
+ int width) const noexcept {
return __shfl_up_sync(mask, val, lane, width);
}
};
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_up(
Args&&... args) noexcept {
- in_place_shfl_up_fn{}((Args &&) args...);
+ in_place_shfl_up_fn{}((Args&&)args...);
}
struct in_place_shfl_down_fn : in_place_shfl_op<in_place_shfl_down_fn> {
template <class T>
__device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
- int lane, int width) const
- noexcept {
+ int lane,
+ int width) const noexcept {
(void)mask;
(void)val;
(void)lane;
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_down(
Args&&... args) noexcept {
- in_place_shfl_down_fn{}((Args &&) args...);
+ in_place_shfl_down_fn{}((Args&&)args...);
}
} // namespace Impl
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
+#define KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename ValueType, typename AliasType>
+struct CudaLDGFetch {
+ const ValueType* m_ptr;
+
+ template <typename iType>
+ KOKKOS_FUNCTION ValueType operator[](const iType& i) const {
+#if defined(KOKKOS_ARCH_KEPLER30) || defined(KOKKOS_ARCH_KEPLER32)
+ return m_ptr[i];
+#else
+ KOKKOS_IF_ON_DEVICE(
+ (AliasType v = __ldg(reinterpret_cast<const AliasType*>(&m_ptr[i]));
+ return *(reinterpret_cast<ValueType*>(&v));))
+ KOKKOS_IF_ON_HOST((return m_ptr[i];))
+#endif
+ }
+
+ KOKKOS_FUNCTION
+ operator const ValueType*() const { return m_ptr; }
+
+ KOKKOS_DEFAULTED_FUNCTION
+ CudaLDGFetch() = default;
+
+ KOKKOS_FUNCTION
+ explicit CudaLDGFetch(const ValueType* const arg_ptr) : m_ptr(arg_ptr) {}
+
+ KOKKOS_FUNCTION
+ CudaLDGFetch(CudaLDGFetch const rhs, size_t offset)
+ : m_ptr(rhs.m_ptr + offset) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief Replace the default ViewDataHandle with the CudaLDGFetch
+ * specialization when the View has a const value type, lives in CudaSpace or
+ * CudaUVMSpace, and carries the RandomAccess memory trait.
+ */
+template <class Traits>
+class ViewDataHandle<
+ Traits, std::enable_if_t<(
+ // Is Cuda memory space
+ (std::is_same<typename Traits::memory_space,
+ Kokkos::CudaSpace>::value ||
+ std::is_same<typename Traits::memory_space,
+ Kokkos::CudaUVMSpace>::value) &&
+ // Is a trivial const value of 4, 8, or 16 bytes
+ std::is_trivial<typename Traits::const_value_type>::value &&
+ std::is_same<typename Traits::const_value_type,
+ typename Traits::value_type>::value &&
+ (sizeof(typename Traits::const_value_type) == 4 ||
+ sizeof(typename Traits::const_value_type) == 8 ||
+ sizeof(typename Traits::const_value_type) == 16) &&
+ // Random access trait
+ (Traits::memory_traits::is_random_access != 0))>> {
+ public:
+ using track_type = Kokkos::Impl::SharedAllocationTracker;
+
+ using value_type = typename Traits::const_value_type;
+ using return_type = typename Traits::const_value_type; // NOT a reference
+
+ using alias_type = std::conditional_t<
+ (sizeof(value_type) == 4), int,
+ std::conditional_t<
+ (sizeof(value_type) == 8), ::int2,
+ std::conditional_t<(sizeof(value_type) == 16), ::int4, void>>>;
+
+ using handle_type = Kokkos::Impl::CudaLDGFetch<value_type, alias_type>;
+
+ KOKKOS_INLINE_FUNCTION
+ static handle_type const& assign(handle_type const& arg_handle,
+ track_type const& /* arg_tracker */) {
+ return arg_handle;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static handle_type const assign(handle_type const& arg_handle,
+ size_t offset) {
+ return handle_type(arg_handle, offset);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static handle_type assign(value_type* arg_data_ptr,
+ track_type const& /*arg_tracker*/) {
+ if (arg_data_ptr == nullptr) return handle_type();
+ return handle_type(arg_data_ptr);
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
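+// A hedged sketch of what opts a View into the CudaLDGFetch handle above: a
+// const value type with the RandomAccess memory trait in CudaSpace. Names and
+// extents are placeholders.
+//
+//   Kokkos::View<const double*, Kokkos::CudaSpace,
+//                Kokkos::MemoryTraits<Kokkos::RandomAccess>> x = ...;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<Kokkos::Cuda>(0, x.extent(0)),
+//       KOKKOS_LAMBDA(int i, double& s) { s += x(i); },  // reads via __ldg
+//       sum);
+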
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_CUDA_WORKGRAPHPOLICY_HPP
#define KOKKOS_CUDA_WORKGRAPHPOLICY_HPP
-#include <Kokkos_Cuda.hpp>
+#include <Cuda/Kokkos_Cuda.hpp>
#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
namespace Kokkos {
Policy const& get_policy() const { return m_policy; }
__device__ inline void operator()() const noexcept {
- if (0 == (threadIdx.y % 16)) {
+    // The following makes most threads idle, which helps throughput
+    // significantly by reducing contention during work acquisition. The
+    // divisor was tuned with the static Fibonacci benchmark on Volta.
+ if (0 == (threadIdx.y % 4)) {
// Spin until COMPLETED_TOKEN.
// END_TOKEN indicates no work is currently available.
exec_one<typename Policy::work_tag>(w);
m_policy.completed_work(w);
}
+// On pre-Volta architectures we need a __syncwarp here to prevent
+// infinite loops that can arise from the scheduling order above.
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
+ defined(KOKKOS_ARCH_PASCAL)
+ __syncwarp(__activemask());
+#endif
}
}
}
inline void execute() {
const int warps_per_block = 4;
- const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+ const int multi_processor_count =
+ m_policy.space().cuda_device_prop().multiProcessorCount;
+ const dim3 grid(multi_processor_count, 1, 1);
const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
const int shared = 0;
Kokkos::Impl::CudaParallelLaunch<Self>(
- *this, grid, block, shared, Cuda().impl_internal_space_instance(),
- false);
+ *this, grid, block, shared, Cuda().impl_internal_space_instance());
}
inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#ifndef KOKKOS_CUDA_ZEROMEMSET_HPP
+#define KOKKOS_CUDA_ZEROMEMSET_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Cuda/Kokkos_Cuda.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct ZeroMemset<Kokkos::Cuda> {
+ ZeroMemset(const Kokkos::Cuda& exec_space_instance, void* dst, size_t cnt) {
+ KOKKOS_IMPL_CUDA_SAFE_CALL(
+ (exec_space_instance.impl_internal_space_instance()
+ ->cuda_memset_async_wrapper(dst, 0, cnt)));
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
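+// A hedged sketch (assumption, not stated in this file) of one path that can
+// dispatch the ZeroMemset specialization above: zero-filling a
+// trivially-copyable View through deep_copy on a Cuda instance.
+//
+//   Kokkos::View<int*, Kokkos::CudaSpace> v("v", 1000);
+//   Kokkos::deep_copy(Kokkos::Cuda{}, v, 0);  // may lower to an async memset
+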
+
+#endif // !defined(KOKKOS_CUDA_ZEROMEMSET_HPP)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_ABORT_HPP
+#define KOKKOS_CUDA_ABORT_HPP
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <cuda.h>
+
+extern "C" {
+/* CUDA runtime function, declared in <crt/device_runtime.h>
+ * Requires capability 2.x or better.
+ */
+[[noreturn]] __device__ void __assertfail(const void *message, const void *file,
+ unsigned int line,
+ const void *function,
+ size_t charsize);
+}
+
+namespace Kokkos {
+namespace Impl {
+
+[[noreturn]] __device__ static void cuda_abort(const char *const message) {
+ const char empty[] = "";
+
+ __assertfail((const void *)message, (const void *)empty, (unsigned int)0,
+ (const void *)empty, sizeof(char));
+}
+
+} // namespace Impl
+} // namespace Kokkos
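+// A hedged sketch (illustrative, not from this file): Kokkos::abort called in
+// device code funnels into cuda_abort above. `n` and `data` are placeholders.
+//
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Cuda>(0, n), KOKKOS_LAMBDA(int i) {
+//         if (data(i) < 0) Kokkos::abort("negative input");
+//       });
+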
+#else
+void KOKKOS_CORE_SRC_CUDA_ABORT_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_CUDA_ABORT_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <HIP/Kokkos_HIP.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <hip/hip_runtime_api.h>
+
+#include <iostream>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int HIP::concurrency() {
+#else
+int HIP::concurrency() const {
+#endif
+ return Impl::HIPInternal::concurrency();
+}
+
+int HIP::impl_is_initialized() {
+ return Impl::HIPInternal::singleton().is_initialized();
+}
+
+void HIP::impl_initialize(InitializationSettings const& settings) {
+ const std::vector<int>& visible_devices = Impl::get_visible_devices();
+ const int hip_device_id =
+ Impl::get_gpu(settings).value_or(visible_devices[0]);
+
+ Impl::HIPInternal::m_hipDev = hip_device_id;
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipGetDeviceProperties(&Impl::HIPInternal::m_deviceProp, hip_device_id));
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipSetDevice(hip_device_id));
+
+ // Check that we are running on the expected architecture. We print a warning
+ // instead of erroring out because AMD does not guarantee that gcnArchName
+ // will always contain the gfx flag.
+ if (Kokkos::show_warnings()) {
+ if (std::string_view arch_name =
+ Impl::HIPInternal::m_deviceProp.gcnArchName;
+ arch_name.find(KOKKOS_ARCH_AMD_GPU) != 0) {
+ std::cerr
+ << "Kokkos::HIP::initialize WARNING: running kernels compiled for "
+ << KOKKOS_ARCH_AMD_GPU << " on " << arch_name << " device.\n";
+ }
+ }
+
+  // Warn if the selected GFX942 target (MI300A APU vs. MI300X discrete GPU)
+  // does not match the actual device
+#ifdef KOKKOS_ARCH_AMD_GFX942
+ if ((Kokkos::show_warnings()) &&
+ (Impl::HIPInternal::m_deviceProp.integrated == 1)) {
+ std::cerr << "Kokkos::HIP::initialize WARNING: running kernels for MI300X "
+ "(discrete GPU) on a MI300A (APU).\n";
+ }
+#endif
+#ifdef KOKKOS_ARCH_AMD_GFX942_APU
+ if ((Kokkos::show_warnings()) &&
+ (Impl::HIPInternal::m_deviceProp.integrated == 0)) {
+ std::cerr << "Kokkos::HIP::initialize WARNING: running kernels for MI300A "
+ "(APU) on a MI300X (discrete GPU).\n";
+ }
+#endif
+
+  // Theoretically, GFX9xx GPUs can schedule 40 wavefronts per CU, but only
+  // 32 can be sustained; see
+  // https://github.com/ROCm/clr/blob/4d0b815d06751735e6a50fa46e913fdf85f751f0/hipamd/src/hip_platform.cpp#L362-L366
+ const int maxWavesPerCU =
+ Impl::HIPInternal::m_deviceProp.major <= 9 ? 32 : 64;
+ Impl::HIPInternal::m_maxThreadsPerSM =
+ maxWavesPerCU * Impl::HIPTraits::WarpSize;
+
+  // Init the lock array used for arbitrarily sized atomics
+ desul::Impl::init_lock_arrays(); // FIXME
+
+  // Allocate a staging buffer for constant memory in pinned host memory, and
+  // an event so that we do not overwrite constant memory still in use by
+  // previously launched kernels
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipHostMalloc((void**)&Impl::HIPInternal::constantMemHostStaging,
+ Impl::HIPTraits::ConstantMemoryUsage));
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipEventCreate(&Impl::HIPInternal::constantMemReusable));
+
+ hipStream_t singleton_stream;
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&singleton_stream));
+ Impl::HIPInternal::singleton().initialize(singleton_stream);
+}
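+
+// A hedged sketch of how impl_initialize is reached in practice: through
+// Kokkos::initialize with an InitializationSettings object. The device id 0
+// is a placeholder.
+//
+//   Kokkos::initialize(Kokkos::InitializationSettings().set_device_id(0));
+//   // device selected, architecture checked, singleton stream created
+//   Kokkos::finalize();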
+
+void HIP::impl_finalize() {
+ (void)Impl::hip_global_unique_token_locks(true);
+
+ desul::Impl::finalize_lock_arrays(); // FIXME
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipEventDestroy(Impl::HIPInternal::constantMemReusable));
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipHostFree(Impl::HIPInternal::constantMemHostStaging));
+
+ Impl::HIPInternal::singleton().finalize();
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipStreamDestroy(Impl::HIPInternal::singleton().m_stream));
+}
+
+HIP::HIP()
+ : m_space_instance(&Impl::HIPInternal::singleton(),
+ [](Impl::HIPInternal*) {}) {
+ Impl::HIPInternal::singleton().verify_is_initialized(
+ "HIP instance constructor");
+}
+
+HIP::HIP(hipStream_t const stream, Impl::ManageStream manage_stream)
+ : m_space_instance(
+ new Impl::HIPInternal, [manage_stream](Impl::HIPInternal* ptr) {
+ ptr->finalize();
+ if (static_cast<bool>(manage_stream)) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamDestroy(ptr->m_stream));
+ }
+ delete ptr;
+ }) {
+ Impl::HIPInternal::singleton().verify_is_initialized(
+ "HIP instance constructor");
+ m_space_instance->initialize(stream);
+}
+
+KOKKOS_DEPRECATED HIP::HIP(hipStream_t const stream, bool manage_stream)
+ : HIP(stream,
+ manage_stream ? Impl::ManageStream::yes : Impl::ManageStream::no) {}
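+
+// A hedged sketch of wrapping an existing hipStream_t; with ManageStream::yes
+// the deleter above destroys the stream when the last instance copy expires.
+//
+//   hipStream_t stream;
+//   KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&stream));
+//   {
+//     Kokkos::HIP exec(stream, Kokkos::Impl::ManageStream::yes);
+//     exec.fence("drain wrapped stream");
+//   }  // instance teardown finalizes and destroys the stream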
+
+void HIP::print_configuration(std::ostream& os, bool /*verbose*/) const {
+ os << "Device Execution Space:\n";
+ os << " KOKKOS_ENABLE_HIP: yes\n";
+
+ os << "HIP Options:\n";
+ os << " KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE: ";
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+
+ os << "\nRuntime Configuration:\n";
+
+ m_space_instance->print_configuration(os);
+}
+
+uint32_t HIP::impl_instance_id() const noexcept {
+ return m_space_instance->impl_get_instance_id();
+}
+void HIP::impl_static_fence(const std::string& name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<HIP>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+ [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize()); });
+}
+
+void HIP::fence(const std::string& name) const {
+ m_space_instance->fence(name);
+}
+
+hipStream_t HIP::hip_stream() const { return m_space_instance->m_stream; }
+
+int HIP::hip_device() const { return impl_internal_space_instance()->m_hipDev; }
+
+hipDeviceProp_t const& HIP::hip_device_prop() {
+ return Impl::HIPInternal::singleton().m_deviceProp;
+}
+
+const char* HIP::name() { return "HIP"; }
+
+namespace Impl {
+
+int g_hip_space_factory_initialized = initialize_space_factory<HIP>("150_HIP");
+
+} // namespace Impl
+
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_HPP
+#define KOKKOS_HIP_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_Layout.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
+
+#include <hip/hip_runtime_api.h>
+
+namespace Kokkos {
+namespace Impl {
+class HIPInternal;
+enum class ManageStream : bool { no, yes };
+} // namespace Impl
+/// \class HIP
+/// \brief Kokkos execution space that runs on AMD GPUs via the HIP runtime.
+class HIP {
+ public:
+ //------------------------------------
+ //! \name Type declarations that all Kokkos devices must provide.
+ //@{
+
+  //! Tag this class as a Kokkos execution space
+ using execution_space = HIP;
+ using memory_space = HIPSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ using array_layout = LayoutLeft;
+ using size_type = HIPSpace::size_type;
+
+ using scratch_memory_space = ScratchMemorySpace<HIP>;
+
+ HIP();
+
+ explicit HIP(hipStream_t stream) : HIP(stream, Impl::ManageStream::no) {}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "HIP execution space should be constructed explicitly.")
+ HIP(hipStream_t stream)
+ : HIP(stream) {}
+#endif
+
+ HIP(hipStream_t stream, Impl::ManageStream manage_stream);
+
+ KOKKOS_DEPRECATED HIP(hipStream_t stream, bool manage_stream);
+
+ //@}
+ //------------------------------------
+ //! \name Functions that all Kokkos devices must implement.
+ //@{
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION static int in_parallel() {
+#if defined(__HIP_DEVICE_COMPILE__)
+ return true;
+#else
+ return false;
+#endif
+ }
+#endif
+
+ /** \brief Wait until all dispatched functors complete.
+ *
+ * The parallel_for or parallel_reduce dispatch of a functor may return
+ * asynchronously, before the functor completes. This method does not return
+ * until all dispatched functors on this device have completed.
+ */
+ static void impl_static_fence(const std::string& name);
+
+ void fence(const std::string& name =
+ "Kokkos::HIP::fence(): Unnamed Instance Fence") const;
+
+ hipStream_t hip_stream() const;
+
+ /// \brief Print configuration information to the given output stream.
+ void print_configuration(std::ostream& os, bool verbose = false) const;
+
+ /// \brief Free any resources being consumed by the device.
+ static void impl_finalize();
+
+  int hip_device() const;
+  static hipDeviceProp_t const& hip_device_prop();
+
+  /// \brief Initialize the device.
+  static void impl_initialize(InitializationSettings const&);
+
+ static int impl_is_initialized();
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED static size_type detect_device_count() {
+ int count;
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceCount(&count));
+ return count;
+ }
+#endif
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ static int concurrency();
+#else
+ int concurrency() const;
+#endif
+ static const char* name();
+
+ inline Impl::HIPInternal* impl_internal_space_instance() const {
+ return m_space_instance.get();
+ }
+
+ uint32_t impl_instance_id() const noexcept;
+
+ private:
+ friend bool operator==(HIP const& lhs, HIP const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(HIP const& lhs, HIP const& rhs) {
+ return !(lhs == rhs);
+ }
+ Kokkos::Impl::HostSharedPtr<Impl::HIPInternal> m_space_instance;
+};
+
+namespace Impl {
+template <>
+struct MemorySpaceAccess<HIPSpace, HIP::scratch_memory_space> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = false };
+};
+} // namespace Impl
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<HIP> {
+ static constexpr DeviceType id = DeviceType::HIP;
+ static int device_id(const HIP& exec) { return exec.hip_device(); }
+};
+} // namespace Experimental
+} // namespace Tools
+} // namespace Kokkos
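+
+// A hedged sketch of basic use of the execution space declared above; the
+// extent is a placeholder.
+//
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::HIP>(0, 100),
+//       KOKKOS_LAMBDA(int i) { /* runs on the AMD GPU */ });
+//   Kokkos::HIP().fence();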
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_ABORT_HPP
+#define KOKKOS_HIP_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <hip/hip_runtime.h>
+
+namespace Kokkos {
+namespace Impl {
+
+// The two keywords below are not contradictory. `noinline` is a
+// directive to the optimizer.
+[[noreturn]] __device__ __attribute__((noinline)) inline void hip_abort(
+ char const *msg) {
+ const char empty[] = "";
+ __assert_fail(msg, empty, 0, empty);
+  // This loop is never executed. It's intended to suppress warnings that the
+  // function returns, even though it does not. This is necessary because
+  // __assert_fail is not marked as [[noreturn]], even though it never
+  // returns.
+ while (true)
+ ;
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_HIP_BLOCKSIZE_DEDUCTION_HPP
#define KOKKOS_HIP_BLOCKSIZE_DEDUCTION_HPP
#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
namespace Kokkos {
-namespace Experimental {
namespace Impl {
enum class BlockType { Max, Preferred };
HIPLaunchMechanism LaunchMechanism =
DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
unsigned get_preferred_blocksize_impl() {
- // FIXME_HIP - could be if constexpr for c++17
- if (!HIPParallelLaunch<DriverType, LaunchBounds,
- LaunchMechanism>::default_launchbounds()) {
+ if constexpr (!HIPParallelLaunch<DriverType, LaunchBounds,
+ LaunchMechanism>::default_launchbounds()) {
// use the user specified value
return LaunchBounds::maxTperB;
} else {
}
}
-// FIXME_HIP - entire function could be constexpr for c++17
template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
HIPLaunchMechanism LaunchMechanism =
DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
-unsigned get_max_blocksize_impl() {
- // FIXME_HIP - could be if constexpr for c++17
- if (!HIPParallelLaunch<DriverType, LaunchBounds,
- LaunchMechanism>::default_launchbounds()) {
+constexpr unsigned get_max_blocksize_impl() {
+ if constexpr (!HIPParallelLaunch<DriverType, LaunchBounds,
+ LaunchMechanism>::default_launchbounds()) {
// use the user specified value
return LaunchBounds::maxTperB;
} else {
return HIPParallelLaunch<DriverType, LaunchBounds,
LaunchMechanism>::get_hip_func_attributes();
#else
- // FIXME_HIP - could be if constexpr for c++17
- if (!HIPParallelLaunch<DriverType, LaunchBounds,
- LaunchMechanism>::default_launchbounds()) {
+ if constexpr (!HIPParallelLaunch<DriverType, LaunchBounds,
+ LaunchMechanism>::default_launchbounds()) {
// for user defined, we *always* honor the request
return HIPParallelLaunch<DriverType, LaunchBounds,
LaunchMechanism>::get_hip_func_attributes();
} else {
- // FIXME_HIP - could be if constexpr for c++17
- if (BlockSize == BlockType::Max) {
+ if constexpr (BlockSize == BlockType::Max) {
return HIPParallelLaunch<
DriverType, Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
LaunchMechanism>::get_hip_func_attributes();
const unsigned min_waves_per_eu =
LaunchBounds::minBperSM ? LaunchBounds::minBperSM : 1;
const unsigned min_threads_per_sm = min_waves_per_eu * HIPTraits::WarpSize;
- const unsigned shmem_per_sm = hip_instance->m_shmemPerSM;
- unsigned block_size = tperb_reg;
+ const unsigned shmem_per_sm =
+ hip_instance->m_deviceProp.maxSharedMemoryPerMultiProcessor;
+ unsigned block_size = tperb_reg;
do {
unsigned total_shmem = f(block_size);
// find how many threads we can fit with this blocksize based on LDS usage
unsigned tperb_shmem = total_shmem > shmem_per_sm ? 0 : block_size;
- // FIXME_HIP - could be if constexpr for c++17
- if (BlockSize == BlockType::Max) {
+ if constexpr (BlockSize == BlockType::Max) {
// we want the maximum blocksize possible
// just wait until we get a case where we can fit the LDS per SM
if (tperb_shmem) return block_size;
}
} // namespace Impl
-} // namespace Experimental
} // namespace Kokkos
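// A hedged sketch of the user-side knob feeding LaunchBounds above: a policy
// with explicit launch bounds short-circuits the deduction and is always
// honored. `functor` and `n` are placeholders.
//
//   Kokkos::parallel_for(
//       Kokkos::RangePolicy<Kokkos::HIP, Kokkos::LaunchBounds<256, 1>>(0, n),
//       functor);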
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <HIP/Kokkos_HIP_DeepCopy.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp> // HIP_SAFE_CALL
+#include <HIP/Kokkos_HIP.hpp>
+
+namespace Kokkos {
+namespace Impl {
+namespace {
+hipStream_t get_deep_copy_stream() {
+ static hipStream_t s = nullptr;
+ if (s == nullptr) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&s));
+ }
+ return s;
+}
+} // namespace
+
+void DeepCopyHIP(void* dst, void const* src, size_t n) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault));
+}
+
+void DeepCopyAsyncHIP(const HIP& instance, void* dst, void const* src,
+ size_t n) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipMemcpyAsync(dst, src, n, hipMemcpyDefault, instance.hip_stream()));
+}
+
+void DeepCopyAsyncHIP(void* dst, void const* src, size_t n) {
+ hipStream_t s = get_deep_copy_stream();
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault, s));
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<HIP>(
+ "Kokkos::Impl::DeepCopyAsyncHIP: Post Deep Copy Fence on Deep-Copy "
+ "stream",
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ DeepCopyResourceSynchronization,
+ [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(s)); });
+}
+} // namespace Impl
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_DEEP_COPY_HPP
+#define KOKKOS_HIP_DEEP_COPY_HPP
+
+#include <HIP/Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp> // HIP_SAFE_CALL
+
+#include <hip/hip_runtime_api.h>
+
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopyHIP(void* dst, const void* src, size_t n);
+void DeepCopyAsyncHIP(const HIP& instance, void* dst, const void* src,
+ size_t n);
+void DeepCopyAsyncHIP(void* dst, const void* src, size_t n);
+
+template <class MemSpace>
+struct DeepCopy<MemSpace, HostSpace, HIP,
+ std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+ DeepCopy(const HIP& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncHIP(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace>
+struct DeepCopy<HostSpace, MemSpace, HIP,
+ std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+ DeepCopy(const HIP& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncHIP(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace1, class MemSpace2>
+struct DeepCopy<MemSpace1, MemSpace2, HIP,
+ std::enable_if_t<is_hip_type_space<MemSpace1>::value &&
+ is_hip_type_space<MemSpace2>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+ DeepCopy(const HIP& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncHIP(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace1, class MemSpace2, class ExecutionSpace>
+struct DeepCopy<MemSpace1, MemSpace2, ExecutionSpace,
+ std::enable_if_t<is_hip_type_space<MemSpace1>::value &&
+ is_hip_type_space<MemSpace2>::value &&
+ !std::is_same<ExecutionSpace, HIP>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopyHIP(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncHIP(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
+ MemSpace2::name() +
+ "Space, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<MemSpace, HostSpace, ExecutionSpace,
+ std::enable_if_t<is_hip_type_space<MemSpace>::value &&
+ !std::is_same<ExecutionSpace, HIP>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopyHIP(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncHIP(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
+ "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<HostSpace, MemSpace, ExecutionSpace,
+ std::enable_if_t<is_hip_type_space<MemSpace>::value &&
+ !std::is_same<ExecutionSpace, HIP>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopyHIP(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncHIP(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
+ "Space, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
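+
+// A hedged sketch of the user-level calls that select the specializations
+// above; view names and extents are placeholders.
+//
+//   Kokkos::View<double*, Kokkos::HIPSpace> dev("dev", 1 << 20);
+//   auto host = Kokkos::create_mirror_view(dev);
+//   Kokkos::deep_copy(exec, dev, host);  // asynchronous, on exec's stream
+//   Kokkos::deep_copy(dev, host);        // blocking variant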
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_ERROR_HPP
+#define KOKKOS_HIP_ERROR_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <hip/hip_runtime.h>
+
+namespace Kokkos {
+namespace Impl {
+
+void hip_internal_error_throw(hipError_t e, const char* name,
+ const char* file = nullptr, const int line = 0);
+
+inline void hip_internal_safe_call(hipError_t e, const char* name,
+ const char* file = nullptr,
+ const int line = 0) {
+ if (hipSuccess != e) {
+ hip_internal_error_throw(e, name, file, line);
+ }
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#define KOKKOS_IMPL_HIP_SAFE_CALL(call) \
+ Kokkos::Impl::hip_internal_safe_call(call, #call, __FILE__, __LINE__)
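+
+// A hedged usage example of the macro above; the allocation size is a
+// placeholder.
+//
+//   void* ptr = nullptr;
+//   KOKKOS_IMPL_HIP_SAFE_CALL(hipMalloc(&ptr, 1024));
+//   KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(ptr));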
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_GRAPHNODEKERNEL_HPP
+#define KOKKOS_HIP_GRAPHNODEKERNEL_HPP
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <Kokkos_PointerOwnership.hpp>
+
+#include <HIP/Kokkos_HIP_GraphNode_Impl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename PolicyType, typename Functor, typename PatternTag,
+ typename... Args>
+class GraphNodeKernelImpl<Kokkos::HIP, PolicyType, Functor, PatternTag, Args...>
+ : public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+ Args..., Kokkos::HIP>::type {
+ public:
+ using Policy = PolicyType;
+ using graph_kernel = GraphNodeKernelImpl;
+ using base_t =
+ typename PatternImplSpecializationFromTag<PatternTag, Functor, Policy,
+ Args..., Kokkos::HIP>::type;
+
+ // TODO use the label and the execution space
+ template <typename PolicyDeduced, typename... ArgsDeduced>
+ GraphNodeKernelImpl(std::string label_, HIP const&, Functor arg_functor,
+ PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
+ : base_t(std::move(arg_functor), (PolicyDeduced&&)arg_policy,
+ (ArgsDeduced&&)args...),
+ label(std::move(label_)) {}
+
+ template <typename PolicyDeduced>
+ GraphNodeKernelImpl(Kokkos::HIP const& exec_space, Functor arg_functor,
+ PolicyDeduced&& arg_policy)
+ : GraphNodeKernelImpl("[unlabeled]", exec_space, std::move(arg_functor),
+ (PolicyDeduced&&)arg_policy) {}
+
+ void set_hip_graph_ptr(hipGraph_t* arg_graph_ptr) {
+ m_graph_ptr = arg_graph_ptr;
+ }
+
+ void set_hip_graph_node_ptr(hipGraphNode_t* arg_node_ptr) {
+ m_graph_node_ptr = arg_node_ptr;
+ }
+
+ hipGraphNode_t* get_hip_graph_node_ptr() const { return m_graph_node_ptr; }
+
+ hipGraph_t const* get_hip_graph_ptr() const { return m_graph_ptr; }
+
+ Kokkos::ObservingRawPtr<base_t> allocate_driver_memory_buffer(
+ const HIP& exec) const {
+ KOKKOS_EXPECTS(m_driver_storage == nullptr);
+ std::string alloc_label =
+ label + " - GraphNodeKernel global memory functor storage";
+ m_driver_storage = std::shared_ptr<base_t>(
+ static_cast<base_t*>(
+ HIPSpace().allocate(exec, alloc_label.c_str(), sizeof(base_t))),
+ // FIXME_HIP Custom deleter should use the same 'exec' as the allocation.
+ [alloc_label](base_t* ptr) {
+ HIPSpace().deallocate(alloc_label.c_str(), ptr, sizeof(base_t));
+ });
+ KOKKOS_ENSURES(m_driver_storage != nullptr);
+ return m_driver_storage.get();
+ }
+
+ auto get_driver_storage() const { return m_driver_storage; }
+
+ private:
+ Kokkos::ObservingRawPtr<const hipGraph_t> m_graph_ptr = nullptr;
+ Kokkos::ObservingRawPtr<hipGraphNode_t> m_graph_node_ptr = nullptr;
+ mutable std::shared_ptr<base_t> m_driver_storage = nullptr;
+ std::string label;
+};
+
+struct HIPGraphNodeAggregateKernel {
+ using graph_kernel = HIPGraphNodeAggregateKernel;
+
+ // Aggregates don't need a policy, but for the purposes of checking the
+ // static assertions about graph kernels, we provide a trivial one that
+ // advertises itself as a graph kernel.
+ struct Policy {
+ using is_graph_kernel = std::true_type;
+ };
+};
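+
+// Illustrative note (not part of this patch): GraphImpl<HIP>::add_node in
+// Kokkos_HIP_Graph_Impl.hpp (later in this patch) statically asserts
+// kernel_type::Policy::is_graph_kernel::value, so even this trivial
+// aggregate needs a Policy that advertises itself as a graph kernel:
+//
+//   static_assert(
+//       HIPGraphNodeAggregateKernel::Policy::is_graph_kernel::value);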
+
+template <typename KernelType,
+ typename Tag =
+ typename PatternTagFromImplSpecialization<KernelType>::type>
+struct get_graph_node_kernel_type
+ : type_identity<
+ GraphNodeKernelImpl<Kokkos::HIP, typename KernelType::Policy,
+ typename KernelType::functor_type, Tag>> {};
+
+template <typename KernelType>
+struct get_graph_node_kernel_type<KernelType, Kokkos::ParallelReduceTag>
+ : type_identity<GraphNodeKernelImpl<
+ Kokkos::HIP, typename KernelType::Policy,
+ CombinedFunctorReducer<typename KernelType::functor_type,
+ typename KernelType::reducer_type>,
+ Kokkos::ParallelReduceTag>> {};
+
+template <typename KernelType>
+auto* allocate_driver_storage_for_kernel(const HIP& exec,
+ KernelType const& kernel) {
+ using graph_node_kernel_t =
+ typename get_graph_node_kernel_type<KernelType>::type;
+ auto const& kernel_as_graph_kernel =
+ static_cast<graph_node_kernel_t const&>(kernel);
+
+ return kernel_as_graph_kernel.allocate_driver_memory_buffer(exec);
+}
+
+template <typename KernelType>
+auto const& get_hip_graph_from_kernel(KernelType const& kernel) {
+ using graph_node_kernel_t =
+ typename get_graph_node_kernel_type<KernelType>::type;
+ auto const& kernel_as_graph_kernel =
+ static_cast<graph_node_kernel_t const&>(kernel);
+ hipGraph_t const* graph_ptr = kernel_as_graph_kernel.get_hip_graph_ptr();
+ KOKKOS_EXPECTS(graph_ptr != nullptr);
+
+ return *graph_ptr;
+}
+
+template <typename KernelType>
+auto& get_hip_graph_node_from_kernel(KernelType const& kernel) {
+ using graph_node_kernel_t =
+ typename get_graph_node_kernel_type<KernelType>::type;
+ auto const& kernel_as_graph_kernel =
+ static_cast<graph_node_kernel_t const&>(kernel);
+ auto* graph_node_ptr = kernel_as_graph_kernel.get_hip_graph_node_ptr();
+ KOKKOS_EXPECTS(graph_node_ptr != nullptr);
+
+ return *graph_node_ptr;
+}
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_GRAPHNODE_IMPL_HPP
+#define KOKKOS_HIP_GRAPHNODE_IMPL_HPP
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+
+#include <HIP/Kokkos_HIP.hpp>
+
+namespace Kokkos {
+namespace Impl {
+template <>
+struct GraphNodeBackendSpecificDetails<Kokkos::HIP> {
+ hipGraphNode_t node = nullptr;
+
+ explicit GraphNodeBackendSpecificDetails() = default;
+
+ explicit GraphNodeBackendSpecificDetails(
+ _graph_node_is_root_ctor_tag) noexcept {}
+};
+
+template <typename Kernel, typename PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure<Kokkos::HIP, Kernel,
+ PredecessorRef> {
+ protected:
+ GraphNodeBackendDetailsBeforeTypeErasure(
+ Kokkos::HIP const &, Kernel &, PredecessorRef const &,
+ GraphNodeBackendSpecificDetails<Kokkos::HIP> &) noexcept {}
+
+ GraphNodeBackendDetailsBeforeTypeErasure(
+ Kokkos::HIP const &, _graph_node_is_root_ctor_tag,
+ GraphNodeBackendSpecificDetails<Kokkos::HIP> &) noexcept {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_GRAPH_IMPL_HPP
+#define KOKKOS_HIP_GRAPH_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+#include <impl/Kokkos_GraphNodeImpl.hpp>
+
+#include <HIP/Kokkos_HIP_GraphNodeKernel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+template <>
+class GraphImpl<Kokkos::HIP> {
+ public:
+ using node_details_t = GraphNodeBackendSpecificDetails<Kokkos::HIP>;
+ using root_node_impl_t =
+ GraphNodeImpl<Kokkos::HIP, Kokkos::Experimental::TypeErasedTag,
+ Kokkos::Experimental::TypeErasedTag>;
+ using aggregate_kernel_impl_t = HIPGraphNodeAggregateKernel;
+ using aggregate_node_impl_t =
+ GraphNodeImpl<Kokkos::HIP, aggregate_kernel_impl_t,
+ Kokkos::Experimental::TypeErasedTag>;
+
+ // Not movable or copyable; it spends its whole life as a shared_ptr in the
+ // Graph object.
+ GraphImpl() = delete;
+ GraphImpl(GraphImpl const&) = delete;
+ GraphImpl(GraphImpl&&) = delete;
+ GraphImpl& operator=(GraphImpl const&) = delete;
+ GraphImpl& operator=(GraphImpl&&) = delete;
+
+ ~GraphImpl();
+
+ explicit GraphImpl(Kokkos::HIP instance);
+
+ void add_node(std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr);
+
+ template <class NodeImpl>
+ void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr);
+
+ template <class NodeImplPtr, class PredecessorRef>
+ void add_predecessor(NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref);
+
+ void submit(const Kokkos::HIP& exec);
+
+ Kokkos::HIP const& get_execution_space() const noexcept;
+
+ auto create_root_node_ptr();
+
+ template <class... PredecessorRefs>
+ auto create_aggregate_ptr(PredecessorRefs&&...);
+
+ void instantiate() {
+ KOKKOS_EXPECTS(!m_graph_exec);
+ constexpr size_t error_log_size = 256;
+ hipGraphNode_t error_node = nullptr;
+ char error_log[error_log_size];
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphInstantiate(
+ &m_graph_exec, m_graph, &error_node, error_log, error_log_size));
+ KOKKOS_ENSURES(m_graph_exec);
+ }
+
+ hipGraph_t hip_graph() { return m_graph; }
+ hipGraphExec_t hip_graph_exec() { return m_graph_exec; }
+
+ private:
+ Kokkos::HIP m_execution_space;
+ hipGraph_t m_graph = nullptr;
+ hipGraphExec_t m_graph_exec = nullptr;
+
+ // Store drivers for the kernel nodes that launch in global memory.
+ // This is required as lifetime of drivers must be bounded to this instance's
+ // lifetime.
+ std::vector<std::shared_ptr<void>> m_driver_storage;
+};
+
+inline GraphImpl<Kokkos::HIP>::~GraphImpl() {
+ m_execution_space.fence("Kokkos::GraphImpl::~GraphImpl: Graph Destruction");
+ KOKKOS_EXPECTS(m_graph);
+ if (m_graph_exec) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphExecDestroy(m_graph_exec));
+ }
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphDestroy(m_graph));
+}
+
+inline GraphImpl<Kokkos::HIP>::GraphImpl(Kokkos::HIP instance)
+ : m_execution_space(std::move(instance)) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphCreate(&m_graph, 0));
+}
+
+inline void GraphImpl<Kokkos::HIP>::add_node(
+ std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr) {
+ // All of the predecessors are just added as normal, so all we need to
+ // do here is add an empty node.
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipGraphAddEmptyNode(&(arg_node_ptr->node_details_t::node), m_graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0));
+}
+
+// Requires NodeImplPtr is a shared_ptr to specialization of GraphNodeImpl
+// Also requires that the kernel has the graph node tag in its policy
+template <class NodeImpl>
+inline void GraphImpl<Kokkos::HIP>::add_node(
+ std::shared_ptr<NodeImpl> const& arg_node_ptr) {
+ static_assert(NodeImpl::kernel_type::Policy::is_graph_kernel::value);
+ KOKKOS_EXPECTS(arg_node_ptr);
+ // The Kernel launch from the execute() method has been shimmed to insert
+ // the node into the graph
+ auto& kernel = arg_node_ptr->get_kernel();
+ auto& node = static_cast<node_details_t*>(arg_node_ptr.get())->node;
+ KOKKOS_EXPECTS(!node);
+ kernel.set_hip_graph_ptr(&m_graph);
+ kernel.set_hip_graph_node_ptr(&node);
+ kernel.execute();
+ KOKKOS_ENSURES(node);
+ if (std::shared_ptr<void> tmp = kernel.get_driver_storage())
+ m_driver_storage.push_back(std::move(tmp));
+}
+
+// Requires PredecessorRef is a specialization of GraphNodeRef that has
+// already been added to this graph and NodeImpl is a specialization of
+// GraphNodeImpl that has already been added to this graph.
+template <class NodeImplPtr, class PredecessorRef>
+inline void GraphImpl<Kokkos::HIP>::add_predecessor(
+ NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref) {
+ KOKKOS_EXPECTS(arg_node_ptr);
+ auto pred_ptr = GraphAccess::get_node_ptr(arg_pred_ref);
+ KOKKOS_EXPECTS(pred_ptr);
+
+ auto const& pred_node = pred_ptr->node_details_t::node;
+ KOKKOS_EXPECTS(pred_node);
+
+ auto const& node = arg_node_ptr->node_details_t::node;
+ KOKKOS_EXPECTS(node);
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipGraphAddDependencies(m_graph, &pred_node, &node, 1));
+}
+
+inline void GraphImpl<Kokkos::HIP>::submit(const Kokkos::HIP& exec) {
+ if (!m_graph_exec) {
+ instantiate();
+ }
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphLaunch(m_graph_exec, exec.hip_stream()));
+}
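+
+// Usage sketch (illustrative, not part of this patch; MyFunctor is a
+// placeholder): submit() is reached through the public graph API; the first
+// submission instantiates the executable graph, later ones only launch it.
+//
+//   auto graph = Kokkos::Experimental::create_graph(
+//       Kokkos::HIP{}, [&](auto root) {
+//         root.then_parallel_for(n, MyFunctor{});
+//       });
+//   graph.submit();  // instantiate() + hipGraphLaunch
+//   graph.submit();  // hipGraphLaunch only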
+
+inline Kokkos::HIP const& GraphImpl<Kokkos::HIP>::get_execution_space()
+ const noexcept {
+ return m_execution_space;
+}
+
+inline auto GraphImpl<Kokkos::HIP>::create_root_node_ptr() {
+ KOKKOS_EXPECTS(m_graph);
+ KOKKOS_EXPECTS(!m_graph_exec);
+ auto rv = std::make_shared<root_node_impl_t>(get_execution_space(),
+ _graph_node_is_root_ctor_tag{});
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphAddEmptyNode(&(rv->node_details_t::node),
+ m_graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0));
+ KOKKOS_ENSURES(rv->node_details_t::node);
+ return rv;
+}
+
+template <class... PredecessorRefs>
+inline auto GraphImpl<Kokkos::HIP>::create_aggregate_ptr(PredecessorRefs&&...) {
+ // The attachment to predecessors, which is all we really need, happens
+ // in the generic layer, which calls through to add_predecessor for
+ // each predecessor ref, so all we need to do here is create the (trivial)
+ // aggregate node.
+ return std::make_shared<aggregate_node_impl_t>(m_execution_space,
+ _graph_node_kernel_ctor_tag{},
+ aggregate_kernel_impl_t{});
+}
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_HALF_HPP_
#define KOKKOS_HIP_HALF_HPP_
#ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
#include <Kokkos_Half.hpp>
-#include <Kokkos_NumericTraits.hpp> // reduction_identity
+#include <Kokkos_ReductionIdentity.hpp>
namespace Kokkos {
namespace Experimental {
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
+
+#include <hip/hip_fp16.h>
+
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+// Make sure no one else tries to define half_t
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_HIP_HALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+ using type = __half;
+};
+} // namespace Impl
+} // namespace Kokkos
+#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
+#endif // KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/*--------------------------------------------------------------------------*/
+/* Kokkos interfaces */
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
+#include <impl/Kokkos_CheckedIntegerOps.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+/*--------------------------------------------------------------------------*/
+/* Standard 'C' libraries */
+#include <stdlib.h>
+
+/* Standard 'C++' libraries */
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+__device__ __constant__ unsigned long kokkos_impl_hip_constant_memory_buffer
+ [Kokkos::Impl::HIPTraits::ConstantMemoryUsage / sizeof(unsigned long)];
+#endif
+
+namespace Kokkos {
+namespace Impl {
+Kokkos::View<uint32_t *, HIPSpace> hip_global_unique_token_locks(
+ bool deallocate) {
+ static Kokkos::View<uint32_t *, HIPSpace> locks =
+ Kokkos::View<uint32_t *, HIPSpace>();
+ if (!deallocate && locks.extent(0) == 0)
+ locks = Kokkos::View<uint32_t *, HIPSpace>(
+ "Kokkos::UniqueToken<HIP>::m_locks", HIPInternal::concurrency());
+ if (deallocate) locks = Kokkos::View<uint32_t *, HIPSpace>();
+ return locks;
+}
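+
+// Illustrative note (not part of this patch): this lazily created lock view
+// backs the global-scope unique token; a sketch of the intended use:
+//
+//   Kokkos::Experimental::UniqueToken<
+//       Kokkos::HIP, Kokkos::Experimental::UniqueTokenScope::Global>
+//       token;
+//   // in a kernel: int id = token.acquire(); /* ... */ token.release(id);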
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+
+using ScratchGrain = Kokkos::HIP::size_type[Impl::HIPTraits::WarpSize];
+constexpr auto sizeScratchGrain = sizeof(ScratchGrain);
+
+std::size_t scratch_count(const std::size_t size) {
+ return (size + sizeScratchGrain - 1) / sizeScratchGrain;
+}
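+
+// Worked example (illustrative): assuming a 4-byte size_type and
+// WarpSize == 64, sizeScratchGrain == 64 * 4 == 256 bytes, so
+// scratch_count(1000) == (1000 + 255) / 256 == 4 grains (1024 bytes).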
+
+} // namespace
+
+//----------------------------------------------------------------------------
+
+int HIPInternal::concurrency() {
+ static int const concurrency =
+ m_maxThreadsPerSM * m_deviceProp.multiProcessorCount;
+
+ return concurrency;
+}
+
+void HIPInternal::print_configuration(std::ostream &s) const {
+ s << "macro KOKKOS_ENABLE_HIP : defined" << '\n';
+#if defined(HIP_VERSION)
+ s << "macro HIP_VERSION = " << HIP_VERSION << " = version "
+ << HIP_VERSION_MAJOR << '.' << HIP_VERSION_MINOR << '.' << HIP_VERSION_PATCH
+ << '\n';
+#endif
+
+ s << "macro KOKKOS_ENABLE_ROCTHRUST : "
+#if defined(KOKKOS_ENABLE_ROCTHRUST)
+ << "defined\n";
+#else
+ << "undefined\n";
+#endif
+
+ s << "macro KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC: ";
+#ifdef KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC
+ s << "yes\n";
+#else
+ s << "no\n";
+#endif
+
+ for (int i : get_visible_devices()) {
+ hipDeviceProp_t hipProp;
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceProperties(&hipProp, i));
+ std::string gpu_type = hipProp.integrated == 1 ? "APU" : "dGPU";
+
+ s << "Kokkos::HIP[ " << i << " ] "
+ << "gcnArch " << hipProp.gcnArchName << ", Total Global Memory: "
+ << ::Kokkos::Impl::human_memory_size(hipProp.totalGlobalMem)
+ << ", Shared Memory per Block: "
+ << ::Kokkos::Impl::human_memory_size(hipProp.sharedMemPerBlock)
+ << ", APU or dGPU: " << gpu_type
+ << ", Is Large Bar: " << hipProp.isLargeBar
+ << ", Supports Managed Memory: " << hipProp.managedMemory
+ << ", Wavefront Size: " << hipProp.warpSize;
+ if (m_hipDev == i) s << " : Selected";
+ s << '\n';
+ }
+}
+
+//----------------------------------------------------------------------------
+
+HIPInternal::~HIPInternal() {
+ if (m_scratchSpace || m_scratchFlags) {
+ std::cerr << "Kokkos::HIP ERROR: Failed to call "
+ "Kokkos::HIP::finalize()"
+ << std::endl;
+ std::cerr.flush();
+ }
+
+ m_scratchSpaceCount = 0;
+ m_scratchFlagsCount = 0;
+ m_scratchSpace = nullptr;
+ m_scratchFlags = nullptr;
+ m_stream = nullptr;
+}
+
+int HIPInternal::verify_is_initialized(const char *const label) const {
+ if (m_hipDev < 0) {
+ Kokkos::abort((std::string("Kokkos::HIP::") + label +
+ " : ERROR device not initialized\n")
+ .c_str());
+ }
+ return 0 <= m_hipDev;
+}
+
+uint32_t HIPInternal::impl_get_instance_id() const noexcept {
+ return m_instance_id;
+}
+HIPInternal &HIPInternal::singleton() {
+ static HIPInternal *self = nullptr;
+ if (!self) {
+ self = new HIPInternal();
+ }
+ return *self;
+}
+
+void HIPInternal::fence() const {
+ fence("Kokkos::HIPInternal::fence: Unnamed Internal Fence");
+}
+void HIPInternal::fence(const std::string &name) const {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::HIP>(
+ name,
+ Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+ impl_get_instance_id()},
+ [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(m_stream)); });
+}
+
+void HIPInternal::initialize(hipStream_t stream) {
+ KOKKOS_EXPECTS(!is_initialized());
+
+ if (was_finalized)
+ Kokkos::abort("Calling HIP::initialize after HIP::finalize is illegal\n");
+
+ m_stream = stream;
+
+ //----------------------------------
+ // Multiblock reduction uses scratch flags for counters
+ // and scratch space for partial reduction values.
+ // Allocate some initial space. This will grow as needed.
+ {
+ // Maximum number of warps: a single warp performs the final reduction,
+ // one partial result per lane, so cap the count at WarpSize.
+ unsigned int maxWarpCount =
+ m_deviceProp.maxThreadsPerBlock / Impl::HIPTraits::WarpSize;
+ if (Impl::HIPTraits::WarpSize < maxWarpCount) {
+ maxWarpCount = Impl::HIPTraits::WarpSize;
+ }
+
+ const unsigned reduce_block_count =
+ maxWarpCount * Impl::HIPTraits::WarpSize;
+
+ (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
+ (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
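+
+ // Worked example (illustrative): with maxThreadsPerBlock == 1024 and
+ // WarpSize == 64, maxWarpCount == min(1024 / 64, 64) == 16 and
+ // reduce_block_count == 16 * 64 == 1024; assuming a 4-byte size_type,
+ // that is 1024 * 2 * 4 == 8 KiB of flags and 1024 * 16 * 4 == 64 KiB
+ // of partial-reduction space.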
+ }
+
+ m_num_scratch_locks = concurrency();
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipMalloc(&m_scratch_locks, sizeof(int32_t) * m_num_scratch_locks));
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipMemset(m_scratch_locks, 0, sizeof(int32_t) * m_num_scratch_locks));
+}
+
+//----------------------------------------------------------------------------
+
+Kokkos::HIP::size_type *HIPInternal::scratch_space(const std::size_t size) {
+ if (verify_is_initialized("scratch_space") &&
+ m_scratchSpaceCount < scratch_count(size)) {
+ Kokkos::HIPSpace mem_space;
+
+ if (m_scratchSpace) {
+ mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+ }
+
+ m_scratchSpaceCount = scratch_count(size);
+
+ std::size_t alloc_size =
+ multiply_overflow_abort(m_scratchSpaceCount, sizeScratchGrain);
+ m_scratchSpace = static_cast<size_type *>(
+ mem_space.allocate("Kokkos::InternalScratchSpace", alloc_size));
+ }
+
+ return m_scratchSpace;
+}
+
+Kokkos::HIP::size_type *HIPInternal::scratch_flags(const std::size_t size) {
+ if (verify_is_initialized("scratch_flags") &&
+ m_scratchFlagsCount < scratch_count(size)) {
+ Kokkos::HIPSpace mem_space;
+
+ if (m_scratchFlags) {
+ mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+ }
+
+ m_scratchFlagsCount = scratch_count(size);
+
+ std::size_t alloc_size =
+ multiply_overflow_abort(m_scratchFlagsCount, sizeScratchGrain);
+ m_scratchFlags = static_cast<size_type *>(
+ mem_space.allocate("Kokkos::InternalScratchFlags", alloc_size));
+
+ // We only zero-initialize the allocation when we actually allocate.
+ // It's the responsibility of the features using scratch_flags,
+ // namely parallel_reduce and parallel_scan, to reset the used values to 0.
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemset(m_scratchFlags, 0, alloc_size));
+ }
+
+ return m_scratchFlags;
+}
+
+Kokkos::HIP::size_type *HIPInternal::stage_functor_for_execution(
+ void const *driver, std::size_t const size) const {
+ if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) {
+ Kokkos::HIPSpace device_mem_space;
+ Kokkos::HIPHostPinnedSpace host_mem_space;
+
+ if (m_scratchFunctor) {
+ device_mem_space.deallocate(m_scratchFunctor, m_scratchFunctorSize);
+ host_mem_space.deallocate(m_scratchFunctorHost, m_scratchFunctorSize);
+ }
+
+ m_scratchFunctorSize = size;
+
+ m_scratchFunctor = static_cast<size_type *>(device_mem_space.allocate(
+ "Kokkos::InternalScratchFunctor", m_scratchFunctorSize));
+ m_scratchFunctorHost = static_cast<size_type *>(host_mem_space.allocate(
+ "Kokkos::InternalScratchFunctorHost", m_scratchFunctorSize));
+ }
+
+ // When using HSA_XNACK=1, it is necessary to copy the driver to the host to
+ // ensure that the driver is not destroyed before the computation is done.
+ // Without this fix, all the atomic tests fail. It is not obvious that this
+ // problem is limited to HSA_XNACK=1 even if all the tests pass when
+ // HSA_XNACK=0. That's why we always copy the driver.
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(m_stream));
+ std::memcpy(m_scratchFunctorHost, driver, size);
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(m_scratchFunctor,
+ m_scratchFunctorHost, size,
+ hipMemcpyDefault, m_stream));
+
+ return m_scratchFunctor;
+}
+
+int HIPInternal::acquire_team_scratch_space() {
+ int current_team_scratch = 0;
+ int zero = 0;
+ while (!m_team_scratch_pool[current_team_scratch].compare_exchange_weak(
+ zero, 1, std::memory_order_release, std::memory_order_relaxed)) {
+ current_team_scratch = (current_team_scratch + 1) % m_n_team_scratch;
+ // On failure compare_exchange_weak stores the observed value in 'zero';
+ // reset it so the next slot is again compared against 0 (unlocked).
+ zero = 0;
+ }
+
+ return current_team_scratch;
+}
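+
+// Usage sketch (illustrative, not part of this patch): the pool is used in
+// acquire/resize/release order by the team-parallel constructs:
+//
+//   int id      = instance->acquire_team_scratch_space();
+//   void *space = instance->resize_team_scratch_space(id, bytes);
+//   /* launch a team kernel that uses 'space' */
+//   instance->release_team_scratch_space(id);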
+
+void *HIPInternal::resize_team_scratch_space(int scratch_pool_id,
+ std::int64_t bytes,
+ bool force_shrink) {
+ // Multiple ParallelFor/Reduce Teams can call this function at the same time
+ // and invalidate the m_team_scratch_ptr. We use a pool to avoid any race
+ // condition.
+ if (m_team_scratch_current_size[scratch_pool_id] == 0) {
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ Kokkos::kokkos_malloc<Kokkos::HIPSpace>(
+ "Kokkos::HIPSpace::TeamScratchMemory",
+ m_team_scratch_current_size[scratch_pool_id]);
+ }
+ if ((bytes > m_team_scratch_current_size[scratch_pool_id]) ||
+ ((bytes < m_team_scratch_current_size[scratch_pool_id]) &&
+ (force_shrink))) {
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ Kokkos::kokkos_realloc<Kokkos::HIPSpace>(
+ m_team_scratch_ptr[scratch_pool_id],
+ m_team_scratch_current_size[scratch_pool_id]);
+ }
+ return m_team_scratch_ptr[scratch_pool_id];
+}
+
+void HIPInternal::release_team_scratch_space(int scratch_pool_id) {
+ m_team_scratch_pool[scratch_pool_id] = 0;
+}
+
+//----------------------------------------------------------------------------
+
+void HIPInternal::finalize() {
+ this->fence("Kokkos::HIPInternal::finalize: fence on finalization");
+ was_finalized = true;
+
+ if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
+ Kokkos::HIPSpace device_mem_space;
+
+ device_mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+ device_mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+
+ if (m_scratchFunctorSize > 0) {
+ device_mem_space.deallocate(m_scratchFunctor, m_scratchFunctorSize);
+ Kokkos::HIPHostPinnedSpace host_mem_space;
+ host_mem_space.deallocate(m_scratchFunctorHost, m_scratchFunctorSize);
+ }
+ }
+
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ if (m_team_scratch_current_size[i] > 0)
+ Kokkos::kokkos_free<Kokkos::HIPSpace>(m_team_scratch_ptr[i]);
+ }
+
+ m_scratchSpaceCount = 0;
+ m_scratchFlagsCount = 0;
+ m_scratchSpace = nullptr;
+ m_scratchFlags = nullptr;
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ }
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(m_scratch_locks));
+ m_scratch_locks = nullptr;
+ m_num_scratch_locks = 0;
+}
+
+int HIPInternal::m_hipDev = -1;
+int HIPInternal::m_maxThreadsPerSM = 0;
+
+hipDeviceProp_t HIPInternal::m_deviceProp;
+
+std::mutex HIPInternal::scratchFunctorMutex;
+unsigned long *HIPInternal::constantMemHostStaging = nullptr;
+hipEvent_t HIPInternal::constantMemReusable = nullptr;
+std::mutex HIPInternal::constantMemMutex;
+
+//----------------------------------------------------------------------------
+
+Kokkos::HIP::size_type hip_internal_multiprocessor_count() {
+ return HIPInternal::singleton().m_deviceProp.multiProcessorCount;
+}
+
+Kokkos::HIP::size_type *hip_internal_scratch_space(const HIP &instance,
+ const std::size_t size) {
+ return instance.impl_internal_space_instance()->scratch_space(size);
+}
+
+Kokkos::HIP::size_type *hip_internal_scratch_flags(const HIP &instance,
+ const std::size_t size) {
+ return instance.impl_internal_space_instance()->scratch_flags(size);
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+void hip_internal_error_throw(hipError_t e, const char *name, const char *file,
+ const int line) {
+ std::ostringstream out;
+ out << name << " error( " << hipGetErrorName(e)
+ << "): " << hipGetErrorString(e);
+ if (file) {
+ out << " " << file << ":" << line;
+ }
+ throw_runtime_exception(out.str());
+}
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+void Kokkos::Impl::create_HIP_instances(std::vector<HIP> &instances) {
+ for (int s = 0; s < int(instances.size()); s++) {
+ hipStream_t stream;
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&stream));
+ instances[s] = HIP(stream, ManageStream::yes);
+ }
+}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/*--------------------------------------------------------------------------*/
+
+#ifndef KOKKOS_HIP_INSTANCE_HPP
+#define KOKKOS_HIP_INSTANCE_HPP
+
+#include <HIP/Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp>
+
+#include <atomic>
+#include <mutex>
+
+namespace Kokkos {
+namespace Impl {
+
+struct HIPTraits {
+#if defined(KOKKOS_ARCH_AMD_GFX906) || defined(KOKKOS_ARCH_AMD_GFX908) || \
+ defined(KOKKOS_ARCH_AMD_GFX90A) || defined(KOKKOS_ARCH_AMD_GFX940) || \
+ defined(KOKKOS_ARCH_AMD_GFX942) || defined(KOKKOS_ARCH_AMD_GFX942_APU)
+ static constexpr int WarpSize = 64;
+ static constexpr int WarpIndexMask = 0x003f; /* hexadecimal for 63 */
+ static constexpr int WarpIndexShift = 6; /* WarpSize == 1 << WarpIndexShift */
+#elif defined(KOKKOS_ARCH_AMD_GFX1030) || defined(KOKKOS_ARCH_AMD_GFX1100) || \
+ defined(KOKKOS_ARCH_AMD_GFX1103)
+ static constexpr int WarpSize = 32;
+ static constexpr int WarpIndexMask = 0x001f; /* hexadecimal for 31 */
+ static constexpr int WarpIndexShift = 5; /* WarpSize == 1 << WarpIndexShift */
+#endif
+ static constexpr int ConservativeThreadsPerBlock =
+ 256; // conservative fallback blocksize in case of spills
+ static constexpr int MaxThreadsPerBlock =
+ 1024; // the maximum we can fit in a block
+ static constexpr int ConstantMemoryUsage = 0x008000; /* 32k bytes */
+ static constexpr int KernelArgumentLimit = 0x001000; /* 4k bytes */
+ static constexpr int ConstantMemoryUseThreshold = 0x000200; /* 512 bytes */
+};
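+
+// Illustrative note (not part of this patch): the mask/shift pair factors a
+// flat thread index into (warp, lane) without division, e.g. with
+// WarpSize == 64:
+//
+//   int lane = tid & HIPTraits::WarpIndexMask;    // tid % 64
+//   int warp = tid >> HIPTraits::WarpIndexShift;  // tid / 64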
+
+//----------------------------------------------------------------------------
+
+HIP::size_type hip_internal_multiprocessor_count();
+
+HIP::size_type *hip_internal_scratch_space(const HIP &instance,
+ const std::size_t size);
+HIP::size_type *hip_internal_scratch_flags(const HIP &instance,
+ const std::size_t size);
+
+//----------------------------------------------------------------------------
+
+class HIPInternal {
+ private:
+ HIPInternal(const HIPInternal &);
+ HIPInternal &operator=(const HIPInternal &);
+
+ public:
+ using size_type = ::Kokkos::HIP::size_type;
+
+ static int m_hipDev;
+ static int m_maxThreadsPerSM;
+
+ static hipDeviceProp_t m_deviceProp;
+
+ static int concurrency();
+
+ // Scratch Spaces for Reductions
+ std::size_t m_scratchSpaceCount = 0;
+ std::size_t m_scratchFlagsCount = 0;
+ mutable std::size_t m_scratchFunctorSize = 0;
+
+ size_type *m_scratchSpace = nullptr;
+ size_type *m_scratchFlags = nullptr;
+ mutable size_type *m_scratchFunctor = nullptr;
+ mutable size_type *m_scratchFunctorHost = nullptr;
+ static std::mutex scratchFunctorMutex;
+
+ hipStream_t m_stream = nullptr;
+ uint32_t m_instance_id =
+ Kokkos::Tools::Experimental::Impl::idForInstance<HIP>(
+ reinterpret_cast<uintptr_t>(this));
+
+ // Team Scratch Level 1 Space
+ int m_n_team_scratch = 10;
+ mutable int64_t m_team_scratch_current_size[10] = {};
+ mutable void *m_team_scratch_ptr[10] = {};
+ mutable std::atomic_int m_team_scratch_pool[10] = {};
+ int32_t *m_scratch_locks = nullptr;
+ size_t m_num_scratch_locks = 0;
+
+ bool was_finalized = false;
+
+ // FIXME_HIP: these want to be per-device, not per-stream... use of 'static'
+ // here will break once there are multiple devices though
+ static unsigned long *constantMemHostStaging;
+ static hipEvent_t constantMemReusable;
+ static std::mutex constantMemMutex;
+
+ static HIPInternal &singleton();
+
+ int verify_is_initialized(const char *const label) const;
+
+ int is_initialized() const {
+ return nullptr != m_scratchSpace && nullptr != m_scratchFlags;
+ }
+
+ void initialize(hipStream_t stream);
+ void finalize();
+
+ void print_configuration(std::ostream &) const;
+
+ void fence() const;
+ void fence(const std::string &) const;
+
+ ~HIPInternal();
+
+ HIPInternal() = default;
+
+ // Resizing of reduction related scratch spaces
+ size_type *scratch_space(std::size_t const size);
+ size_type *scratch_flags(std::size_t const size);
+ size_type *stage_functor_for_execution(void const *driver,
+ std::size_t const size) const;
+ uint32_t impl_get_instance_id() const noexcept;
+ int acquire_team_scratch_space();
+ // Resizing of team level 1 scratch
+ void *resize_team_scratch_space(int scratch_pool_id, std::int64_t bytes,
+ bool force_shrink = false);
+ void release_team_scratch_space(int scratch_pool_id);
+};
+
+void create_HIP_instances(std::vector<HIP> &instances);
+} // namespace Impl
+
+namespace Experimental {
+// Partitioning an Execution Space: expects space and integer arguments for
+// relative weight
+// Customization point for backends
+// Default behavior is to return the passed in instance
+
+template <class... Args>
+std::vector<HIP> partition_space(const HIP &, Args...) {
+ static_assert(
+ (... && std::is_arithmetic_v<Args>),
+ "Kokkos Error: partitioning arguments must be integers or floats");
+
+ std::vector<HIP> instances(sizeof...(Args));
+ Kokkos::Impl::create_HIP_instances(instances);
+ return instances;
+}
+
+template <class T>
+std::vector<HIP> partition_space(const HIP &, std::vector<T> const &weights) {
+ static_assert(
+ std::is_arithmetic<T>::value,
+ "Kokkos Error: partitioning arguments must be integers or floats");
+
+ // We only care about the number of instances to create and ignore weights
+ // otherwise.
+ std::vector<HIP> instances(weights.size());
+ Kokkos::Impl::create_HIP_instances(instances);
+ return instances;
+}
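+
+// Usage sketch (illustrative, not part of this patch): both overloads create
+// one new instance, each with its own HIP stream, per argument or weight;
+// the weight values themselves are ignored.
+//
+//   Kokkos::HIP main_instance;
+//   auto parts = Kokkos::Experimental::partition_space(main_instance, 1, 1);
+//   // parts[0] and parts[1] run on distinct, independently fenceable streams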
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_KERNEL_LAUNCH_HPP
#define KOKKOS_HIP_KERNEL_LAUNCH_HPP
#include <HIP/Kokkos_HIP_Error.hpp>
#include <HIP/Kokkos_HIP_Instance.hpp>
-#include <Kokkos_HIP_Space.hpp>
-#include <HIP/Kokkos_HIP_Locks.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
+
+#ifdef KOKKOS_IMPL_HIP_NATIVE_GRAPH
+#include <HIP/Kokkos_HIP_GraphNodeKernel.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+#endif
// Must use global variable on the device with HIP-Clang
#ifdef __HIP__
kokkos_impl_hip_constant_memory_buffer[];
#else
__device__ __constant__ unsigned long kokkos_impl_hip_constant_memory_buffer
- [Kokkos::Experimental::Impl::HIPTraits::ConstantMemoryUsage /
- sizeof(unsigned long)];
+ [Kokkos::Impl::HIPTraits::ConstantMemoryUsage / sizeof(unsigned long)];
#endif
#endif
namespace Kokkos {
-namespace Experimental {
template <typename T>
inline __device__ T *kokkos_impl_hip_shared_memory() {
- extern __shared__ Kokkos::Experimental::HIPSpace::size_type sh[];
+ extern __shared__ Kokkos::HIPSpace::size_type sh[];
return (T *)sh;
}
-} // namespace Experimental
} // namespace Kokkos
namespace Kokkos {
-namespace Experimental {
namespace Impl {
// The hip_parallel_launch_*_memory code is identical to the cuda code
template <class DriverType>
__global__ static void hip_parallel_launch_local_memory(
- const DriverType *driver) {
- // FIXME_HIP driver() pass by copy
- driver->operator()();
+ const DriverType driver) {
+ driver();
}
template <class DriverType, unsigned int maxTperB, unsigned int minBperSM>
__global__ __launch_bounds__(
maxTperB,
minBperSM) static void hip_parallel_launch_local_memory(const DriverType
- *driver) {
- // FIXME_HIP driver() pass by copy
- driver->operator()();
+ driver) {
+ driver();
}
template <typename DriverType>
light_weight = Kokkos::Experimental::WorkItemProperty::HintLightWeight;
static constexpr Kokkos::Experimental::WorkItemProperty::HintHeavyWeight_t
heavy_weight = Kokkos::Experimental::WorkItemProperty::HintHeavyWeight;
+ static constexpr Kokkos::Experimental::WorkItemProperty::
+ ImplForceGlobalLaunch_t force_global_launch =
+ Kokkos::Experimental::WorkItemProperty::ImplForceGlobalLaunch;
static constexpr typename DriverType::Policy::work_item_property property =
typename DriverType::Policy::work_item_property();
// Kal<F<CMU CG LCG C C CG LG C G CG CG C C
// CMU<F G LCG G G G LG G G G CG G G
static constexpr HIPLaunchMechanism launch_mechanism =
- ((property & light_weight) == light_weight)
+ ((property & force_global_launch) == force_global_launch)
+ ? HIPLaunchMechanism::GlobalMemory
+ : ((property & light_weight) == light_weight)
? (sizeof(DriverType) < HIPTraits::KernelArgumentLimit
? HIPLaunchMechanism::LocalMemory
: HIPLaunchMechanism::GlobalMemory)
}
};
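+
+// Illustrative summary (not part of this patch) of the visible branches
+// above: ImplForceGlobalLaunch always selects GlobalMemory, and
+// HintLightWeight selects LocalMemory only while the driver fits in the
+// 4 KiB kernel-argument limit, e.g.
+//
+//   sizeof(DriverType) == 64   with light_weight -> LocalMemory
+//   sizeof(DriverType) == 8192 with light_weight -> GlobalMemory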
+//---------------------------------------------------------------//
+// Helper function //
+//---------------------------------------------------------------//
+inline bool is_empty_launch(dim3 const &grid, dim3 const &block) {
+ return (grid.x == 0) || ((block.x * block.y * block.z) == 0);
+}
+
//---------------------------------------------------------------//
// HIPParallelLaunchKernelFunc structure and its specializations //
//---------------------------------------------------------------//
using base_t = HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
HIPLaunchMechanism::LocalMemory>;
- static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+ static void invoke_kernel(DriverType const &driver, dim3 const &grid,
dim3 const &block, int shmem,
HIPInternal const *hip_instance) {
(base_t::get_kernel_func())<<<grid, block, shmem, hip_instance->m_stream>>>(
driver);
}
+
+#ifdef KOKKOS_IMPL_HIP_NATIVE_GRAPH
+ static void create_parallel_launch_graph_node(
+ DriverType const &driver, dim3 const &grid, dim3 const &block, int shmem,
+ HIPInternal const * /*hip_instance*/) {
+ auto const &graph = get_hip_graph_from_kernel(driver);
+ KOKKOS_EXPECTS(graph);
+ auto &graph_node = get_hip_graph_node_from_kernel(driver);
+ // Expect node not yet initialized
+ KOKKOS_EXPECTS(!graph_node);
+
+ if (!is_empty_launch(grid, block)) {
+ void const *args[] = {&driver};
+
+ hipKernelNodeParams params = {};
+
+ params.blockDim = block;
+ params.gridDim = grid;
+ params.sharedMemBytes = shmem;
+ params.func = (void *)base_t::get_kernel_func();
+ params.kernelParams = (void **)args;
+ params.extra = nullptr;
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphAddKernelNode(
+ &graph_node, graph, /* dependencies = */ nullptr,
+ /* numDependencies = */ 0, ¶ms));
+ } else {
+ // We still need an empty node for the dependency structure
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipGraphAddEmptyNode(&graph_node, graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0));
+ }
+ KOKKOS_ENSURES(graph_node);
+ }
+#endif
};
// HIPLaunchMechanism::GlobalMemory specialization
using base_t = HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
HIPLaunchMechanism::GlobalMemory>;
- // FIXME_HIP the code is different than cuda because driver cannot be passed
- // by copy
- static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+ static void invoke_kernel(DriverType const &driver, dim3 const &grid,
dim3 const &block, int shmem,
HIPInternal const *hip_instance) {
+ // Wait until the previous kernel that uses m_scratchFunctor is done
+ std::lock_guard<std::mutex> lock(HIPInternal::scratchFunctorMutex);
+ DriverType *driver_ptr = reinterpret_cast<DriverType *>(
+ hip_instance->stage_functor_for_execution(
+ reinterpret_cast<void const *>(&driver), sizeof(DriverType)));
(base_t::get_kernel_func())<<<grid, block, shmem, hip_instance->m_stream>>>(
- driver);
+ driver_ptr);
+ }
+
+#ifdef KOKKOS_IMPL_HIP_NATIVE_GRAPH
+ static void create_parallel_launch_graph_node(
+ DriverType const &driver, dim3 const &grid, dim3 const &block, int shmem,
+ HIPInternal const *hip_instance) {
+ auto const &graph = get_hip_graph_from_kernel(driver);
+ KOKKOS_EXPECTS(graph);
+ auto &graph_node = get_hip_graph_node_from_kernel(driver);
+ // Expect node not yet initialized
+ KOKKOS_EXPECTS(!graph_node);
+
+ if (!Impl::is_empty_launch(grid, block)) {
+ auto *driver_ptr = Impl::allocate_driver_storage_for_kernel(
+ HIP(hip_instance->m_stream, ManageStream::no), driver);
+
+ // Unlike in the non-graph case, we can get away with doing an async copy
+ // here because the `DriverType` instance is held in the GraphNodeImpl
+ // which is guaranteed to be alive until the graph instance itself is
+ // destroyed, where there should be a fence ensuring that the allocation
+ // associated with this kernel on the device side isn't deleted.
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipMemcpyAsync(driver_ptr, &driver, sizeof(DriverType),
+ hipMemcpyDefault, hip_instance->m_stream));
+
+ void const *args[] = {&driver_ptr};
+
+ hipKernelNodeParams params = {};
+
+ params.blockDim = block;
+ params.gridDim = grid;
+ params.sharedMemBytes = shmem;
+ params.func = (void *)base_t::get_kernel_func();
+ params.kernelParams = (void **)args;
+ params.extra = nullptr;
+
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGraphAddKernelNode(
+ &graph_node, graph, /* dependencies = */ nullptr,
+ /* numDependencies = */ 0, ¶ms));
+ } else {
+ // We still need an empty node for the dependency structure
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipGraphAddEmptyNode(&graph_node, graph,
+ /* dependencies = */ nullptr,
+ /* numDependencies = */ 0));
+ }
+ KOKKOS_ENSURES(graph_node);
}
+#endif
};
// HIPLaunchMechanism::ConstantMemory specializations
"Kokkos Error: Requested HIPLaunchConstantMemory with a "
"Functor larger than 32kB.");
- static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+ static void invoke_kernel(DriverType const &driver, dim3 const &grid,
dim3 const &block, int shmem,
HIPInternal const *hip_instance) {
// Wait until the previous kernel that uses the constant buffer is done
// Copy functor (synchronously) to staging buffer in pinned host memory
unsigned long *staging = hip_instance->constantMemHostStaging;
- std::memcpy((void *)staging, (void *)driver, sizeof(DriverType));
+ std::memcpy(static_cast<void *>(staging),
+ static_cast<const void *>(&driver), sizeof(DriverType));
// Copy functor asynchronously from there to constant memory on the device
KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyToSymbolAsync(
LaunchMechanism>;
HIPParallelLaunch(const DriverType &driver, const dim3 &grid,
- const dim3 &block, const int shmem,
+ const dim3 &block, const unsigned int shmem,
const HIPInternal *hip_instance,
const bool /*prefer_shmem*/) {
if ((grid.x != 0) && ((block.x * block.y * block.z) != 0)) {
- if (hip_instance->m_maxShmemPerBlock < shmem) {
+ if (hip_instance->m_deviceProp.sharedMemPerBlock < shmem) {
Kokkos::Impl::throw_runtime_exception(
"HIPParallelLaunch FAILED: shared memory request is too large");
}
- KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE();
-
- std::lock_guard<std::mutex> const lock(hip_instance->m_mutexWorkArray);
+ desul::ensure_hip_lock_arrays_on_device();
// Invoke the driver function on the device
- DriverType *d_driver = reinterpret_cast<DriverType *>(
- hip_instance->get_next_driver(sizeof(DriverType)));
- std::memcpy((void *)d_driver, (void *)&driver, sizeof(DriverType));
- base_t::invoke_kernel(d_driver, grid, block, shmem, hip_instance);
+ base_t::invoke_kernel(driver, grid, block, shmem, hip_instance);
#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
KOKKOS_IMPL_HIP_SAFE_CALL(hipGetLastError());
hip_instance->fence(
- "Kokkos::Experimental::Impl::HIParallelLaunch: Debug Only Check for "
+ "Kokkos::Impl::HIParallelLaunch: Debug Only Check for "
"Execution Error");
#endif
}
// al.
template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
HIPLaunchMechanism LaunchMechanism =
- DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+ DeduceHIPLaunchMechanism<DriverType>::launch_mechanism,
+ bool DoGraph = DriverType::Policy::is_graph_kernel::value>
void hip_parallel_launch(const DriverType &driver, const dim3 &grid,
const dim3 &block, const int shmem,
const HIPInternal *hip_instance,
const bool prefer_shmem) {
+#ifdef KOKKOS_IMPL_HIP_NATIVE_GRAPH
+ if constexpr (DoGraph) {
+ // Graph launch
+ using base_t = HIPParallelLaunchKernelInvoker<DriverType, LaunchBounds,
+ LaunchMechanism>;
+ base_t::create_parallel_launch_graph_node(driver, grid, block, shmem,
+ hip_instance);
+ } else
+#endif
+ {
+ // Regular kernel launch
#ifndef KOKKOS_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS
- HIPParallelLaunch<DriverType, LaunchBounds, LaunchMechanism>(
- driver, grid, block, shmem, hip_instance, prefer_shmem);
-#else
- // FIXME_HIP - could be if constexpr for c++17
- if (!HIPParallelLaunch<DriverType, LaunchBounds,
- LaunchMechanism>::default_launchbounds()) {
- // for user defined, we *always* honor the request
HIPParallelLaunch<DriverType, LaunchBounds, LaunchMechanism>(
driver, grid, block, shmem, hip_instance, prefer_shmem);
- } else {
- // we can do what we like
- const unsigned flat_block_size = block.x * block.y * block.z;
- if (flat_block_size <= HIPTraits::ConservativeThreadsPerBlock) {
- // we have to use the large blocksize
- HIPParallelLaunch<
- DriverType,
- Kokkos::LaunchBounds<HIPTraits::ConservativeThreadsPerBlock, 1>,
- LaunchMechanism>(driver, grid, block, shmem, hip_instance,
- prefer_shmem);
+#else
+ if constexpr (!HIPParallelLaunch<DriverType, LaunchBounds,
+ LaunchMechanism>::default_launchbounds()) {
+ // for user defined, we *always* honor the request
+ HIPParallelLaunch<DriverType, LaunchBounds, LaunchMechanism>(
+ driver, grid, block, shmem, hip_instance, prefer_shmem);
} else {
- HIPParallelLaunch<DriverType,
- Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
- LaunchMechanism>(driver, grid, block, shmem,
- hip_instance, prefer_shmem);
+ // we can do what we like
+ const unsigned flat_block_size = block.x * block.y * block.z;
+ if (flat_block_size <= HIPTraits::ConservativeThreadsPerBlock) {
+ // we have to use the large blocksize
+ HIPParallelLaunch<
+ DriverType,
+ Kokkos::LaunchBounds<HIPTraits::ConservativeThreadsPerBlock, 1>,
+ LaunchMechanism>(driver, grid, block, shmem, hip_instance,
+ prefer_shmem);
+ } else {
+ HIPParallelLaunch<
+ DriverType, Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
+ LaunchMechanism>(driver, grid, block, shmem, hip_instance,
+ prefer_shmem);
+ }
}
- }
#endif
+ }
}
} // namespace Impl
-} // namespace Experimental
} // namespace Kokkos
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_MDRANGEPOLICY_HPP_
+#define KOKKOS_HIP_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<HIP> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<HIP> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<HIP>(const HIP& space) {
+ TileSizeProperties properties;
+ properties.max_threads =
+ space.impl_internal_space_instance()->m_maxThreadsPerSM;
+ properties.default_largest_tile_size = 16;
+ properties.default_tile_size = 4;
+ properties.max_total_tile_size = HIPTraits::MaxThreadsPerBlock;
+ return properties;
+}
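+
+// Illustrative note (not part of this patch): for an MDRangePolicy with no
+// user-specified tiles these properties drive the defaults, e.g.
+//
+//   Kokkos::MDRangePolicy<Kokkos::HIP, Kokkos::Rank<2>> policy({0, 0},
+//                                                              {n0, n1});
+//   // tile sizes default from default_tile_size /
+//   // default_largest_tile_size, capped so the per-tile thread count
+//   // stays within max_total_tile_size (MaxThreadsPerBlock == 1024)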
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, HIP, ThreadAndVector>
+ : AcceleratorBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_FOR_MDRANGE_HPP
+#define KOKKOS_HIP_PARALLEL_FOR_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// ParallelFor
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>, HIP> {
+ public:
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using functor_type = FunctorType;
+
+ private:
+ using array_index_type = typename Policy::array_index_type;
+ using index_type = typename Policy::index_type;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ public:
+ ParallelFor() = delete;
+ ParallelFor(ParallelFor const&) = default;
+ ParallelFor& operator=(ParallelFor const&) = delete;
+
+ inline __device__ void operator()() const {
+ Kokkos::Impl::DeviceIterateTile<Policy::rank, Policy, FunctorType,
+ typename Policy::work_tag>(m_policy,
+ m_functor)
+ .exec_range();
+ }
+
+ inline void execute() const {
+ using ClosureType = ParallelFor<FunctorType, Policy, HIP>;
+ if (m_policy.m_num_tiles == 0) return;
+ auto const maxblocks = m_policy.space().hip_device_prop().maxGridSize;
+ if (Policy::rank == 2) {
+ dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1], 1);
+ dim3 const grid(
+ std::min<array_index_type>(
+ (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
+ block.x,
+ maxblocks[0]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
+ block.y,
+ maxblocks[1]),
+ 1);
+ hip_parallel_launch<ClosureType, LaunchBounds>(
+ *this, grid, block, 0,
+ m_policy.space().impl_internal_space_instance(), false);
+ } else if (Policy::rank == 3) {
+ dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1],
+ m_policy.m_tile[2]);
+ dim3 const grid(
+ std::min<array_index_type>(
+ (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
+ block.x,
+ maxblocks[0]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
+ block.y,
+ maxblocks[1]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[2] - m_policy.m_lower[2] + block.z - 1) /
+ block.z,
+ maxblocks[2]));
+ hip_parallel_launch<ClosureType, LaunchBounds>(
+ *this, grid, block, 0,
+ m_policy.space().impl_internal_space_instance(), false);
+ } else if (Policy::rank == 4) {
+ // id0,id1 encoded within threadIdx.x; id2 to threadIdx.y; id3 to
+ // threadIdx.z
+ dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+ m_policy.m_tile[2], m_policy.m_tile[3]);
+ dim3 const grid(
+ std::min<array_index_type>(
+ m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[2] - m_policy.m_lower[2] + block.y - 1) /
+ block.y,
+ maxblocks[1]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[3] - m_policy.m_lower[3] + block.z - 1) /
+ block.z,
+ maxblocks[2]));
+ hip_parallel_launch<ClosureType, LaunchBounds>(
+ *this, grid, block, 0,
+ m_policy.space().impl_internal_space_instance(), false);
+ } else if (Policy::rank == 5) {
+ // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4
+ // to threadIdx.z
+ dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+ m_policy.m_tile[2] * m_policy.m_tile[3],
+ m_policy.m_tile[4]);
+ dim3 const grid(
+ std::min<array_index_type>(
+ m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+ std::min<array_index_type>(
+ m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
+ std::min<array_index_type>(
+ (m_policy.m_upper[4] - m_policy.m_lower[4] + block.z - 1) /
+ block.z,
+ maxblocks[2]));
+ hip_parallel_launch<ClosureType, LaunchBounds>(
+ *this, grid, block, 0,
+ m_policy.space().impl_internal_space_instance(), false);
+ } else if (Policy::rank == 6) {
+ // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y;
+ // id4,id5 to threadIdx.z
+ dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+ m_policy.m_tile[2] * m_policy.m_tile[3],
+ m_policy.m_tile[4] * m_policy.m_tile[5]);
+ dim3 const grid(
+ std::min<array_index_type>(
+ m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+ std::min<array_index_type>(
+ m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
+ std::min<array_index_type>(
+ m_policy.m_tile_end[4] * m_policy.m_tile_end[5], maxblocks[2]));
+ hip_parallel_launch<ClosureType, LaunchBounds>(
+ *this, grid, block, 0,
+ m_policy.space().impl_internal_space_instance(), false);
+ } else {
+ Kokkos::abort("Kokkos::MDRange Error: Exceeded rank bounds with HIP\n");
+ }
+
+ } // end execute
+
+ ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ using closure_type =
+ ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>, HIP>;
+ unsigned block_size = hip_get_max_blocksize<closure_type, LaunchBounds>();
+ if (block_size == 0)
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelFor< HIP > could not find a valid "
+ "tile size."));
+ return block_size;
+ }
+};
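+
+// Editorial sketch (not part of Kokkos): how user code reaches the
+// specialization above. Assumes a HIP-enabled build; `scale3d`, `a`, and `s`
+// are illustrative names only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   void scale(Kokkos::View<double***, Kokkos::HIPSpace> a, double s) {
+//     Kokkos::parallel_for(
+//         "scale3d",
+//         Kokkos::MDRangePolicy<Kokkos::HIP, Kokkos::Rank<3>>(
+//             {0, 0, 0},
+//             {(int)a.extent(0), (int)a.extent(1), (int)a.extent(2)}),
+//         KOKKOS_LAMBDA(int i, int j, int k) { a(i, j, k) *= s; });
+//   }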
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_FOR_RANGE_HPP
+#define KOKKOS_HIP_PARALLEL_FOR_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::HIP> {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+ using Member = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ template <class TagType>
+ inline __device__ std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const Member i) const {
+ m_functor(i);
+ }
+
+ template <class TagType>
+ inline __device__ std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const Member i) const {
+ m_functor(TagType(), i);
+ }
+
+ public:
+ using functor_type = FunctorType;
+
+ ParallelFor() = delete;
+ ParallelFor(ParallelFor const&) = default;
+ ParallelFor& operator=(ParallelFor const&) = delete;
+
+ inline __device__ void operator()() const {
+ const Member work_stride = blockDim.y * gridDim.x;
+ const Member work_end = m_policy.end();
+
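+    // Grid-stride loop over [begin, end): the guarded update below adds the
+    // stride only while it cannot step past work_end, so iwork never
+    // overflows Member near the end of the index range.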
+ for (Member iwork =
+ m_policy.begin() + threadIdx.y + blockDim.y * blockIdx.x;
+ iwork < work_end;
+ iwork = iwork < work_end - work_stride ? iwork + work_stride
+ : work_end) {
+ this->template exec_range<WorkTag>(iwork);
+ }
+ }
+
+ inline void execute() const {
+ const typename Policy::index_type nwork = m_policy.end() - m_policy.begin();
+
+ using DriverType = ParallelFor<FunctorType, Policy, Kokkos::HIP>;
+ const int block_size =
+ Kokkos::Impl::hip_get_preferred_blocksize<DriverType, LaunchBounds>();
+ const dim3 block(1, block_size, 1);
+ const dim3 grid(
+ typename Policy::index_type((nwork + block.y - 1) / block.y), 1, 1);
+
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelFor< HIP > could not find a "
+ "valid execution configuration."));
+ }
+ Kokkos::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
+ *this, grid, block, 0, m_policy.space().impl_internal_space_instance(),
+ false);
+ }
+
+ ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
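+
+// Editorial sketch (not part of Kokkos): how user code reaches the
+// specialization above. Assumes a HIP-enabled build; `fill` and `x` are
+// illustrative names only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   void fill(Kokkos::View<double*, Kokkos::HIPSpace> x, double value) {
+//     Kokkos::parallel_for(
+//         "fill", Kokkos::RangePolicy<Kokkos::HIP>(0, x.extent(0)),
+//         KOKKOS_LAMBDA(int i) { x(i) = value; });
+//   }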
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_FOR_TEAM_HPP
+#define KOKKOS_HIP_PARALLEL_FOR_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_Team.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_TeamPolicyInternal.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename FunctorType, typename... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>, HIP> {
+ public:
+ using Policy = TeamPolicy<Properties...>;
+ using functor_type = FunctorType;
+ using size_type = HIP::size_type;
+
+ private:
+ using member_type = typename Policy::member_type;
+ using work_tag = typename Policy::work_tag;
+ using launch_bounds = typename Policy::launch_bounds;
+
+  // Algorithmic constraints: blockDim.y is a power of two AND
+  // blockDim.z == 1. Shared memory utilization:
+ //
+ // [ team reduce space ]
+ // [ team shared space ]
+
+ FunctorType const m_functor;
+ Policy const m_policy;
+ size_type const m_league_size;
+ int m_team_size;
+ size_type const m_vector_size;
+ int m_shmem_begin;
+ int m_shmem_size;
+ void* m_scratch_ptr[2];
+ size_t m_scratch_size[2];
+ int m_scratch_pool_id = -1;
+ int32_t* m_scratch_locks;
+ size_t m_num_scratch_locks;
+
+ template <typename TagType>
+ __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+ const member_type& member) const {
+ m_functor(member);
+ }
+
+ template <typename TagType>
+ __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+ const member_type& member) const {
+ m_functor(TagType(), member);
+ }
+
+ public:
+ ParallelFor() = delete;
+ ParallelFor(ParallelFor const&) = default;
+ ParallelFor& operator=(ParallelFor const&) = delete;
+
+ __device__ inline void operator()() const {
+ // Iterate this block through the league
+ int64_t threadid = 0;
+ if (m_scratch_size[1] > 0) {
+ threadid = hip_get_scratch_index(m_league_size, m_scratch_locks,
+ m_num_scratch_locks);
+ }
+
+ int const int_league_size = static_cast<int>(m_league_size);
+ for (int league_rank = blockIdx.x; league_rank < int_league_size;
+ league_rank += gridDim.x) {
+ this->template exec_team<work_tag>(typename Policy::member_type(
+ kokkos_impl_hip_shared_memory<void>(), m_shmem_begin, m_shmem_size,
+ static_cast<void*>(static_cast<char*>(m_scratch_ptr[1]) +
+ ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
+ m_scratch_size[1]),
+ m_scratch_size[1], league_rank, m_league_size));
+ }
+ if (m_scratch_size[1] > 0) {
+ hip_release_scratch_index(m_scratch_locks, threadid);
+ }
+ }
+
+ inline void execute() const {
+ int64_t const shmem_size_total = m_shmem_begin + m_shmem_size;
+ dim3 const grid(static_cast<int>(m_league_size), 1, 1);
+ dim3 const block(static_cast<int>(m_vector_size),
+ static_cast<int>(m_team_size), 1);
+
+ using closure_type =
+ ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>, HIP>;
+ Impl::hip_parallel_launch<closure_type, launch_bounds>(
+ *this, grid, block, shmem_size_total,
+ m_policy.space().impl_internal_space_instance(),
+ true); // copy to device and execute
+ }
+
+ ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_league_size(arg_policy.league_size()),
+ m_team_size(arg_policy.team_size()),
+ m_vector_size(arg_policy.impl_vector_length()) {
+ auto internal_space_instance =
+ m_policy.space().impl_internal_space_instance();
+ if (m_team_size < 0) {
+ m_team_size =
+ arg_policy.team_size_recommended(arg_functor, ParallelForTag());
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<HIP, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
+
+ m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+ m_shmem_size =
+ (m_policy.scratch_size(0, m_team_size) +
+ FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+ m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+ m_scratch_locks = internal_space_instance->m_scratch_locks;
+ m_num_scratch_locks = internal_space_instance->m_num_scratch_locks;
+
+ // Functor's reduce memory, team scan memory, and team shared memory depend
+ // upon team size.
+ m_scratch_ptr[0] = nullptr;
+ if (m_team_size <= 0) {
+ m_scratch_ptr[1] = nullptr;
+ } else {
+ m_scratch_pool_id = internal_space_instance->acquire_team_scratch_space();
+ m_scratch_ptr[1] = internal_space_instance->resize_team_scratch_space(
+ m_scratch_pool_id,
+ static_cast<std::int64_t>(m_scratch_size[1]) *
+ (std::min(
+ static_cast<std::int64_t>(HIP().concurrency() /
+ (m_team_size * m_vector_size)),
+ static_cast<std::int64_t>(m_league_size))));
+ }
+
+ unsigned int const shmem_size_total = m_shmem_begin + m_shmem_size;
+ if (internal_space_instance->m_deviceProp.sharedMemPerBlock <
+ shmem_size_total) {
+ Kokkos::Impl::throw_runtime_exception(std::string(
+ "Kokkos::Impl::ParallelFor< HIP > insufficient shared memory"));
+ }
+
+ size_t max_size = arg_policy.team_size_max(arg_functor, ParallelForTag());
+ if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
+ Kokkos::Impl::throw_runtime_exception(std::string(
+ "Kokkos::Impl::ParallelFor< HIP > requested too large team size."));
+ }
+ }
+
+ ~ParallelFor() {
+ if (m_scratch_pool_id >= 0) {
+ m_policy.space()
+ .impl_internal_space_instance()
+ ->release_team_scratch_space(m_scratch_pool_id);
+ }
+ }
+};
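+
+// Editorial sketch (not part of Kokkos): a TeamPolicy dispatch that
+// exercises the level-0 scratch sizing handled above. Assumes a HIP-enabled
+// build; all names are illustrative only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   void team_fill(int league_size) {
+//     using policy_type  = Kokkos::TeamPolicy<Kokkos::HIP>;
+//     using member_type  = policy_type::member_type;
+//     using scratch_view = Kokkos::View<double*,
+//                                       Kokkos::HIP::scratch_memory_space,
+//                                       Kokkos::MemoryUnmanaged>;
+//     auto policy = policy_type(league_size, Kokkos::AUTO)
+//                       .set_scratch_size(
+//                           0, Kokkos::PerTeam(scratch_view::shmem_size(64)));
+//     Kokkos::parallel_for(
+//         "team_fill", policy, KOKKOS_LAMBDA(const member_type& team) {
+//           scratch_view tmp(team.team_scratch(0), 64);
+//           Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 64),
+//                                [&](int i) { tmp(i) = i; });
+//         });
+//   }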
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_REDUCE_MDRANGE_HPP
+#define KOKKOS_HIP_PARALLEL_REDUCE_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// ParallelReduce
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, HIP> {
+ public:
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using array_index_type = typename Policy::array_index_type;
+ using index_type = typename Policy::index_type;
+
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ public:
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+ using functor_type = FunctorType;
+ using reducer_type = ReducerType;
+ using size_type = HIP::size_type;
+
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::HIP::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the reduction is performed.
+ // Within the reduction, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the reduction, word_size_type is used again.
+ // For scalars > 4 bytes in size, indexing into shared/global memory relies
+ // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that when the join is
+  // performed we operate on the correct data that was copied over in chunks
+  // of 4 bytes.
+ static_assert(sizeof(size_type) == 4);
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < 4,
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
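+  // For example (editorial note): a 4- or 8-byte value_type keeps
+  // word_size_type == size_type (4-byte words), a 2-byte value_type selects
+  // int16_t, and a 1-byte value_type selects int8_t.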
+
+  // Algorithmic constraints: blockDim.y (the block size) is a power of two
+  // AND blockDim.x == blockDim.z == 1
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy; // used for workrange and nwork
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+ word_size_type* m_scratch_space;
+ size_type* m_scratch_flags;
+
+ using DeviceIteratePattern = typename Kokkos::Impl::Reduce::DeviceIterateTile<
+ Policy::rank, Policy, FunctorType, WorkTag, reference_type>;
+
+ public:
+ inline __device__ void exec_range(reference_type update) const {
+ DeviceIteratePattern(m_policy, m_functor_reducer.get_functor(), update)
+ .exec_range();
+ }
+
+ inline __device__ void operator()() const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ const integral_nonzero_constant<word_size_type,
+ ReducerType::static_value_size() /
+ sizeof(word_size_type)>
+ word_count(reducer.value_size() / sizeof(word_size_type));
+
+ {
+ reference_type value = reducer.init(reinterpret_cast<pointer_type>(
+ kokkos_impl_hip_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
+
+ // Number of blocks is bounded so that the reduction can be limited to two
+ // passes. Each thread block is given an approximately equal amount of
+ // work to perform. Accumulate the values for this block. The accumulation
+ // ordering does not match the final pass, but is arithmetically
+ // equivalent.
+
+ this->exec_range(value);
+ }
+
+ // Reduce with final value at blockDim.y - 1 location.
+ // Problem: non power-of-two blockDim
+ if (::Kokkos::Impl::hip_single_inter_block_reduce_scan<false>(
+ reducer, blockIdx.x, gridDim.x,
+ kokkos_impl_hip_shared_memory<word_size_type>(), m_scratch_space,
+ m_scratch_flags)) {
+ // This is the final block with the final result at the final threads'
+ // location
+ word_size_type* const shared =
+ kokkos_impl_hip_shared_memory<word_size_type>() +
+ (blockDim.y - 1) * word_count.value;
+ word_size_type* const global =
+ m_result_ptr_device_accessible
+ ? reinterpret_cast<word_size_type*>(m_result_ptr)
+ : m_scratch_space;
+
+ if (threadIdx.y == 0) {
+ reducer.final(reinterpret_cast<value_type*>(shared));
+ }
+
+ if (Impl::HIPTraits::WarpSize < word_count.value) {
+ __syncthreads();
+ }
+
+ for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+ global[i] = shared[i];
+ }
+ }
+ }
+
+ // Determine block size constrained by shared memory:
+  // This mirrors local_block_size in the RangePolicy ParallelReduce
+  // specialization
+ inline unsigned local_block_size(const FunctorType& f) {
+ const auto& instance = m_policy.space().impl_internal_space_instance();
+ auto shmem_functor = [&f](unsigned n) {
+ return hip_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(f, n);
+ };
+
+ unsigned block_size =
+ Kokkos::Impl::hip_get_preferred_blocksize<ParallelReduce, LaunchBounds>(
+ instance, shmem_functor);
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+ "valid tile size."));
+ }
+ return block_size;
+ }
+
+ inline void execute() {
+ ReducerType reducer = m_functor_reducer.get_reducer();
+
+ const auto nwork = m_policy.m_num_tiles;
+ if (nwork) {
+ int block_size = m_policy.m_prod_tile_dims;
+ // CONSTRAINT: Algorithm requires block_size >= product of tile dimensions
+ // Nearest power of two
+ int exponent_pow_two = std::ceil(std::log2(block_size));
+ block_size = std::pow(2, exponent_pow_two);
+ int suggested_blocksize =
+ local_block_size(m_functor_reducer.get_functor());
+
+ block_size = (block_size > suggested_blocksize)
+ ? block_size
+ : suggested_blocksize; // Note: block_size must be less
+ // than or equal to 512
+
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(hip_internal_scratch_space(
+ m_policy.space(),
+ reducer.value_size() *
+ block_size /* block_size == max block_count */));
+ m_scratch_flags =
+ hip_internal_scratch_flags(m_policy.space(), sizeof(size_type));
+
+ // REQUIRED ( 1 , N , 1 )
+ const dim3 block(1, block_size, 1);
+ // Required grid.x <= block.y
+ const dim3 grid(std::min(static_cast<uint32_t>(block.y),
+ static_cast<uint32_t>(nwork)),
+ 1, 1);
+
+ const int shmem =
+ ::Kokkos::Impl::hip_single_inter_block_reduce_scan_shmem<
+ false, WorkTag, value_type>(m_functor_reducer.get_functor(),
+ block.y);
+
+ hip_parallel_launch<ParallelReduce, LaunchBounds>(
+ *this, grid, block, shmem,
+ m_policy.space().impl_internal_space_instance(),
+ false); // copy to device and execute
+
+ if (!m_result_ptr_device_accessible && m_result_ptr) {
+ const int size = reducer.value_size();
+ DeepCopy<HostSpace, HIPSpace, HIP>(m_policy.space(), m_result_ptr,
+ m_scratch_space, size);
+ }
+ } else {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ }
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<HIPSpace,
+ typename ViewType::memory_space>::accessible),
+ m_scratch_space(nullptr),
+ m_scratch_flags(nullptr) {}
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ using closure_type = ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, HIP>;
+ unsigned block_size = hip_get_max_blocksize<closure_type, LaunchBounds>();
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+ "valid tile size."));
+ }
+ return block_size;
+ }
+};
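+
+// Editorial sketch (not part of Kokkos): an MDRange reduction serviced by
+// the class above. Assumes a HIP-enabled build; names are illustrative only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   double sum2d(Kokkos::View<double**, Kokkos::HIPSpace> a) {
+//     double result = 0.0;
+//     Kokkos::parallel_reduce(
+//         "sum2d",
+//         Kokkos::MDRangePolicy<Kokkos::HIP, Kokkos::Rank<2>>(
+//             {0, 0}, {(int)a.extent(0), (int)a.extent(1)}),
+//         KOKKOS_LAMBDA(int i, int j, double& update) { update += a(i, j); },
+//         result);
+//     return result;
+//   }
+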
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_REDUCE_RANGE_HPP
+#define KOKKOS_HIP_PARALLEL_REDUCE_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+#include <HIP/Kokkos_HIP_Shuffle_Reduce.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::HIP> {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using WorkRange = typename Policy::WorkRange;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ public:
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+ using functor_type = FunctorType;
+ using reducer_type = ReducerType;
+ using size_type = Kokkos::HIP::size_type;
+ using index_type = typename Policy::index_type;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::HIP::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+  // size, and global memory buffer size before the reduction is performed.
+  // Within the reduction, the word count is recomputed based on
+  // word_size_type and when calculating indexes into the shared/global
+  // memory buffers for performing the reduction, word_size_type is used
+  // again.
+  // For scalars > 4 bytes in size, indexing into shared/global memory relies
+  // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that when the join is
+  // performed we operate on the correct data that was copied over in chunks
+  // of 4 bytes.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
+
+  // Algorithmic constraints: blockDim.y (the block size) is a power of two
+  // AND blockDim.x == blockDim.z == 1
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+ const bool m_result_ptr_host_accessible;
+ word_size_type* m_scratch_space = nullptr;
+ size_type* m_scratch_flags = nullptr;
+
+ static constexpr bool UseShflReduction = false;
+
+ private:
+ struct ShflReductionTag {};
+ struct SHMEMReductionTag {};
+
+  // Call the functor for one work item, prepending the work tag when it is
+  // non-void
+ template <class TagType>
+ __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const Member& i, reference_type update) const {
+ m_functor_reducer.get_functor()(i, update);
+ }
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const Member& i, reference_type update) const {
+ m_functor_reducer.get_functor()(TagType(), i, update);
+ }
+
+ public:
+ __device__ inline void operator()() const {
+ using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
+ SHMEMReductionTag>;
+ run(ReductionTag{});
+ }
+
+ __device__ inline void run(SHMEMReductionTag) const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+ const integral_nonzero_constant<word_size_type,
+ ReducerType::static_value_size() /
+ sizeof(word_size_type)>
+ word_count(reducer.value_size() / sizeof(word_size_type));
+
+ {
+ reference_type value = reducer.init(reinterpret_cast<pointer_type>(
+ ::Kokkos::kokkos_impl_hip_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
+
+ // Number of blocks is bounded so that the reduction can be limited to two
+ // passes. Each thread block is given an approximately equal amount of
+ // work to perform. Accumulate the values for this block. The accumulation
+ // ordering does not match the final pass, but is arithmetically
+ // equivalent.
+
+ const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+ for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+ iwork < iwork_end; iwork += blockDim.y) {
+ this->template exec_range<WorkTag>(iwork, value);
+ }
+ }
+
+ // Reduce with final value at blockDim.y - 1 location.
+ // Shortcut for length zero reduction
+ bool do_final_reduction = m_policy.begin() == m_policy.end();
+ if (!do_final_reduction)
+ do_final_reduction = hip_single_inter_block_reduce_scan<false>(
+ reducer, blockIdx.x, gridDim.x,
+ ::Kokkos::kokkos_impl_hip_shared_memory<word_size_type>(),
+ m_scratch_space, m_scratch_flags);
+ if (do_final_reduction) {
+ // This is the final block with the final result at the final threads'
+ // location
+
+ word_size_type* const shared =
+ ::Kokkos::kokkos_impl_hip_shared_memory<word_size_type>() +
+ (blockDim.y - 1) * word_count.value;
+ word_size_type* const global =
+ m_result_ptr_device_accessible
+ ? reinterpret_cast<word_size_type*>(m_result_ptr)
+ : m_scratch_space;
+
+ if (threadIdx.y == 0) {
+ reducer.final(reinterpret_cast<value_type*>(shared));
+ }
+
+ if (::Kokkos::Impl::HIPTraits::WarpSize < word_count.value) {
+ __syncthreads();
+ }
+
+ for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+ global[i] = shared[i];
+ }
+ }
+ }
+
+ __device__ inline void run(ShflReductionTag) const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ value_type value;
+ reducer.init(&value);
+ // Number of blocks is bounded so that the reduction can be limited to two
+ // passes. Each thread block is given an approximately equal amount of work
+ // to perform. Accumulate the values for this block. The accumulation
+ // ordering does not match the final pass, but is arithmetically equivalent.
+
+ WorkRange const range(m_policy, blockIdx.x, gridDim.x);
+
+ for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+ iwork < iwork_end; iwork += blockDim.y) {
+ this->template exec_range<WorkTag>(iwork, value);
+ }
+
+ pointer_type const result = reinterpret_cast<pointer_type>(m_scratch_space);
+
+ int max_active_thread = static_cast<int>(range.end() - range.begin()) <
+ static_cast<int>(blockDim.y)
+ ? range.end() - range.begin()
+ : blockDim.y;
+
+ max_active_thread =
+ (max_active_thread == 0) ? blockDim.y : max_active_thread;
+
+ value_type init;
+ reducer.init(&init);
+ if (m_policy.begin() == m_policy.end()) {
+ reducer.final(&value);
+ pointer_type const final_result =
+ m_result_ptr_device_accessible ? m_result_ptr : result;
+ *final_result = value;
+ } else if (Impl::hip_inter_block_shuffle_reduction<>(
+ value, init, reducer, m_scratch_space, result,
+ m_scratch_flags, max_active_thread)) {
+ unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
+ if (id == 0) {
+ reducer.final(&value);
+ pointer_type const final_result =
+ m_result_ptr_device_accessible ? m_result_ptr : result;
+ *final_result = value;
+ }
+ }
+ }
+
+ // Determine block size constrained by shared memory:
+ inline unsigned local_block_size(const FunctorType& f) {
+ const auto& instance = m_policy.space().impl_internal_space_instance();
+ auto shmem_functor = [&f](unsigned n) {
+ return hip_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(f, n);
+ };
+ return Kokkos::Impl::hip_get_preferred_blocksize<ParallelReduce,
+ LaunchBounds>(
+ instance, shmem_functor);
+ }
+
+ inline void execute() {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ const index_type nwork = m_policy.end() - m_policy.begin();
+ const bool need_device_set = ReducerType::has_init_member_function() ||
+ ReducerType::has_final_member_function() ||
+ !m_result_ptr_host_accessible ||
+ !std::is_same<ReducerType, InvalidType>::value;
+ if ((nwork > 0) || need_device_set) {
+ const int block_size = local_block_size(m_functor_reducer.get_functor());
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+ "valid execution configuration."));
+ }
+
+ // REQUIRED ( 1 , N , 1 )
+ dim3 block(1, block_size, 1);
+ // use a slightly less constrained, but still well bounded limit for
+ // scratch
+ int nblocks = (nwork + block.y - 1) / block.y;
+ // Heuristic deciding the value of nblocks.
+ // The general idea here is we want to:
+ // 1. Not undersubscribe the device (i.e., we want at least
+ // preferred_block_min blocks)
+ // 2. Have each thread reduce > 1 value to minimize overheads
+ // 3. Limit the total # of blocks, to avoid unbounded scratch space
+ constexpr int block_max = 4096;
+ constexpr int preferred_block_min = 1024;
+
+ if (nblocks < preferred_block_min) {
+ // keep blocks as is, already have low parallelism
+ } else if (nblocks > block_max) {
+ // "large dispatch" -> already have lots of parallelism
+ nblocks = block_max;
+ } else {
+ // in the intermediate range, try to have each thread process multiple
+ // items to offset the cost of the reduction (with not enough
+ // parallelism to hide it)
+ int items_per_thread =
+ (nwork + nblocks * block_size - 1) / (nblocks * block_size);
+ if (items_per_thread < 4) {
+ int ratio = std::min(
+ (nblocks + preferred_block_min - 1) / preferred_block_min,
+ (4 + items_per_thread - 1) / items_per_thread);
+ nblocks /= ratio;
+ }
+ }
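+      // Worked example (editorial): nwork = 1000000 with block_size = 256
+      // gives nblocks = 3907, which lands in the intermediate branch:
+      // items_per_thread is 1, so ratio = min(4, 4) = 4 and nblocks shrinks
+      // to 976, leaving each thread roughly four items to accumulate.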
+
+ // TODO: down casting these uses more space than required?
+ m_scratch_space =
+ (word_size_type*)::Kokkos::Impl::hip_internal_scratch_space(
+ m_policy.space(), reducer.value_size() * nblocks);
+ // Intentionally do not downcast to word_size_type since we use HIP
+ // atomics in Kokkos_HIP_ReduceScan.hpp
+ m_scratch_flags = ::Kokkos::Impl::hip_internal_scratch_flags(
+ m_policy.space(), sizeof(size_type));
+ // Required grid.x <= block.y
+ dim3 grid(nblocks, 1, 1);
+
+ if (nwork == 0) {
+ block = dim3(1, 1, 1);
+ grid = dim3(1, 1, 1);
+ }
+ const int shmem =
+ UseShflReduction
+ ? 0
+ : hip_single_inter_block_reduce_scan_shmem<false, WorkTag,
+ value_type>(
+ m_functor_reducer.get_functor(), block.y);
+
+ Kokkos::Impl::hip_parallel_launch<ParallelReduce, LaunchBounds>(
+ *this, grid, block, shmem,
+ m_policy.space().impl_internal_space_instance(),
+ false); // copy to device and execute
+
+ if (!m_result_ptr_device_accessible && m_result_ptr) {
+ const int size = reducer.value_size();
+ DeepCopy<HostSpace, HIPSpace, HIP>(m_policy.space(), m_result_ptr,
+ m_scratch_space, size);
+ }
+ } else {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ }
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<HIPSpace,
+ typename ViewType::memory_space>::accessible),
+ m_result_ptr_host_accessible(
+ MemorySpaceAccess<Kokkos::HostSpace,
+ typename ViewType::memory_space>::accessible) {}
+};
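+
+// Editorial sketch (not part of Kokkos): a RangePolicy reduction serviced by
+// the class above. Assumes a HIP-enabled build; `dot`, `x`, and `y` are
+// illustrative names only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   double dot(Kokkos::View<double*, Kokkos::HIPSpace> x,
+//              Kokkos::View<double*, Kokkos::HIPSpace> y) {
+//     double result = 0.0;
+//     Kokkos::parallel_reduce(
+//         "dot", Kokkos::RangePolicy<Kokkos::HIP>(0, x.extent(0)),
+//         KOKKOS_LAMBDA(int i, double& update) { update += x(i) * y(i); },
+//         result);
+//     return result;
+//   }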
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_REDUCE_TEAM_HPP
+#define KOKKOS_HIP_PARALLEL_REDUCE_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_Team.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_TeamPolicyInternal.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>, HIP> {
+ public:
+ using Policy = TeamPolicy<Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using member_type = typename Policy::member_type;
+ using work_tag = typename Policy::work_tag;
+ using launch_bounds = typename Policy::launch_bounds;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+ using value_type = typename ReducerType::value_type;
+
+ public:
+ using functor_type = FunctorType;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::HIP::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the reduction is performed.
+ // Within the reduction, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the reduction, word_size_type is used again.
+ // For scalars > 4 bytes in size, indexing into shared/global memory relies
+ // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that when the join is
+  // performed we operate on the correct data that was copied over in chunks
+  // of 4 bytes.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(Kokkos::HIP::size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>,
+ Kokkos::HIP::size_type>;
+ using reducer_type = ReducerType;
+ using size_type = HIP::size_type;
+
+ // static int constexpr UseShflReduction = false;
+ // FIXME_HIP This should be disabled unconditionally for best performance, but
+ // it currently causes tests to fail.
+ static constexpr int UseShflReduction =
+ (ReducerType::static_value_size() != 0);
+
+ private:
+ struct ShflReductionTag {};
+ struct SHMEMReductionTag {};
+
+  // Algorithmic constraints: blockDim.y is a power of two AND
+  // blockDim.z == 1. Shared memory utilization:
+ //
+ // [ global reduce space ]
+ // [ team reduce space ]
+ // [ team shared space ]
+ //
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+ const bool m_result_ptr_host_accessible;
+ word_size_type* m_scratch_space;
+ size_type* m_scratch_flags;
+ size_type m_team_begin;
+ size_type m_shmem_begin;
+ size_type m_shmem_size;
+ void* m_scratch_ptr[2];
+ size_t m_scratch_size[2];
+ int m_scratch_pool_id = -1;
+ int32_t* m_scratch_locks;
+ size_t m_num_scratch_locks;
+ const size_type m_league_size;
+ int m_team_size;
+ const size_type m_vector_size;
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+ member_type const& member, reference_type update) const {
+ m_functor_reducer.get_functor()(member, update);
+ }
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+ member_type const& member, reference_type update) const {
+ m_functor_reducer.get_functor()(TagType(), member, update);
+ }
+
+ __device__ inline void iterate_through_league(int const threadid,
+ reference_type value) const {
+ int const int_league_size = static_cast<int>(m_league_size);
+ for (int league_rank = blockIdx.x; league_rank < int_league_size;
+ league_rank += gridDim.x) {
+ this->template exec_team<work_tag>(
+ member_type(
+ kokkos_impl_hip_shared_memory<char>() + m_team_begin,
+ m_shmem_begin, m_shmem_size,
+ reinterpret_cast<void*>(
+ reinterpret_cast<char*>(m_scratch_ptr[1]) +
+ static_cast<ptrdiff_t>(threadid / (blockDim.x * blockDim.y)) *
+ m_scratch_size[1]),
+ m_scratch_size[1], league_rank, m_league_size),
+ value);
+ }
+ }
+
+ int compute_block_count() const {
+ constexpr auto light_weight =
+ Kokkos::Experimental::WorkItemProperty::HintLightWeight;
+ constexpr typename Policy::work_item_property property;
+ // Numbers were tuned on MI210 using dot product and yAx benchmarks
+ constexpr int block_max =
+ (property & light_weight) == light_weight ? 2097152 : 65536;
+ constexpr int preferred_block_min = 1024;
+ int block_count = m_league_size;
+ if (block_count < preferred_block_min) {
+ // keep blocks as is, already low parallelism
+ } else if (block_count >= block_max) {
+ block_count = block_max;
+
+ } else {
+ int nwork = m_league_size * m_team_size;
+ int items_per_thread =
+ (nwork + block_count * m_team_size - 1) / (block_count * m_team_size);
+ if (items_per_thread < 4) {
+ int ratio = std::min(
+ (block_count + preferred_block_min - 1) / preferred_block_min,
+ (4 + items_per_thread - 1) / items_per_thread);
+ block_count /= ratio;
+ }
+ }
+
+ return block_count;
+ }
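+  // Worked example (editorial): league_size = 10000 with m_team_size = 128
+  // and a non-light-weight kernel lands in the middle branch:
+  // items_per_thread is 1, so ratio = min(10, 4) = 4 and the returned
+  // block_count is 2500, leaving each block about four league ranks.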
+
+ public:
+ __device__ inline void operator()() const {
+ int64_t threadid = 0;
+ if (m_scratch_size[1] > 0) {
+ threadid = hip_get_scratch_index(m_league_size, m_scratch_locks,
+ m_num_scratch_locks);
+ }
+
+ using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
+ SHMEMReductionTag>;
+ run(ReductionTag{}, threadid);
+
+ if (m_scratch_size[1] > 0) {
+ hip_release_scratch_index(m_scratch_locks, threadid);
+ }
+ }
+
+ __device__ inline void run(SHMEMReductionTag, int const threadid) const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ integral_nonzero_constant<word_size_type, ReducerType::static_value_size() /
+ sizeof(word_size_type)> const
+ word_count(reducer.value_size() / sizeof(word_size_type));
+
+ reference_type value = reducer.init(reinterpret_cast<pointer_type>(
+ kokkos_impl_hip_shared_memory<word_size_type>() +
+ threadIdx.y * word_count.value));
+ // Iterate this block through the league
+ iterate_through_league(threadid, value);
+
+ // Reduce with final value at blockDim.y - 1 location.
+ bool do_final_reduce = (m_league_size == 0);
+ if (!do_final_reduce)
+ do_final_reduce = hip_single_inter_block_reduce_scan<false>(
+ reducer, blockIdx.x, gridDim.x,
+ kokkos_impl_hip_shared_memory<word_size_type>(), m_scratch_space,
+ m_scratch_flags);
+ if (do_final_reduce) {
+ // This is the final block with the final result at the final threads'
+ // location
+
+ word_size_type* const shared =
+ kokkos_impl_hip_shared_memory<word_size_type>() +
+ (blockDim.y - 1) * word_count.value;
+      word_size_type* const global =
+ m_result_ptr_device_accessible
+ ? reinterpret_cast<word_size_type*>(m_result_ptr)
+ : m_scratch_space;
+
+ if (threadIdx.y == 0) {
+ reducer.final(reinterpret_cast<value_type*>(shared));
+ }
+
+ if (HIPTraits::WarpSize < word_count.value) {
+ __syncthreads();
+ }
+
+ for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+ global[i] = shared[i];
+ }
+ }
+ }
+
+ __device__ inline void run(ShflReductionTag, int const threadid) const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ value_type value;
+ reducer.init(&value);
+
+ // Iterate this block through the league
+ iterate_through_league(threadid, value);
+
+ pointer_type const result =
+ m_result_ptr_device_accessible
+ ? m_result_ptr
+ : reinterpret_cast<pointer_type>(m_scratch_space);
+
+ value_type init;
+ reducer.init(&init);
+ if (m_league_size == 0) {
+ reducer.final(&value);
+ *result = value;
+ } else if (Impl::hip_inter_block_shuffle_reduction(
+ value, init, reducer,
+ reinterpret_cast<pointer_type>(m_scratch_space), result,
+ m_scratch_flags, blockDim.y)) {
+ unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
+ if (id == 0) {
+ reducer.final(&value);
+ *result = value;
+ }
+ }
+ }
+
+ inline void execute() {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ const bool is_empty_range = m_league_size == 0 || m_team_size == 0;
+ const bool need_device_set = ReducerType::has_init_member_function() ||
+ ReducerType::has_final_member_function() ||
+ !m_result_ptr_host_accessible ||
+ Policy::is_graph_kernel::value ||
+ !std::is_same<ReducerType, InvalidType>::value;
+ if (!is_empty_range || need_device_set) {
+ int const block_count = compute_block_count();
+
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(hip_internal_scratch_space(
+ m_policy.space(), reducer.value_size() * block_count));
+ m_scratch_flags =
+ hip_internal_scratch_flags(m_policy.space(), sizeof(size_type));
+
+ dim3 block(m_vector_size, m_team_size, 1);
+ dim3 grid(block_count, 1, 1);
+ if (is_empty_range) {
+ block = dim3(1, 1, 1);
+ grid = dim3(1, 1, 1);
+ }
+ const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+ Impl::hip_parallel_launch<ParallelReduce, launch_bounds>(
+ *this, grid, block, shmem_size_total,
+ m_policy.space().impl_internal_space_instance(),
+ true); // copy to device and execute
+
+ if (!m_result_ptr_device_accessible) {
+ m_policy.space().impl_internal_space_instance()->fence();
+
+ if (m_result_ptr) {
+ const int size = reducer.value_size();
+ DeepCopy<HostSpace, HIPSpace, HIP>(m_policy.space(), m_result_ptr,
+ m_scratch_space, size);
+ }
+ }
+ } else {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ }
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(CombinedFunctorReducerType const& arg_functor_reducer,
+ Policy const& arg_policy, ViewType const& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<HIPSpace,
+ typename ViewType::memory_space>::accessible),
+ m_result_ptr_host_accessible(
+ MemorySpaceAccess<Kokkos::HostSpace,
+ typename ViewType::memory_space>::accessible),
+ m_scratch_space(nullptr),
+ m_scratch_flags(nullptr),
+ m_team_begin(0),
+ m_shmem_begin(0),
+ m_shmem_size(0),
+ m_scratch_ptr{nullptr, nullptr},
+ m_league_size(arg_policy.league_size()),
+ m_team_size(arg_policy.team_size()),
+ m_vector_size(arg_policy.impl_vector_length()) {
+ auto internal_space_instance =
+ m_policy.space().impl_internal_space_instance();
+ if (m_team_size < 0) {
+ m_team_size = arg_policy.team_size_recommended(
+ arg_functor_reducer.get_functor(), arg_functor_reducer.get_reducer(),
+ ParallelReduceTag());
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelReduce<HIP, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
+
+ m_team_begin =
+ UseShflReduction
+ ? 0
+ : hip_single_inter_block_reduce_scan_shmem<false, work_tag,
+ value_type>(
+ arg_functor_reducer.get_functor(), m_team_size);
+ m_shmem_begin = sizeof(double) * (m_team_size + 2);
+ m_shmem_size = m_policy.scratch_size(0, m_team_size) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor_reducer.get_functor(), m_team_size);
+ m_scratch_size[0] = m_shmem_size;
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+ m_scratch_locks = internal_space_instance->m_scratch_locks;
+ m_num_scratch_locks = internal_space_instance->m_num_scratch_locks;
+ if (m_team_size <= 0) {
+ m_scratch_ptr[1] = nullptr;
+ } else {
+ m_scratch_pool_id = internal_space_instance->acquire_team_scratch_space();
+ m_scratch_ptr[1] = internal_space_instance->resize_team_scratch_space(
+ m_scratch_pool_id,
+ static_cast<std::int64_t>(m_scratch_size[1]) *
+ (std::min(
+ static_cast<std::int64_t>(HIP().concurrency() /
+ (m_team_size * m_vector_size)),
+ static_cast<std::int64_t>(m_league_size))));
+ }
+
+ // The global parallel_reduce does not support vector_length other than 1 at
+ // the moment
+ if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
+ Impl::throw_runtime_exception(
+ "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
+ "greater than 1 is not currently supported for HIP for dynamic "
+ "sized reduction types.");
+
+ if ((m_team_size < HIPTraits::WarpSize) && !UseShflReduction)
+ Impl::throw_runtime_exception(
+ "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
+ "than 64 is not currently supported with HIP for dynamic sized "
+ "reduction types.");
+
+ // Functor's reduce memory, team scan memory, and team shared memory depend
+ // upon team size.
+
+ const unsigned int shmem_size_total =
+ m_team_begin + m_shmem_begin + m_shmem_size;
+
+ if (!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
+ !UseShflReduction) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > bad team size"));
+ }
+
+ if (internal_space_instance->m_deviceProp.sharedMemPerBlock <
+ shmem_size_total) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > requested too much "
+ "L0 scratch memory"));
+ }
+
+ size_t max_size = arg_policy.team_size_max(
+ arg_functor_reducer.get_functor(), arg_functor_reducer.get_reducer(),
+ ParallelReduceTag());
+ if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelReduce< HIP > requested too "
+ "large team size."));
+ }
+ }
+
+ ~ParallelReduce() {
+ if (m_scratch_pool_id >= 0) {
+ m_policy.space()
+ .impl_internal_space_instance()
+ ->release_team_scratch_space(m_scratch_pool_id);
+ }
+ }
+};
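+
+// Editorial sketch (not part of Kokkos): a hierarchical reduction dispatched
+// through the class above. Assumes a HIP-enabled build; names are
+// illustrative only.
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   double total(Kokkos::View<double**, Kokkos::HIPSpace> a) {
+//     using policy_type = Kokkos::TeamPolicy<Kokkos::HIP>;
+//     using member_type = policy_type::member_type;
+//     double result = 0.0;
+//     Kokkos::parallel_reduce(
+//         "total", policy_type((int)a.extent(0), Kokkos::AUTO),
+//         KOKKOS_LAMBDA(const member_type& team, double& update) {
+//           double row = 0.0;
+//           Kokkos::parallel_reduce(
+//               Kokkos::TeamThreadRange(team, (int)a.extent(1)),
+//               [&](int j, double& s) { s += a(team.league_rank(), j); },
+//               row);
+//           Kokkos::single(Kokkos::PerTeam(team), [&]() { update += row; });
+//         },
+//         result);
+//     return result;
+//   }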
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_PARALLEL_SCAN_RANGE_HPP
+#define KOKKOS_HIP_PARALLEL_SCAN_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class ValueType, class... Traits>
+class ParallelScanHIPBase {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ protected:
+ using Member = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ using Analysis =
+ Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ValueType>;
+
+ public:
+ using value_type = typename Analysis::value_type;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+ using functor_type = FunctorType;
+ using size_type = HIP::size_type;
+ using index_type = typename Policy::index_type;
+ // Conditionally set word_size_type to int16_t or int8_t if value_type is
+ // smaller than int32_t (Kokkos::HIP::size_type)
+ // word_size_type is used to determine the word count, shared memory buffer
+ // size, and global memory buffer size before the scan is performed.
+ // Within the scan, the word count is recomputed based on word_size_type
+ // and when calculating indexes into the shared/global memory buffers for
+ // performing the scan, word_size_type is used again.
+ // For scalars > 4 bytes in size, indexing into shared/global memory relies
+ // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4-byte word, so that when the join is
+  // performed we operate on the correct data that was copied over in chunks
+  // of 4 bytes.
+ using word_size_type = std::conditional_t<
+ sizeof(value_type) < sizeof(size_type),
+ std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>, size_type>;
+
+ protected:
+ // Algorithmic constraints:
+ // (a) blockDim.y is a power of two
+ // (b) blockDim.x == blockDim.z == 1
+ // (c) gridDim.x <= blockDim.y * blockDim.y
+ // (d) gridDim.y == gridDim.z == 1
+
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+ word_size_type* m_scratch_space = nullptr;
+ size_type* m_scratch_flags = nullptr;
+ size_type m_final = false;
+ int m_grid_x = 0;
+
+ private:
+ template <class TagType>
+ __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const Member& i, reference_type update, const bool final_result) const {
+ m_functor_reducer.get_functor()(i, update, final_result);
+ }
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const Member& i, reference_type update, const bool final_result) const {
+ m_functor_reducer.get_functor()(TagType(), i, update, final_result);
+ }
+
+ //----------------------------------------
+
+ __device__ inline void initial() const {
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
+
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(final_reducer.value_size() / sizeof(word_size_type));
+
+ pointer_type const shared_value = reinterpret_cast<pointer_type>(
+ kokkos_impl_hip_shared_memory<word_size_type>() +
+ word_count.value * threadIdx.y);
+
+ final_reducer.init(shared_value);
+
+ // Number of blocks is bounded so that the reduction can be limited to two
+ // passes. Each thread block is given an approximately equal amount of work
+ // to perform. Accumulate the values for this block. The accumulation
+ // ordering does not match the final pass, but is arithmetically equivalent.
+
+ const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+ for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+ iwork < iwork_end; iwork += blockDim.y) {
+ this->template exec_range<WorkTag>(
+ iwork, final_reducer.reference(shared_value), false);
+ }
+
+ // Reduce and scan, writing out scan of blocks' totals and block-groups'
+ // totals. Blocks' scan values are written to 'blockIdx.x' location.
+ // Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i <
+ // gridDim.x
+ hip_single_inter_block_reduce_scan<true>(
+ final_reducer, blockIdx.x, gridDim.x,
+ kokkos_impl_hip_shared_memory<word_size_type>(), m_scratch_space,
+ m_scratch_flags);
+ }
+
+ //----------------------------------------
+
+ __device__ inline void final() const {
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
+
+ const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+ sizeof(word_size_type)>
+ word_count(final_reducer.value_size() / sizeof(word_size_type));
+
+ // Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
+ // value[2] , ... }
+ word_size_type* const shared_data =
+ kokkos_impl_hip_shared_memory<word_size_type>();
+ word_size_type* const shared_prefix =
+ shared_data + word_count.value * threadIdx.y;
+ word_size_type* const shared_accum =
+ shared_data + word_count.value * (blockDim.y + 1);
+
+ // Starting value for this thread block is the previous block's total.
+ if (blockIdx.x) {
+ word_size_type* const block_total =
+ m_scratch_space + word_count.value * (blockIdx.x - 1);
+ for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+ shared_accum[i] = block_total[i];
+ }
+ } else if (0 == threadIdx.y) {
+ final_reducer.init(reinterpret_cast<pointer_type>(shared_accum));
+ }
+
+ const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+ for (typename Policy::member_type iwork_base = range.begin();
+ iwork_base < range.end(); iwork_base += blockDim.y) {
+ const typename Policy::member_type iwork = iwork_base + threadIdx.y;
+
+ __syncthreads(); // Don't overwrite previous iteration values until they
+ // are used
+
+ final_reducer.init(
+ reinterpret_cast<pointer_type>(shared_prefix + word_count.value));
+
+ // Copy previous block's accumulation total into thread[0] prefix and
+ // inclusive scan value of this block
+ for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+ shared_data[i + word_count.value] = shared_data[i] = shared_accum[i];
+ }
+
+ // Make sure the write is seen by all threads
+ __threadfence_block();
+
+ // Call functor to accumulate inclusive scan value for this work item
+ const bool doWork = (iwork < range.end());
+ if (doWork) {
+ this->template exec_range<WorkTag>(
+ iwork,
+ final_reducer.reference(reinterpret_cast<pointer_type>(
+ shared_prefix + word_count.value)),
+ false);
+ }
+
+ // Scan block values into locations shared_data[1..blockDim.y]
+ hip_intra_block_reduce_scan<true>(
+ final_reducer,
+ typename Analysis::pointer_type(shared_data + word_count.value));
+
+ {
+ word_size_type* const block_total =
+ shared_data + word_count.value * blockDim.y;
+ for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+ shared_accum[i] = block_total[i];
+ }
+ }
+
+ // Call functor with exclusive scan value
+ if (doWork) {
+ this->template exec_range<WorkTag>(
+ iwork,
+ final_reducer.reference(
+ reinterpret_cast<pointer_type>(shared_prefix)),
+ true);
+ }
+ if (iwork + 1 == m_policy.end() && m_policy.end() == range.end() &&
+ m_result_ptr_device_accessible)
+ *m_result_ptr = *reinterpret_cast<pointer_type>(shared_prefix);
+ }
+ }
+
+ public:
+ //----------------------------------------
+
+ __device__ inline void operator()() const {
+ if (!m_final) {
+ initial();
+ } else {
+ final();
+ }
+ }
+
+ inline void impl_execute(int block_size) {
+ const index_type nwork = m_policy.end() - m_policy.begin();
+ if (nwork) {
+ // FIXME_HIP we cannot choose it larger for large work sizes to work
+ // correctly, the unit tests fail with wrong results
+ const int gridMaxComputeCapability_2x = 0x01fff;
+
+ const int grid_max =
+ std::min(block_size * block_size, gridMaxComputeCapability_2x);
+
+ // At most 'max_grid' blocks:
+ const int max_grid =
+ std::min<int>(grid_max, (nwork + block_size - 1) / block_size);
+
+ // How much work per block:
+ const int work_per_block = (nwork + max_grid - 1) / max_grid;
+
+ // How many block are really needed for this much work:
+ m_grid_x = (nwork + work_per_block - 1) / work_per_block;
+
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
+ m_scratch_space =
+ reinterpret_cast<word_size_type*>(Impl::hip_internal_scratch_space(
+ m_policy.space(), final_reducer.value_size() * m_grid_x));
+ m_scratch_flags = Impl::hip_internal_scratch_flags(m_policy.space(),
+ sizeof(size_type) * 1);
+
+ dim3 grid(m_grid_x, 1, 1);
+ dim3 block(1, block_size, 1); // REQUIRED DIMENSIONS ( 1 , N , 1 )
+ const int shmem = final_reducer.value_size() * (block_size + 2);
+
+ m_final = false;
+ // these ones are OK to be just the base because the specializations
+ // do not modify the kernel at all
+ Impl::hip_parallel_launch<ParallelScanHIPBase, LaunchBounds>(
+ *this, grid, block, shmem,
+ m_policy.space().impl_internal_space_instance(),
+ false); // copy to device and execute
+
+ m_final = true;
+ Impl::hip_parallel_launch<ParallelScanHIPBase, LaunchBounds>(
+ *this, grid, block, shmem,
+ m_policy.space().impl_internal_space_instance(),
+ false); // copy to device and execute
+ }
+ }
+
+ ParallelScanHIPBase(const FunctorType& arg_functor, const Policy& arg_policy,
+ pointer_type arg_result_ptr,
+ bool arg_result_ptr_device_accessible)
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_ptr),
+ m_result_ptr_device_accessible(arg_result_ptr_device_accessible) {}
+};
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>, HIP>
+ : public ParallelScanHIPBase<FunctorType, void, Traits...> {
+ public:
+ using Base = ParallelScanHIPBase<FunctorType, void, Traits...>;
+ using Base::operator();
+
+ inline void execute() {
+ const int block_size = static_cast<int>(
+ local_block_size(Base::m_functor_reducer.get_functor()));
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelScan< HIP > could not find a "
+ "valid execution configuration."));
+ }
+
+ Base::impl_execute(block_size);
+ }
+
+ ParallelScan(const FunctorType& arg_functor,
+ const typename Base::Policy& arg_policy)
+ : Base(arg_functor, arg_policy, nullptr, false) {}
+
+ inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (2 warps), 256 (4 warps), or
+    // 512 (8 warps); gridDim.x <= blockDim.y * blockDim.y
+
+ const auto& instance =
+ Base::m_policy.space().impl_internal_space_instance();
+ auto shmem_functor = [&f](unsigned n) {
+ return hip_single_inter_block_reduce_scan_shmem<
+ true, typename Base::WorkTag, void>(f, n);
+ };
+ using DriverType = ParallelScan<FunctorType, typename Base::Policy, HIP>;
+ return Impl::hip_get_preferred_blocksize<DriverType,
+ typename Base::LaunchBounds>(
+ instance, shmem_functor);
+ }
+};
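+
+// An illustrative sketch of how the machinery above is reached through the
+// standard user-facing Kokkos API (the names 'v' and 'n' are only an
+// example):
+//
+//   Kokkos::View<int*, Kokkos::HIPSpace> v("v", n);
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::HIP>(0, n),
+//       KOKKOS_LAMBDA(const int i, int& partial, const bool final) {
+//         if (final) v(i) = partial;  // exclusive prefix sum
+//         partial += 1;
+//       });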
+
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+ ReturnType, HIP>
+ : public ParallelScanHIPBase<FunctorType, ReturnType, Traits...> {
+ public:
+ using Base = ParallelScanHIPBase<FunctorType, ReturnType, Traits...>;
+ using Base::operator();
+
+ inline void execute() {
+ const int block_size = static_cast<int>(
+ local_block_size(Base::m_functor_reducer.get_functor()));
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("Kokkos::Impl::ParallelScan< HIP > could not find a "
+ "valid execution configuration."));
+ }
+
+ Base::impl_execute(block_size);
+
+ const auto nwork = Base::m_policy.end() - Base::m_policy.begin();
+ if (nwork && !Base::m_result_ptr_device_accessible) {
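+      // The grand total lives in the last block's slot of the device
+      // scratch space; copy it back into the user-provided result.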
+ const int size =
+ Base::Analysis::value_size(Base::m_functor_reducer.get_functor());
+ DeepCopy<HostSpace, HIPSpace, HIP>(
+ Base::m_policy.space(), Base::m_result_ptr,
+ Base::m_scratch_space + (Base::m_grid_x - 1) * size /
+ sizeof(typename Base::word_size_type),
+ size);
+ }
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const typename Base::Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : Base(arg_functor, arg_policy, arg_result_view.data(),
+ MemorySpaceAccess<HIPSpace,
+ typename ViewType::memory_space>::accessible) {}
+
+ inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (2 warps), 256 (4 warps), or
+    // 512 (8 warps); gridDim.x <= blockDim.y * blockDim.y
+
+ const auto& instance =
+ Base::m_policy.space().impl_internal_space_instance();
+ auto shmem_functor = [&f](unsigned n) {
+ return hip_single_inter_block_reduce_scan_shmem<
+ true, typename Base::WorkTag, ReturnType>(f, n);
+ };
+ using DriverType = ParallelScanWithTotal<FunctorType, typename Base::Policy,
+ ReturnType, HIP>;
+ return hip_get_preferred_blocksize<DriverType, typename Base::LaunchBounds>(
+ instance, shmem_functor);
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_REDUCESCAN_HPP
#define KOKKOS_HIP_REDUCESCAN_HPP
#if defined(__HIPCC__)
+#include <HIP/Kokkos_HIP.hpp>
#include <HIP/Kokkos_HIP_Vectorization.hpp>
namespace Kokkos {
int const width, // How much of the warp participates
Scalar& result) {
for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
- Scalar tmp = Kokkos::Experimental::shfl_down(value, delta, width);
+ Scalar tmp = shfl_down(value, delta, width);
functor.join(&value, &tmp);
}
- Experimental::Impl::in_place_shfl(result, value, 0, width);
+ in_place_shfl(result, value, 0, width);
}
__device__ static inline void scalar_intra_block_reduction(
FunctorType const& functor, Scalar value, bool const skip,
Scalar* my_global_team_buffer_element, int const shared_elements,
Scalar* shared_team_buffer_element) {
- unsigned int constexpr warp_size =
- Kokkos::Experimental::Impl::HIPTraits::WarpSize;
- int const warp_id = (threadIdx.y * blockDim.x) / warp_size;
+ constexpr unsigned int warp_size = HIPTraits::WarpSize;
+ int const warp_id = (threadIdx.y * blockDim.x) / warp_size;
Scalar* const my_shared_team_buffer_element =
shared_team_buffer_element + warp_id % shared_elements;
}
scalar_intra_warp_reduction(functor, value, false, warp_size,
*my_global_team_buffer_element);
+ __threadfence();
}
}
__device__ static inline bool scalar_inter_block_reduction(
- FunctorType const& functor,
- ::Kokkos::Experimental::HIP::size_type const block_count,
- ::Kokkos::Experimental::HIP::size_type* const shared_data,
- ::Kokkos::Experimental::HIP::size_type* const global_data,
- ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+ FunctorType const& functor, HIP::size_type const block_count,
+ HIP::size_type* const shared_data, HIP::size_type* const global_data,
+ HIP::size_type* const global_flags) {
Scalar* const global_team_buffer_element =
reinterpret_cast<Scalar*>(global_data);
Scalar* const my_global_team_buffer_element =
global_team_buffer_element + blockIdx.x;
Scalar* shared_team_buffer_elements =
reinterpret_cast<Scalar*>(shared_data);
- Scalar value = shared_team_buffer_elements[threadIdx.y];
- unsigned int constexpr warp_size =
- Kokkos::Experimental::Impl::HIPTraits::WarpSize;
- int shared_elements = blockDim.x * blockDim.y / warp_size;
- int global_elements = block_count;
+ Scalar value = shared_team_buffer_elements[threadIdx.y];
+ constexpr unsigned int warp_size = Impl::HIPTraits::WarpSize;
+ int shared_elements = blockDim.x * blockDim.y / warp_size;
+ int global_elements = block_count;
__syncthreads();
scalar_intra_block_reduction(functor, value, true,
// Use the last block that is done to do the reduction across the blocks
- __shared__ unsigned int num_teams_done;
+ unsigned int num_teams_done = 0;
if (threadIdx.x + threadIdx.y == 0) {
num_teams_done = Kokkos::atomic_fetch_add(global_flags, 1) + 1;
}
bool is_last_block = false;
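+  // HIP supports __syncthreads_or these days, so the former workaround of
+  // keeping num_teams_done in __shared__ memory is no longer needed.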
- // FIXME_HIP HIP does not support syncthreads_or. That's why we need to make
- // num_teams_done __shared__
- // if (__syncthreads_or(num_teams_done == gridDim.x)) {*/
- __syncthreads();
- if (num_teams_done == gridDim.x) {
+ if (__syncthreads_or(num_teams_done == gridDim.x)) {
is_last_block = true;
*global_flags = 0;
functor.init(&value);
// part of the reduction
int const width) // How much of the warp participates
{
- int const lane_id = (threadIdx.y * blockDim.x + threadIdx.x) %
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ int const lane_id =
+ (threadIdx.y * blockDim.x + threadIdx.x) % HIPTraits::WarpSize;
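+    // Tree reduction over the shared values: in step 'delta' only lanes
+    // whose id is a multiple of 2*delta join in the partner entry 'delta'
+    // away.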
for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
- if (lane_id + delta < ::Kokkos::Experimental::Impl::HIPTraits::WarpSize) {
+ if (lane_id + delta < HIPTraits::WarpSize &&
+ (lane_id % (delta * 2) == 0)) {
functor.join(value, value + delta);
}
}
__device__ static inline void scalar_intra_block_reduction(
FunctorType const& functor, Scalar value, bool const skip, Scalar* result,
int const /*shared_elements*/, Scalar* shared_team_buffer_element) {
- int const warp_id = (threadIdx.y * blockDim.x) /
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ int const warp_id = (threadIdx.y * blockDim.x) / HIPTraits::WarpSize;
Scalar* const my_shared_team_buffer_element =
shared_team_buffer_element + threadIdx.y * blockDim.x + threadIdx.x;
*my_shared_team_buffer_element = value;
// Warp Level Reduction, ignoring Kokkos vector entries
- scalar_intra_warp_reduction(
- functor, my_shared_team_buffer_element, skip,
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize);
+ scalar_intra_warp_reduction(functor, my_shared_team_buffer_element, skip,
+ HIPTraits::WarpSize);
// Wait for every warp to be done before using one warp to do final cross
// warp reduction
__syncthreads();
if (warp_id == 0) {
const unsigned int delta =
- (threadIdx.y * blockDim.x + threadIdx.x) *
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ (threadIdx.y * blockDim.x + threadIdx.x) * HIPTraits::WarpSize;
if (delta < blockDim.x * blockDim.y)
*my_shared_team_buffer_element = shared_team_buffer_element[delta];
scalar_intra_warp_reduction(
functor, my_shared_team_buffer_element, false,
- blockDim.x * blockDim.y /
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize);
- if (threadIdx.x + threadIdx.y == 0) *result = *shared_team_buffer_element;
+ blockDim.x * blockDim.y / HIPTraits::WarpSize);
+ if (threadIdx.x + threadIdx.y == 0) {
+ *result = *shared_team_buffer_element;
+ if (skip) __threadfence();
+ }
}
}
+ template <typename SizeType>
__device__ static inline bool scalar_inter_block_reduction(
- FunctorType const& functor,
- ::Kokkos::Experimental::HIP::size_type const block_count,
- ::Kokkos::Experimental::HIP::size_type* const shared_data,
- ::Kokkos::Experimental::HIP::size_type* const global_data,
- ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+ FunctorType const& functor, HIP::size_type const block_count,
+ SizeType* const shared_data, SizeType* const global_data,
+ HIP::size_type* const global_flags) {
Scalar* const global_team_buffer_element =
reinterpret_cast<Scalar*>(global_data);
Scalar* const my_global_team_buffer_element =
Scalar* shared_team_buffer_elements =
reinterpret_cast<Scalar*>(shared_data);
Scalar value = shared_team_buffer_elements[threadIdx.y];
- int shared_elements = (blockDim.x * blockDim.y) /
- ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ int shared_elements = (blockDim.x * blockDim.y) / HIPTraits::WarpSize;
int global_elements = block_count;
__syncthreads();
// Use the last block that is done to do the reduction across the blocks
- __shared__ unsigned int num_teams_done;
+ unsigned int num_teams_done = 0;
if (threadIdx.x + threadIdx.y == 0) {
num_teams_done = Kokkos::atomic_fetch_add(global_flags, 1) + 1;
}
bool is_last_block = false;
- // FIXME_HIP HIP does not support syncthreads_or. That's why we need to make
- // num_teams_done __shared__
- // if (__syncthreads_or(num_teams_done == gridDim.x)) {*/
- __syncthreads();
- if (num_teams_done == gridDim.x) {
+ if (__syncthreads_or(num_teams_done == gridDim.x)) {
is_last_block = true;
*global_flags = 0;
functor.init(&value);
// For that warp, we shift all indices logically to the end and ignore join
// operations with unassigned indices in the warp when performing the intra
// warp reduction/scan.
- const bool is_full_warp =
- (((threadIdx.y >> Experimental::Impl::HIPTraits::WarpIndexShift) + 1)
- << Experimental::Impl::HIPTraits::WarpIndexShift) <= blockDim.y;
+ const bool is_full_warp = (((threadIdx.y >> HIPTraits::WarpIndexShift) + 1)
+ << HIPTraits::WarpIndexShift) <= blockDim.y;
auto block_reduce_step = [&functor, value_count](
int const R, pointer_type const TD, int const S,
};
// Intra-warp reduction:
+ int bit_shift = 0;
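+  // bit_shift drives both reduction loops: the intra-warp loop below joins
+  // partners at distances 1, 2, 4, ... up to half the warp size, and the
+  // inter-warp loop afterwards continues from where it left off.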
{
const unsigned mapped_idx =
- threadIdx.y + (is_full_warp
- ? 0
- : (not_less_power_of_two - blockDim.y) &
- (Experimental::Impl::HIPTraits::WarpSize - 1));
+ threadIdx.y + (is_full_warp ? 0
+ : (not_less_power_of_two - blockDim.y) &
+ (HIPTraits::WarpSize - 1));
const pointer_type tdata_intra = base_data + value_count * threadIdx.y;
const pointer_type warp_start =
- base_data +
- value_count *
- ((threadIdx.y >> Experimental::Impl::HIPTraits::WarpIndexShift)
- << Experimental::Impl::HIPTraits::WarpIndexShift);
- block_reduce_step(mapped_idx, tdata_intra, 0, warp_start, 0);
- block_reduce_step(mapped_idx, tdata_intra, 1, warp_start, 0);
- block_reduce_step(mapped_idx, tdata_intra, 2, warp_start, 0);
- block_reduce_step(mapped_idx, tdata_intra, 3, warp_start, 0);
- block_reduce_step(mapped_idx, tdata_intra, 4, warp_start, 0);
- block_reduce_step(mapped_idx, tdata_intra, 5, warp_start, 0);
+ base_data + value_count * ((threadIdx.y >> HIPTraits::WarpIndexShift)
+ << HIPTraits::WarpIndexShift);
+ for (; (1 << bit_shift) < HIPTraits::WarpSize; ++bit_shift) {
+ block_reduce_step(mapped_idx, tdata_intra, bit_shift, warp_start, 0);
+ }
}
__syncthreads(); // Wait for all warps to reduce
// following reduction, we shift all indices logically to the end of the
// next power-of-two of the number of warps.
const unsigned n_active_warps =
- ((blockDim.y - 1) >> Experimental::Impl::HIPTraits::WarpIndexShift) + 1;
+ ((blockDim.y - 1) >> HIPTraits::WarpIndexShift) + 1;
if (threadIdx.y < n_active_warps) {
const bool is_full_warp_inter =
- threadIdx.y <
- (blockDim.y >> Experimental::Impl::HIPTraits::WarpIndexShift);
+ threadIdx.y < (blockDim.y >> HIPTraits::WarpIndexShift);
pointer_type const tdata_inter =
base_data +
- value_count *
- (is_full_warp_inter
- ? (threadIdx.y
- << Experimental::Impl::HIPTraits::WarpIndexShift) +
- (Experimental::Impl::HIPTraits::WarpSize - 1)
- : blockDim.y - 1);
+ value_count * (is_full_warp_inter
+ ? (threadIdx.y << HIPTraits::WarpIndexShift) +
+ (HIPTraits::WarpSize - 1)
+ : blockDim.y - 1);
const unsigned index_shift =
is_full_warp_inter
? 0
- : blockDim.y - (threadIdx.y
- << Experimental::Impl::HIPTraits::WarpIndexShift);
- const int rtid_inter =
- (threadIdx.y << Experimental::Impl::HIPTraits::WarpIndexShift) +
- (Experimental::Impl::HIPTraits::WarpSize - 1) - index_shift;
-
- if ((1 << 6) < BlockSizeMask) {
- block_reduce_step(rtid_inter, tdata_inter, 6, base_data, index_shift);
- }
- if ((1 << 7) < BlockSizeMask) {
- block_reduce_step(rtid_inter, tdata_inter, 7, base_data, index_shift);
- }
- if ((1 << 8) < BlockSizeMask) {
- block_reduce_step(rtid_inter, tdata_inter, 8, base_data, index_shift);
- }
- if ((1 << 9) < BlockSizeMask) {
- block_reduce_step(rtid_inter, tdata_inter, 9, base_data, index_shift);
- }
- if ((1 << 10) < BlockSizeMask) {
- block_reduce_step(rtid_inter, tdata_inter, 10, base_data, index_shift);
+ : blockDim.y - (threadIdx.y << HIPTraits::WarpIndexShift);
+ const int rtid_inter = (threadIdx.y << HIPTraits::WarpIndexShift) +
+ (HIPTraits::WarpSize - 1) - index_shift;
+
+ for (; (1 << bit_shift) < BlockSizeMask; ++bit_shift) {
+ block_reduce_step(rtid_inter, tdata_inter, bit_shift, base_data,
+ index_shift);
}
}
}
if (DoScan) {
// Update all the values for the respective warps (except for the last one)
// by adding from the last value of the previous warp.
- const unsigned int WarpMask = Experimental::Impl::HIPTraits::WarpSize - 1;
+ const unsigned int WarpMask = HIPTraits::WarpSize - 1;
const int is_last_thread_in_warp =
- is_full_warp ? ((threadIdx.y & WarpMask) ==
- Experimental::Impl::HIPTraits::WarpSize - 1)
+ is_full_warp ? ((threadIdx.y & WarpMask) == HIPTraits::WarpSize - 1)
: (threadIdx.y == blockDim.y - 1);
- if (threadIdx.y >= Experimental::Impl::HIPTraits::WarpSize &&
- !is_last_thread_in_warp) {
+ if (threadIdx.y >= HIPTraits::WarpSize && !is_last_thread_in_warp) {
const int offset_to_previous_warp_total = (threadIdx.y & (~WarpMask)) - 1;
functor.join(base_data + value_count * threadIdx.y,
base_data + value_count * offset_to_previous_warp_total);
* Global reduce result is in the last threads' 'shared_data' location.
*/
-template <bool DoScan, class FunctorType>
+template <bool DoScan, typename FunctorType, typename SizeType>
__device__ bool hip_single_inter_block_reduce_scan_impl(
- FunctorType const& functor,
- ::Kokkos::Experimental::HIP::size_type const block_id,
- ::Kokkos::Experimental::HIP::size_type const block_count,
- ::Kokkos::Experimental::HIP::size_type* const shared_data,
- ::Kokkos::Experimental::HIP::size_type* const global_data,
- ::Kokkos::Experimental::HIP::size_type* const global_flags) {
- using size_type = ::Kokkos::Experimental::HIP::size_type;
-
+ FunctorType const& functor, HIP::size_type const block_id,
+ HIP::size_type const block_count, SizeType* const shared_data,
+ SizeType* const global_data, HIP::size_type* const global_flags) {
+ using size_type = SizeType;
using value_type = typename FunctorType::value_type;
using pointer_type = typename FunctorType::pointer_type;
for (size_t i = threadIdx.y; i < word_count.value; i += blockDim.y) {
global[i] = shared[i];
}
+ __threadfence();
}
// Contributing blocks note that their contribution has been completed via an
// atomic-increment flag. If this block is not the last block to contribute to
// this group, then the block is done.
- // FIXME_HIP __syncthreads_or is not supported by HIP yet.
- // const bool is_last_block = !__syncthreads_or(
- // threadIdx.y
- // ? 0
- // : (1 + atomicInc(global_flags, block_count - 1) < block_count));
- __shared__ int n_done;
- n_done = 0;
- __syncthreads();
- if (threadIdx.y == 0) {
- n_done = 1 + atomicInc(global_flags, block_count - 1);
- }
- __syncthreads();
- bool const is_last_block = (n_done == static_cast<int>(block_count));
-
+ const bool is_last_block = !__syncthreads_or(
+ threadIdx.y
+ ? 0
+ : (1 + atomicInc(global_flags, block_count - 1) < block_count));
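+  // Only the threadIdx.y == 0 row performs the atomic increment;
+  // __syncthreads_or broadcasts the "not last block" predicate to all
+  // threads of the block.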
if (is_last_block) {
size_type const b = (static_cast<long long int>(block_count) *
static_cast<long long int>(threadIdx.y)) >>
return is_last_block;
}
-template <bool DoScan, typename FunctorType>
+template <bool DoScan, typename FunctorType, typename SizeType>
__device__ bool hip_single_inter_block_reduce_scan(
- FunctorType const& functor,
- ::Kokkos::Experimental::HIP::size_type const block_id,
- ::Kokkos::Experimental::HIP::size_type const block_count,
- ::Kokkos::Experimental::HIP::size_type* const shared_data,
- ::Kokkos::Experimental::HIP::size_type* const global_data,
- ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+ FunctorType const& functor, HIP::size_type const block_id,
+ HIP::size_type const block_count, SizeType* const shared_data,
+ SizeType* const global_data, HIP::size_type* const global_flags) {
// If we are doing a reduction and we don't do an array reduction, we use the
// reduction-only path. Otherwise, we use the common path between reduction
// and scan.
}
// Size in bytes required for inter block reduce or scan
-template <bool DoScan, class FunctorType, class ArgTag>
+template <bool DoScan, class ArgTag, class ValueType, class FunctorType>
inline std::enable_if_t<DoScan, unsigned>
hip_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
const unsigned BlockSize) {
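+  // One value slot per thread of the block plus two extra slots; this
+  // matches the "(block_size + 2)" shared-memory sizing used at launch.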
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- RangePolicy<Experimental::HIP, ArgTag>,
- FunctorType>;
+ using Analysis =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ RangePolicy<HIP, ArgTag>, FunctorType, ValueType>;
return (BlockSize + 2) * Analysis::value_size(functor);
}
-template <bool DoScan, class FunctorType, class ArgTag>
+template <bool DoScan, class ArgTag, class ValueType, class FunctorType>
inline std::enable_if_t<!DoScan, unsigned>
hip_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
const unsigned BlockSize) {
- using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
- RangePolicy<Experimental::HIP, ArgTag>,
- FunctorType>;
+ using Analysis =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ RangePolicy<HIP, ArgTag>, FunctorType, ValueType>;
return (BlockSize + 2) * Analysis::value_size(functor);
}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <HIP/Kokkos_HIP.hpp>
+#include <HIP/Kokkos_HIP_DeepCopy.hpp>
+#include <HIP/Kokkos_HIP_SharedAllocationRecord.hpp>
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+#ifndef KOKKOS_IMPL_HIP_UNIFIED_MEMORY
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::HIPSpace);
+#else
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(Kokkos::HIPSpace);
+#endif
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::HIPHostPinnedSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::HIPManagedSpace);
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_SHARED_ALLOCATION_RECORD_HPP
+#define KOKKOS_HIP_SHARED_ALLOCATION_RECORD_HPP
+
+#include <HIP/Kokkos_HIP_Space.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#if defined(KOKKOS_IMPL_HIP_UNIFIED_MEMORY)
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::HIPSpace);
+#else
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION(
+ Kokkos::HIPSpace);
+#endif
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::HIPHostPinnedSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::HIPManagedSpace);
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_SHUFFLE_REDUCE_HPP
#define KOKKOS_HIP_SHUFFLE_REDUCE_HPP
unsigned int shift = 1;
// Reduce over values from threads with different threadIdx.y
- unsigned int constexpr warp_size =
- Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ constexpr unsigned int warp_size = HIPTraits::WarpSize;
while (blockDim.x * shift < warp_size) {
- ValueType const tmp =
- Kokkos::Experimental::shfl_down(result, blockDim.x * shift, warp_size);
+ ValueType const tmp = shfl_down(result, blockDim.x * shift, warp_size);
// Only join if upper thread is active (this allows non power of two for
// blockDim.y)
if (threadIdx.y + shift < max_active_thread) {
}
// Broadcast the result to all the threads in the warp
- result = Kokkos::Experimental::shfl(result, 0, warp_size);
+ result = shfl(result, 0, warp_size);
}
template <typename ValueType, typename ReducerType>
__device__ inline void hip_inter_warp_shuffle_reduction(
ValueType& value, const ReducerType& reducer,
const int max_active_thread = blockDim.y) {
- unsigned int constexpr warp_size =
- Kokkos::Experimental::Impl::HIPTraits::WarpSize;
- int constexpr step_width = 8;
+ constexpr unsigned int warp_size = HIPTraits::WarpSize;
+ constexpr int step_width = 8;
// Depending on the ValueType, __shared__ memory must be aligned to 8-byte
// boundaries. The reason not to use ValueType directly is that for types
// with constructors it could lead to race conditions.
value = result[0];
for (int i = 1; (i * step < max_active_thread) && (i < step_width); ++i)
reducer.join(&value, &result[i]);
+ __syncthreads();
}
template <typename ValueType, typename ReducerType>
__device__ inline bool hip_inter_block_shuffle_reduction(
typename FunctorType::reference_type value,
typename FunctorType::reference_type neutral, FunctorType const& reducer,
- Kokkos::Experimental::HIP::size_type* const m_scratch_space,
+ typename FunctorType::pointer_type const m_scratch_space,
typename FunctorType::pointer_type const /*result*/,
- Kokkos::Experimental::HIP::size_type* const m_scratch_flags,
+ HIP::size_type* const m_scratch_flags,
int const max_active_thread = blockDim.y) {
using pointer_type = typename FunctorType::pointer_type;
using value_type = typename FunctorType::value_type;
// One thread in the block writes block result to global scratch_memory
if (id == 0) {
- pointer_type global =
- reinterpret_cast<pointer_type>(m_scratch_space) + blockIdx.x;
- *global = value;
+ pointer_type global = m_scratch_space + blockIdx.x;
+ *global = value;
+ __threadfence();
}
// One warp of the last block performs the inter-block reduction by loading
// the block values from global scratch_memory
bool last_block = false;
__syncthreads();
- int constexpr warp_size = Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+ constexpr int warp_size = HIPTraits::WarpSize;
if (id < warp_size) {
- Kokkos::Experimental::HIP::size_type count;
+ HIP::size_type count;
// Figure out whether this is the last block
if (id == 0) count = Kokkos::atomic_fetch_add(m_scratch_flags, 1);
- count = Kokkos::Experimental::shfl(count, 0, warp_size);
+ count = shfl(count, 0, warp_size);
// Last block does the inter block reduction
if (count == gridDim.x - 1) {
last_block = true;
value = neutral;
- pointer_type const global =
- reinterpret_cast<pointer_type>(m_scratch_space);
+ pointer_type const global = m_scratch_space;
// Reduce all global values with splitting work over threads in one warp
const int step_size = blockDim.x * blockDim.y < warp_size
// valid (allows gridDim.x non power of two and <warp_size)
for (unsigned int i = 1; i < warp_size; i *= 2) {
if ((blockDim.x * blockDim.y) > i) {
- value_type tmp = Kokkos::Experimental::shfl_down(value, i, warp_size);
+ value_type tmp = shfl_down(value, i, warp_size);
if (id + i < gridDim.x) reducer.join(&value, &tmp);
}
}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
+
+#include <HIP/Kokkos_HIP_DeepCopy.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <hip/hip_runtime_api.h>
+
+#include <stdlib.h>
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+#include <atomic>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace {
+
+static std::atomic<bool> is_first_hip_managed_allocation(true);
+
+} // namespace
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+HIPSpace::HIPSpace()
+ : m_device(HIP().hip_device()), m_stream(HIP().hip_stream()) {}
+
+HIPHostPinnedSpace::HIPHostPinnedSpace() {}
+
+HIPManagedSpace::HIPManagedSpace() : m_device(HIP().hip_device()) {}
+
+#ifndef KOKKOS_IMPL_HIP_UNIFIED_MEMORY
+void* HIPSpace::allocate(const HIP& exec_space,
+ const size_t arg_alloc_size) const {
+ return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+
+void* HIPSpace::allocate(const HIP& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(exec_space.hip_stream(), arg_label, arg_alloc_size,
+ arg_logical_size, true);
+}
+#endif
+
+void* HIPSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void* HIPSpace::allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(m_stream, arg_label, arg_alloc_size, arg_logical_size,
+ false);
+}
+
+void* HIPSpace::impl_allocate(
+ [[maybe_unused]] const hipStream_t stream, const char* arg_label,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ [[maybe_unused]] const bool stream_sync_only) const {
+ void* ptr = nullptr;
+
+#ifdef KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC
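+  // The async allocation is ordered on 'stream'; synchronize either just
+  // that stream (the caller passed an execution space) or the whole device
+  // so the memory is immediately usable from any stream.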
+ auto const error_code = hipMallocAsync(&ptr, arg_alloc_size, stream);
+ if (stream_sync_only) {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(stream));
+ } else {
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize());
+ }
+#else
+ auto const error_code = hipMalloc(&ptr, arg_alloc_size);
+#endif
+
+ if (error_code != hipSuccess) {
+    // This is the only way to clear the last error, which we should do
+    // because we are turning the error into an exception
+ (void)hipGetLastError();
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const Kokkos::Tools::SpaceHandle arg_handle =
+ Kokkos::Tools::make_space_handle(name());
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+
+ return ptr;
+}
+
+void* HIPHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void* HIPHostPinnedSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void* HIPHostPinnedSpace::impl_allocate(
+ const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ void* ptr = nullptr;
+
+ auto const error_code =
+ hipHostMalloc(&ptr, arg_alloc_size, hipHostMallocNonCoherent);
+ if (error_code != hipSuccess) {
+    // This is the only way to clear the last error, which we should do
+    // because we are turning the error into an exception
+ (void)hipGetLastError();
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+
+ return ptr;
+}
+
+void* HIPManagedSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void* HIPManagedSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void* HIPManagedSpace::impl_allocate(
+ const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ void* ptr = nullptr;
+
+ if (arg_alloc_size > 0) {
+ if (is_first_hip_managed_allocation.exchange(false) &&
+ Kokkos::show_warnings()) {
+ do { // hack to avoid spamming users with too many warnings
+ if (!impl_hip_driver_check_page_migration()) {
+ std::cerr << R"warning(
+Kokkos::HIP::allocation WARNING: The combination of device and system configuration
+ does not support page migration between device and host.
+ HIPManagedSpace might not work as expected.
+ Please refer to the ROCm documentation on unified/managed memory.)warning"
+ << std::endl;
+        break; // do not warn about the HSA_XNACK environment variable
+ }
+
+ // check for correct runtime environment
+ const char* hsa_xnack = std::getenv("HSA_XNACK");
+ if (!hsa_xnack)
+ std::cerr << R"warning(
+Kokkos::HIP::runtime WARNING: Kokkos did not find an environment variable 'HSA_XNACK'
+ for the current process.
+ Nevertheless, xnack is enabled for all processes if
+ amdgpu.noretry=0 was set in the Linux kernel boot line.
+ Without xnack enabled, Kokkos::HIPManaged might not behave
+ as expected.)warning"
+ << std::endl;
+ else if (Kokkos::Impl::strcmp(hsa_xnack, "1") != 0)
+          std::cerr
+              << "Kokkos::HIP::runtime WARNING: Kokkos detected the "
+                 "environment variable "
+              << "'HSA_XNACK'=" << hsa_xnack << "\n"
+              << "Kokkos advises setting it to '1' to enable it per process."
+ << std::endl;
+ } while (false);
+ }
+ auto const error_code = hipMallocManaged(&ptr, arg_alloc_size);
+ if (error_code != hipSuccess) {
+      // This is the only way to clear the last error, which we should do
+      // because we are turning the error into an exception
+ (void)hipGetLastError();
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
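+    // Advise coarse-grained coherence for the allocation; this relaxes
+    // host/device coherence and typically improves the performance of
+    // managed memory on AMD GPUs.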
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
+ ptr, arg_alloc_size, hipMemAdviseSetCoarseGrain, m_device));
+ }
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+
+ return ptr;
+}
+bool HIPManagedSpace::impl_hip_driver_check_page_migration() const {
+ // check with driver if page migrating memory is available
+ // this driver query is copied from the hip documentation
+ int hasManagedMemory = 0; // false by default
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceGetAttribute(
+ &hasManagedMemory, hipDeviceAttributeManagedMemory, m_device));
+ if (!static_cast<bool>(hasManagedMemory)) return false;
+ // next, check pageableMemoryAccess
+ int hasPageableMemory = 0; // false by default
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceGetAttribute(
+ &hasPageableMemory, hipDeviceAttributePageableMemoryAccess, m_device));
+ return static_cast<bool>(hasPageableMemory);
+}
+
+void HIPSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void HIPSpace::deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPSpace::impl_deallocate(
+ const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+#ifdef KOKKOS_ENABLE_IMPL_HIP_MALLOC_ASYNC
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipFreeAsync(arg_alloc_ptr, m_stream));
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize());
+#else
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
+#endif
+}
+
+void HIPHostPinnedSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HIPHostPinnedSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPHostPinnedSpace::impl_deallocate(
+ const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(arg_alloc_ptr));
+}
+
+void HIPManagedSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HIPManagedSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPManagedSpace::impl_deallocate(
+ const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+  // We have to unset the CoarseGrain property manually as hipFree does not
+  // take care of it. Otherwise, the allocation would continue to linger in
+  // the kernel's memory page table.
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
+ arg_alloc_ptr, arg_alloc_size, hipMemAdviseUnsetCoarseGrain, m_device));
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
+}
+
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIPSPACE_HPP
+#define KOKKOS_HIPSPACE_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <iosfwd>
+#include <typeinfo>
+#include <string>
+#include <cstddef>
+
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp> // HIP_SAFE_CALL
+
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+
+#include <hip/hip_runtime_api.h>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+struct is_hip_type_space : public std::false_type {};
+
+} // namespace Impl
+
+/** \brief HIP on-device memory management */
+
+class HIPSpace {
+ public:
+ //! Tag this class as a kokkos memory space
+ using memory_space = HIPSpace;
+ using execution_space = HIP;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ using size_type = unsigned int;
+
+ /*--------------------------------*/
+
+ HIPSpace();
+ HIPSpace(HIPSpace&& rhs) = default;
+ HIPSpace(const HIPSpace& rhs) = default;
+ HIPSpace& operator=(HIPSpace&& rhs) = default;
+ HIPSpace& operator=(const HIPSpace& rhs) = default;
+ ~HIPSpace() = default;
+
+ /**\brief Allocate untracked memory in the hip space */
+#ifdef KOKKOS_IMPL_HIP_UNIFIED_MEMORY
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+#else
+ void* allocate(const HIP& exec_space, const size_t arg_alloc_size) const;
+ void* allocate(const HIP& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+#endif
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the hip space */
+ void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ private:
+ void* impl_allocate(const hipStream_t stream, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ bool stream_sync_only) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+ /**\brief Return Name of the MemorySpace */
+ static constexpr const char* name() { return "HIP"; }
+
+ private:
+ int m_device; ///< Which HIP device
+ hipStream_t m_stream;
+};
+
+template <>
+struct Impl::is_hip_type_space<HIPSpace> : public std::true_type {};
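+
+// An illustrative sketch of direct (untracked) allocation through this
+// space; typical Kokkos code goes through Kokkos::View instead, and the
+// label and sizes below are hypothetical:
+//
+//   Kokkos::HIPSpace space;
+//   void* p = space.allocate("my_buffer", bytes);
+//   // ... use p from HIP kernels ...
+//   space.deallocate("my_buffer", p, bytes);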
+
+} // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+/** \brief Host memory that is accessible to HIP execution space
+ * through HIP's host-pinned memory allocation.
+ */
+class HIPHostPinnedSpace {
+ public:
+ //! Tag this class as a kokkos memory space
+ /** \brief Memory is in HostSpace so use the HostSpace::execution_space */
+ using execution_space = HostSpace::execution_space;
+ using memory_space = HIPHostPinnedSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using size_type = unsigned int;
+
+ /*--------------------------------*/
+
+ HIPHostPinnedSpace();
+ HIPHostPinnedSpace(HIPHostPinnedSpace&& rhs) = default;
+ HIPHostPinnedSpace(const HIPHostPinnedSpace& rhs) = default;
+ HIPHostPinnedSpace& operator=(HIPHostPinnedSpace&& rhs) = default;
+ HIPHostPinnedSpace& operator=(const HIPHostPinnedSpace& rhs) = default;
+ ~HIPHostPinnedSpace() = default;
+
+ /**\brief Allocate untracked memory in the space */
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the space */
+ void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ private:
+ void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+ /**\brief Return Name of the MemorySpace */
+ static constexpr const char* name() { return "HIPHostPinned"; }
+
+ /*--------------------------------*/
+};
+
+template <>
+struct Impl::is_hip_type_space<HIPHostPinnedSpace> : public std::true_type {};
+
+} // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+/** \brief Memory that is accessible to HIP execution space
+ * and host through HIP's memory page migration.
+ */
+class HIPManagedSpace {
+ public:
+ //! Tag this class as a kokkos memory space
+ /** \brief Memory is unified to both device and host via page migration
+ * and therefore able to be used by HostSpace::execution_space and
+ * DeviceSpace::execution_space.
+ */
+ using memory_space = HIPManagedSpace;
+ using execution_space = HIP;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using size_type = unsigned int;
+
+ /*--------------------------------*/
+
+ HIPManagedSpace();
+ HIPManagedSpace(HIPManagedSpace&& rhs) = default;
+ HIPManagedSpace(const HIPManagedSpace& rhs) = default;
+ HIPManagedSpace& operator=(HIPManagedSpace&& rhs) = default;
+ HIPManagedSpace& operator=(const HIPManagedSpace& rhs) = default;
+ ~HIPManagedSpace() = default;
+
+ /**\brief Allocate untracked memory in the space */
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the space */
+ void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ // internal only method to determine whether page migration is supported
+ bool impl_hip_driver_check_page_migration() const;
+
+ private:
+ int m_device; ///< Which HIP device
+ void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+ /**\brief Return Name of the MemorySpace */
+ static constexpr const char* name() { return "HIPManaged"; }
+
+ /*--------------------------------*/
+};
+
+template <>
+struct Impl::is_hip_type_space<HIPManagedSpace> : public std::true_type {};
+
+} // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<HIPSpace, HIPSpace>::assignable);
+
+//----------------------------------------
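+// For each specialization MemorySpaceAccess<DstSpace, SrcSpace> below,
+// roughly:
+//   assignable - View<..., DstSpace> can be assigned from View<..., SrcSpace>
+//   accessible - SrcSpace memory is dereferenceable from an execution space
+//                associated with DstSpace
+//   deepcopy   - deep_copy from SrcSpace to DstSpace is supported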
+
+template <>
+struct MemorySpaceAccess<HostSpace, HIPSpace> {
+ enum : bool { assignable = false };
+#if !defined(KOKKOS_IMPL_HIP_UNIFIED_MEMORY)
+  enum : bool { accessible = false };
+#else
+ enum : bool { accessible = true };
+#endif
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HostSpace, HIPHostPinnedSpace> {
+ // HostSpace::execution_space == HIPHostPinnedSpace::execution_space
+ enum : bool { assignable = true };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HostSpace, HIPManagedSpace> {
+ // HostSpace::execution_space != HIPManagedSpace::execution_space
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<HIPSpace, HostSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPSpace, HIPHostPinnedSpace> {
+ // HIPSpace::execution_space != HIPHostPinnedSpace::execution_space
+ enum : bool { assignable = false };
+ enum : bool { accessible = true }; // HIPSpace::execution_space
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPSpace, HIPManagedSpace> {
+ // HIPSpace::execution_space == HIPManagedSpace::execution_space
+ enum : bool { assignable = true };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// HIPHostPinnedSpace::execution_space == HostSpace::execution_space
+// HIPHostPinnedSpace accessible to both HIP and Host
+
+template <>
+struct MemorySpaceAccess<HIPHostPinnedSpace, HostSpace> {
+ enum : bool { assignable = false }; // Cannot access from HIP
+ enum : bool { accessible = true }; // HIPHostPinnedSpace::execution_space
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPHostPinnedSpace, HIPSpace> {
+ enum : bool { assignable = false }; // Cannot access from Host
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPHostPinnedSpace, HIPManagedSpace> {
+ enum : bool { assignable = false }; // different exec_space
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// HIPManagedSpace::execution_space != HostSpace::execution_space
+// HIPManagedSpace accessible to both HIP and Host
+
+template <>
+struct MemorySpaceAccess<HIPManagedSpace, HostSpace> {
+ enum : bool { assignable = false };
+  enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPManagedSpace, HIPSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<HIPManagedSpace, HIPHostPinnedSpace> {
+ enum : bool { assignable = false }; // different exec_space
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* #define KOKKOS_HIPSPACE_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_TEAM_HPP
#define KOKKOS_HIP_TEAM_HPP
*/
class HIPTeamMember {
public:
- using execution_space = Kokkos::Experimental::HIP;
+ using execution_space = HIP;
using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = HIPTeamMember;
private:
mutable void* m_team_reduce;
val = *(reinterpret_cast<ValueType*>(m_team_reduce));
} else { // team <= warp
ValueType tmp(val); // input might not be a register variable
- ::Kokkos::Experimental::Impl::in_place_shfl(
- val, tmp, blockDim.x * thread_id, blockDim.x * blockDim.y);
+ in_place_shfl(val, tmp, blockDim.x * thread_id, blockDim.x * blockDim.y);
}
#else
(void)val;
typename ReducerType::value_type& value) const noexcept {
#ifdef __HIP_DEVICE_COMPILE__
typename Kokkos::Impl::FunctorAnalysis<
- FunctorPatternInterface::REDUCE, TeamPolicy<Experimental::HIP>,
- ReducerType>::Reducer wrapped_reducer(&reducer);
- hip_intra_block_shuffle_reduction(value, wrapped_reducer, blockDim.y);
+ FunctorPatternInterface::REDUCE, TeamPolicy<HIP>, ReducerType,
+ typename ReducerType::value_type>::Reducer wrapped_reducer(reducer);
+ impl_team_reduce(wrapped_reducer, value);
reducer.reference() = value;
#else
(void)reducer;
#endif
}
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<WrappedReducerType>::value>
+ impl_team_reduce(
+ WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) const noexcept {
+#ifdef __HIP_DEVICE_COMPILE__
+ hip_intra_block_shuffle_reduction(value, wrapped_reducer, blockDim.y);
+#else
+ (void)wrapped_reducer;
+ (void)value;
+#endif
+ }
+
//--------------------------------------------------------------------------
/** \brief Intra-team exclusive prefix sum with team_rank() ordering
 * and non-deterministic intra-team accumulation order.
Impl::HIPJoinFunctor<Type> hip_join_functor;
typename Kokkos::Impl::FunctorAnalysis<
- FunctorPatternInterface::REDUCE, TeamPolicy<Experimental::HIP>,
- Impl::HIPJoinFunctor<Type>>::Reducer reducer(&hip_join_functor);
+ FunctorPatternInterface::REDUCE, TeamPolicy<HIP>,
+ Impl::HIPJoinFunctor<Type>, Type>::Reducer reducer(hip_join_functor);
Impl::hip_intra_block_reduce_scan<true>(reducer, base_data + 1);
if (global_accum) {
KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
vector_reduce(ReducerType const& reducer,
typename ReducerType::value_type& value) {
+#ifdef __HIP_DEVICE_COMPILE__
+ using value_type = typename ReducerType::value_type;
+ using wrapped_reducer_type =
+ typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<HIP>, ReducerType,
+ value_type>::Reducer;
+
+ impl_vector_reduce(wrapped_reducer_type(reducer), value);
+ reducer.reference() = value;
+#else
+ (void)reducer;
+ (void)value;
+#endif
+ }
+
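+ /** \brief Vector-lane reduction over an already-wrapped reducer.
+  *
+  * Shuffle-down tree reduction across the blockDim.x vector lanes, followed
+  * by a broadcast of lane 0's result so every lane observes the same value.
+  */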
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+ is_reducer<WrappedReducerType>::value>
+ impl_vector_reduce(WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) {
#ifdef __HIP_DEVICE_COMPILE__
if (blockDim.x == 1) return;
// Intra vector lane shuffle reduction:
- typename ReducerType::value_type tmp(value);
- typename ReducerType::value_type tmp2 = tmp;
+ typename WrappedReducerType::value_type tmp(value);
+ typename WrappedReducerType::value_type tmp2 = tmp;
for (int i = blockDim.x; (i >>= 1);) {
- ::Kokkos::Experimental::Impl::in_place_shfl_down(tmp2, tmp, i,
- blockDim.x);
+ in_place_shfl_down(tmp2, tmp, i, blockDim.x);
if (static_cast<int>(threadIdx.x) < i) {
- reducer.join(tmp, tmp2);
+ wrapped_reducer.join(&tmp, &tmp2);
}
}
// because floating point summation is not associative
// and thus different threads could have different results.
- ::Kokkos::Experimental::Impl::in_place_shfl(tmp2, tmp, 0, blockDim.x);
- value = tmp2;
- reducer.reference() = tmp2;
+ in_place_shfl(tmp2, tmp, 0, blockDim.x);
+ value = tmp2;
#else
- (void)reducer;
+ (void)wrapped_reducer;
(void)value;
#endif
}
ThreadVectorRangeBoundariesStruct(const HIPTeamMember, index_type count)
: start(static_cast<index_type>(0)), end(count) {}
- KOKKOS_INLINE_FUNCTION
- ThreadVectorRangeBoundariesStruct(index_type count)
- : start(static_cast<index_type>(0)), end(count) {}
-
KOKKOS_INLINE_FUNCTION
ThreadVectorRangeBoundariesStruct(const HIPTeamMember, index_type arg_begin,
index_type arg_end)
: start(arg_begin), end(arg_end) {}
-
- KOKKOS_INLINE_FUNCTION
- ThreadVectorRangeBoundariesStruct(index_type arg_begin, index_type arg_end)
- : start(arg_begin), end(arg_end) {}
};
} // namespace Impl
iType, Impl::HIPTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
#ifdef __HIP_DEVICE_COMPILE__
- typename ReducerType::value_type value;
- reducer.init(value);
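+ // Wrap the user's reducer via FunctorAnalysis so a single Reducer object
+ // supplies init/join/final for the team-level reduction below.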
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.y; i < loop_boundaries.end;
i += blockDim.y) {
closure(i, value);
}
- loop_boundaries.member.team_reduce(reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
#else
(void)loop_boundaries;
(void)closure;
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::HIPTeamMember>& loop_boundaries,
const Closure& closure, ValueType& result) {
-#ifdef __HIP_DEVICE_COMPILE__
- ValueType val;
- Kokkos::Sum<ValueType> reducer(val);
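+ // KOKKOS_IF_ON_DEVICE / KOKKOS_IF_ON_HOST select the branch at compile
+ // time, replacing the raw __HIP_DEVICE_COMPILE__ preprocessor guards.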
+ KOKKOS_IF_ON_DEVICE(
+ (using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- reducer.init(reducer.reference());
+ wrapped_reducer_type wrapped_reducer(closure); value_type value;
+ wrapped_reducer.init(&value);
- for (iType i = loop_boundaries.start + threadIdx.y; i < loop_boundaries.end;
- i += blockDim.y) {
- closure(i, val);
- }
+ for (iType i = loop_boundaries.start + threadIdx.y;
+ i < loop_boundaries.end; i += blockDim.y) { closure(i, value); }
- loop_boundaries.member.team_reduce(reducer, val);
- result = reducer.reference();
-#else
- (void)loop_boundaries;
- (void)closure;
- (void)result;
-#endif
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value); result = value;))
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure; (void)result;))
}
/** \brief Inter-thread parallel exclusive prefix sum.
* final == true.
*/
// This is the same code as in CUDA and largely the same as in OpenMPTarget
-template <typename iType, typename FunctorType>
+template <typename iType, typename FunctorType, typename ValueType>
KOKKOS_INLINE_FUNCTION void parallel_scan(
const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
loop_bounds,
- const FunctorType& lambda) {
- // Extract value_type from lambda
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void,
- FunctorType>::value_type;
+ const FunctorType& lambda, ValueType& return_val) {
+ // Extract ValueType from the Functor
+ using functor_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ ValueType>::value_type;
+ static_assert(std::is_same_v<functor_value_type, ValueType>,
+ "Non-matching value types of functor and return type");
const auto start = loop_bounds.start;
const auto end = loop_bounds.end;
const auto team_size = member.team_size();
const auto team_rank = member.team_rank();
const auto nchunk = (end - start + team_size - 1) / team_size;
- value_type accum = 0;
+ ValueType accum = {};
// each team has to process one or more chunks of the prefix scan
for (iType i = 0; i < nchunk; ++i) {
auto ii = start + i * team_size + team_rank;
// local accumulation for this chunk
- value_type local_accum = 0;
+ ValueType local_accum = 0;
// user updates value with prefix value
if (ii < loop_bounds.end) lambda(ii, local_accum, false);
// perform team scan
// broadcast last value to rest of the team
member.team_broadcast(accum, team_size - 1);
}
+ return_val = accum;
+}
+
+/** \brief Inter-thread parallel exclusive prefix sum.
+ *
+ * Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to each rank in the team (whose global rank is
+ * less than N) and a scan operation is performed. The last call to closure has
+ * final == true.
+ */
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+ loop_bounds,
+ const FunctorType& lambda) {
+ // Extract value_type from lambda
+ using value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+
+ value_type scan_val;
+ parallel_scan(loop_bounds, lambda, scan_val);
}
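+
+// A minimal usage sketch of the overload returning the total (the names
+// `member`, `n`, `in`, and `out` are assumptions, not part of this patch):
+//   int total;
+//   parallel_scan(TeamThreadRange(member, n),
+//                 [=](int i, int& partial, bool final) {
+//                   if (final) out(i) = partial;  // exclusive prefix sum
+//                   partial += in(i);
+//                 },
+//                 total);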
template <typename iType, class Closure>
iType, Impl::HIPTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
#ifdef __HIP_DEVICE_COMPILE__
- typename ReducerType::value_type value;
- reducer.init(value);
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
i < loop_boundaries.end; i += blockDim.y * blockDim.x) {
closure(i, value);
}
- loop_boundaries.member.vector_reduce(reducer, value);
- loop_boundaries.member.team_reduce(reducer, value);
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
#else
(void)loop_boundaries;
(void)closure;
parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
iType, Impl::HIPTeamMember>& loop_boundaries,
const Closure& closure, ValueType& result) {
-#ifdef __HIP_DEVICE_COMPILE__
- ValueType val;
- Kokkos::Sum<ValueType> reducer(val);
-
- reducer.init(reducer.reference());
-
- for (iType i = loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
- i < loop_boundaries.end; i += blockDim.y * blockDim.x) {
- closure(i, val);
- }
-
- loop_boundaries.member.vector_reduce(reducer);
- loop_boundaries.member.team_reduce(reducer);
- result = reducer.reference();
-#else
- (void)loop_boundaries;
- (void)closure;
- (void)result;
-#endif
+ KOKKOS_IF_ON_DEVICE(
+ (using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(closure); value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i =
+ loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+ i < loop_boundaries.end;
+ i += blockDim.y * blockDim.x) { closure(i, value); }
+
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value); result = value;))
+
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure; (void)result;))
}
//----------------------------------------------------------------------------
iType, Impl::HIPTeamMember> const& loop_boundaries,
Closure const& closure, ReducerType const& reducer) {
#ifdef __HIP_DEVICE_COMPILE__
- reducer.init(reducer.reference());
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start + threadIdx.x; i < loop_boundaries.end;
i += blockDim.x) {
- closure(i, reducer.reference());
+ closure(i, value);
}
- Impl::HIPTeamMember::vector_reduce(reducer);
+ Impl::HIPTeamMember::impl_vector_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
#else
(void)loop_boundaries;
(void)closure;
parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::HIPTeamMember> const& loop_boundaries,
Closure const& closure, ValueType& result) {
-#ifdef __HIP_DEVICE_COMPILE__
- result = ValueType();
+ KOKKOS_IF_ON_DEVICE(
+ (using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HIPTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- for (iType i = loop_boundaries.start + threadIdx.x; i < loop_boundaries.end;
- i += blockDim.x) {
- closure(i, result);
- }
+ wrapped_reducer_type wrapped_reducer(closure); value_type value;
+ wrapped_reducer.init(&value);
- Impl::HIPTeamMember::vector_reduce(Kokkos::Sum<ValueType>(result));
-#else
- (void)loop_boundaries;
- (void)closure;
- (void)result;
-#endif
+ for (iType i = loop_boundaries.start + threadIdx.x;
+ i < loop_boundaries.end; i += blockDim.x) { closure(i, value); }
+
+ Impl::HIPTeamMember::impl_vector_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value); result = value;))
+
+ KOKKOS_IF_ON_HOST(((void)loop_boundaries; (void)closure; (void)result;))
}
//----------------------------------------------------------------------------
// exclusive scan -- the final accumulation
// of i's val will be included in the second
// closure call later.
- if (i < loop_boundaries.end && threadIdx.x > 0) closure(i - 1, val, false);
+ if (i - 1 < loop_boundaries.end && threadIdx.x > 0)
+ closure(i - 1, val, false);
// Bottom up exclusive scan in triangular pattern
// where each HIP thread is the root of a reduction tree
// inversion.
for (int j = 1; j < static_cast<int>(blockDim.x); j <<= 1) {
value_type tmp = identity;
- ::Kokkos::Experimental::Impl::in_place_shfl_up(tmp, val, j, blockDim.x);
+ Impl::in_place_shfl_up(tmp, val, j, blockDim.x);
if (j <= static_cast<int>(threadIdx.x)) {
reducer.join(val, tmp);
}
// Update i's contribution into the val
// and add it to accum for next round
if (i < loop_boundaries.end) closure(i, val, true);
- ::Kokkos::Experimental::Impl::in_place_shfl(accum, val, blockDim.x - 1,
- blockDim.x);
+ Impl::in_place_shfl(accum, val, blockDim.x - 1, blockDim.x);
}
+ reducer.reference() = accum;
#else
(void)loop_boundaries;
(void)closure;
loop_boundaries,
const Closure& closure) {
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
value_type dummy;
parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>(dummy));
}
+/** \brief Intra-thread vector parallel exclusive prefix sum.
+ *
+ * Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes in the
+ * thread and a scan operation is performed.
+ * The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+ loop_boundaries,
+ const Closure& closure, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ ValueType>::value_type;
+ static_assert(std::is_same_v<closure_value_type, ValueType>,
+ "Non-matching value types of closure and return type");
+
+ ValueType accum;
+ parallel_scan(loop_boundaries, closure, Kokkos::Sum<ValueType>(accum));
+
+ return_val = accum;
+}
+
} // namespace Kokkos
namespace Kokkos {
const FunctorType& lambda, ValueType& val) {
#ifdef __HIP_DEVICE_COMPILE__
if (threadIdx.x == 0) lambda(val);
- ::Kokkos::Experimental::Impl::in_place_shfl(val, val, 0, blockDim.x);
+ Impl::in_place_shfl(val, val, 0, blockDim.x);
#else
(void)lambda;
(void)val;
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_TEAM_POLICY_INTERNAL_HPP
+#define KOKKOS_HIP_TEAM_POLICY_INTERNAL_HPP
+
+#include <Kokkos_MinMax.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename... Properties>
+class TeamPolicyInternal<HIP, Properties...>
+ : public PolicyTraits<Properties...> {
+ public:
+ using execution_policy = TeamPolicyInternal;
+
+ using traits = PolicyTraits<Properties...>;
+
+ template <typename ExecSpace, typename... OtherProperties>
+ friend class TeamPolicyInternal;
+
+ private:
+ typename traits::execution_space m_space;
+ int m_league_size;
+ int m_team_size;
+ int m_vector_length;
+ size_t m_team_scratch_size[2];
+ size_t m_thread_scratch_size[2];
+ int m_chunk_size;
+ bool m_tune_team_size;
+ bool m_tune_vector_length;
+
+ public:
+ using execution_space = HIP;
+
+ template <class... OtherProperties>
+ TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
+ m_league_size = p.m_league_size;
+ m_team_size = p.m_team_size;
+ m_vector_length = p.m_vector_length;
+ m_team_scratch_size[0] = p.m_team_scratch_size[0];
+ m_team_scratch_size[1] = p.m_team_scratch_size[1];
+ m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+ m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+ m_chunk_size = p.m_chunk_size;
+ m_space = p.m_space;
+ m_tune_team_size = p.m_tune_team_size;
+ m_tune_vector_length = p.m_tune_vector_length;
+ }
+
+ template <typename FunctorType>
+ int team_size_max(FunctorType const& f, ParallelForTag const&) const {
+ using closure_type =
+ Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+
+ return internal_team_size_common<BlockType::Max, closure_type, void>(f);
+ }
+
+ template <class FunctorType>
+ inline int team_size_max(const FunctorType& f,
+ const ParallelReduceTag&) const {
+ using functor_analysis_type =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicyInternal, FunctorType, void>;
+ using closure_type = Impl::ParallelReduce<
+ CombinedFunctorReducer<FunctorType,
+ typename functor_analysis_type::Reducer>,
+ TeamPolicy<Properties...>, Kokkos::HIP>;
+ return internal_team_size_common<
+ BlockType::Max, closure_type,
+ typename functor_analysis_type::value_type>(f);
+ }
+
+ template <typename FunctorType, typename ReducerType>
+ inline int team_size_max(const FunctorType& f, const ReducerType&,
+ const ParallelReduceTag&) const {
+ using closure_type =
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ TeamPolicy<Properties...>, Kokkos::HIP>;
+ return internal_team_size_common<BlockType::Max, closure_type,
+ typename ReducerType::value_type>(f);
+ }
+
+ template <typename FunctorType>
+ int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
+ using closure_type =
+ Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+
+ return internal_team_size_common<BlockType::Preferred, closure_type, void>(
+ f);
+ }
+
+ template <typename FunctorType>
+ inline int team_size_recommended(FunctorType const& f,
+ ParallelReduceTag const&) const {
+ using functor_analysis_type =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicyInternal, FunctorType, void>;
+ using closure_type = Impl::ParallelReduce<
+ CombinedFunctorReducer<FunctorType,
+ typename functor_analysis_type::Reducer>,
+ TeamPolicy<Properties...>, Kokkos::HIP>;
+ return internal_team_size_common<
+ BlockType::Preferred, closure_type,
+ typename functor_analysis_type::value_type>(f);
+ }
+
+ template <typename FunctorType, typename ReducerType>
+ int team_size_recommended(FunctorType const& f, ReducerType const&,
+ ParallelReduceTag const&) const {
+ using closure_type =
+ Impl::ParallelReduce<CombinedFunctorReducer<FunctorType, ReducerType>,
+ TeamPolicy<Properties...>, Kokkos::HIP>;
+ return internal_team_size_common<BlockType::Preferred, closure_type,
+ typename ReducerType::value_type>(f);
+ }
+
+ inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+ inline bool impl_auto_team_size() const { return m_tune_team_size; }
+ static int vector_length_max() { return HIPTraits::WarpSize; }
+
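+ // Rounds the request down to a power of two and caps it at
+ // vector_length_max(); e.g. a request of 7 yields 4.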
+ static int verify_requested_vector_length(int requested_vector_length) {
+ int test_vector_length =
+ std::min(requested_vector_length, vector_length_max());
+
+ // Allow only power-of-two vector_length
+ if (!(is_integral_power_of_two(test_vector_length))) {
+ int test_pow2 = 1;
+ constexpr int warp_size = HIPTraits::WarpSize;
+ while (test_pow2 < warp_size) {
+ test_pow2 <<= 1;
+ if (test_pow2 > test_vector_length) {
+ break;
+ }
+ }
+ test_vector_length = test_pow2 >> 1;
+ }
+
+ return test_vector_length;
+ }
+
+ inline static int scratch_size_max(int level) {
+ // HIP Teams use (team_size + 2)*sizeof(double) shared memory for team
+ // reductions. They also use one int64_t in static shared memory for a
+ // shared ID. Furthermore, they use additional scratch memory in some
+ // reduction scenarios; that amount depends on the size of the value_type
+ // and is NOT captured here.
+ constexpr size_t max_possible_team_size = 1024;
+ constexpr size_t max_reserved_shared_mem_per_team =
+ (max_possible_team_size + 2) * sizeof(double) + sizeof(int64_t);
+ // arbitrarily setting level 1 scratch limit to 20MB, for a
+ // MI250 that would give us about 4.4GB for 2 teams per CU
+ constexpr size_t max_l1_scratch_size = 20 * 1024 * 1024;
+
+ size_t max_shmem = HIP().hip_device_prop().sharedMemPerBlock;
+ return (level == 0 ? max_shmem - max_reserved_shared_mem_per_team
+ : max_l1_scratch_size);
+ }
+
+ inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
+ inline void impl_set_team_size(size_t size) { m_team_size = size; }
+ int impl_vector_length() const { return m_vector_length; }
+
+ int team_size() const { return m_team_size; }
+
+ int league_size() const { return m_league_size; }
+
+ size_t scratch_size(int level, int team_size_ = -1) const {
+ if (team_size_ < 0) team_size_ = m_team_size;
+ return m_team_scratch_size[level] +
+ team_size_ * m_thread_scratch_size[level];
+ }
+
+ size_t team_scratch_size(int level) const {
+ return m_team_scratch_size[level];
+ }
+
+ size_t thread_scratch_size(int level) const {
+ return m_thread_scratch_size[level];
+ }
+
+ typename traits::execution_space space() const { return m_space; }
+
+ TeamPolicyInternal()
+ : m_space(typename traits::execution_space()),
+ m_league_size(0),
+ m_team_size(-1),
+ m_vector_length(0),
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(HIPTraits::WarpSize),
+ m_tune_team_size(false),
+ m_tune_vector_length(false) {}
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ int team_size_request, int vector_length_request = 1)
+ : m_space(space_),
+ m_league_size(league_size_),
+ m_team_size(team_size_request),
+ m_vector_length(
+ (vector_length_request > 0)
+ ? verify_requested_vector_length(vector_length_request)
+ : (verify_requested_vector_length(1))),
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(HIPTraits::WarpSize),
+ m_tune_team_size(bool(team_size_request <= 0)),
+ m_tune_vector_length(bool(vector_length_request <= 0)) {
+ // Make sure league size is permissible
+ const int max_grid_size_x = m_space.hip_device_prop().maxGridSize[0];
+ if (league_size_ >= max_grid_size_x)
+ Impl::throw_runtime_exception(
+ "Requested too large league_size for TeamPolicy on HIP execution "
+ "space.");
+
+ // Make sure total block size is permissible
+ if (m_team_size * m_vector_length > HIPTraits::MaxThreadsPerBlock) {
+ Impl::throw_runtime_exception(
+ std::string("Kokkos::TeamPolicy< HIP > the team size is too large. "
+ "Team size x vector length must be smaller than 1024."));
+ }
+ }
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
+ /** \brief Specify league size and team size, request vector length*/
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
+
+ /** \brief Specify league size, request team size and vector length*/
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : TeamPolicyInternal(space_, league_size_, -1, -1) {}
+
+ TeamPolicyInternal(int league_size_, int team_size_request,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+ team_size_request, vector_length_request) {}
+
+ TeamPolicyInternal(int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+ vector_length_request) {}
+
+ /** \brief Specify league size and team size, request vector length*/
+ TeamPolicyInternal(int league_size_, int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+ team_size_request, -1) {}
+
+ /** \brief Specify league size, request team size and vector length*/
+ TeamPolicyInternal(int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+ -1) {}
+
+ int chunk_size() const { return m_chunk_size; }
+
+ TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
+ m_chunk_size = chunk_size_;
+ return *this;
+ }
+
+ /** \brief set per team scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level,
+ PerTeamValue const& per_team) {
+ m_team_scratch_size[level] = per_team.value;
+ return *this;
+ }
+
+ /** \brief set per thread scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level,
+ PerThreadValue const& per_thread) {
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ /** \brief set per thread and per team scratch size for a specific level of
+ * the scratch hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
+ PerThreadValue const& per_thread) {
+ m_team_scratch_size[level] = per_team.value;
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ using member_type = Kokkos::Impl::HIPTeamMember;
+
+ protected:
+ template <BlockType BlockSize, class ClosureType, class ValueType,
+ class FunctorType>
+ int internal_team_size_common(FunctorType const& f) const {
+ const unsigned shmem_block = team_scratch_size(0) + 2 * sizeof(double);
+ unsigned shmem_thread = thread_scratch_size(0) + sizeof(double);
+ using Tag = typename PatternTagFromImplSpecialization<ClosureType>::type;
+ if constexpr (std::is_same_v<Tag, ParallelReduceTag>) {
+ using Interface =
+ typename Impl::DeduceFunctorPatternInterface<ClosureType>::type;
+ using Analysis =
+ Impl::FunctorAnalysis<Interface, typename ClosureType::Policy,
+ FunctorType, ValueType>;
+ shmem_thread +=
+ ((Analysis::StaticValueSize != 0) ? 0 : Analysis::value_size(f));
+ }
+ const int vector_length = impl_vector_length();
+
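+ // Shared memory a block of `block_size` threads would need: per-team
+ // scratch, per-thread scratch for each team member, the functor's own
+ // team_shmem request, and statically declared shared memory.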
+ const auto functor = [&f, shmem_block, shmem_thread, vector_length](
+ const hipFuncAttributes& attr, int block_size) {
+ int functor_shmem =
+ ::Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
+ f, block_size / vector_length);
+ return shmem_block + shmem_thread * (block_size / vector_length) +
+ functor_shmem + attr.sharedSizeBytes;
+ };
+ int block_size;
+ if constexpr (BlockSize == BlockType::Max) {
+ block_size = hip_get_max_team_blocksize<ClosureType,
+ typename traits::launch_bounds>(
+ space().impl_internal_space_instance(), functor);
+ } else {
+ block_size =
+ hip_get_preferred_team_blocksize<ClosureType,
+ typename traits::launch_bounds>(
+ space().impl_internal_space_instance(), functor);
+ }
+
+ if (block_size == 0) {
+ Kokkos::Impl::throw_runtime_exception(std::string(
+ "Kokkos::Impl::ParallelFor/Reduce< HIP > could not find a valid "
+ "team size."));
+ }
+ if constexpr (std::is_same_v<Tag, ParallelForTag>) {
+ return block_size / impl_vector_length();
+ } else {
+ // Currently we require Power-of-2 team size for reductions.
+ int p2 = 1;
+ while (p2 <= block_size) p2 *= 2;
+ p2 /= 2;
+ return p2 / impl_vector_length();
+ }
+ }
+};
+
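+// Acquire a scratch-slot index for this block: thread (0,0) probes the
+// scratch_locks array with atomicCAS, wrapping around until it wins a slot,
+// then shares the base index with the whole block through shared memory.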
+__device__ inline int64_t hip_get_scratch_index(HIP::size_type league_size,
+ int32_t* scratch_locks,
+ size_t num_scratch_locks) {
+ int64_t threadid = 0;
+ __shared__ int64_t base_thread_id;
+ if (threadIdx.x == 0 && threadIdx.y == 0) {
+ int64_t const wraparound_len =
+ Kokkos::min(int64_t(league_size),
+ int64_t(num_scratch_locks) / (blockDim.x * blockDim.y));
+ threadid = (blockIdx.x * blockDim.z + threadIdx.z) % wraparound_len;
+ threadid *= blockDim.x * blockDim.y;
+ int done = 0;
+ while (!done) {
+ done = (0 == atomicCAS(&scratch_locks[threadid], 0, 1));
+ if (!done) {
+ threadid += blockDim.x * blockDim.y;
+ if (int64_t(threadid + blockDim.x * blockDim.y) >=
+ wraparound_len * blockDim.x * blockDim.y)
+ threadid = 0;
+ }
+ }
+ base_thread_id = threadid;
+ }
+ __syncthreads();
+ threadid = base_thread_id;
+ return threadid;
+}
+
+__device__ inline void hip_release_scratch_index(int32_t* scratch_locks,
+ int64_t threadid) {
+ __syncthreads();
+ if (threadIdx.x == 0 && threadIdx.y == 0) {
+ scratch_locks[threadid] = 0;
+ }
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_UNIQUE_TOKEN_HPP
#define KOKKOS_HIP_UNIQUE_TOKEN_HPP
-#include <Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
#include <Kokkos_UniqueToken.hpp>
-#include <impl/Kokkos_SharedAlloc.hpp>
namespace Kokkos {
namespace Impl {
-Kokkos::View<uint32_t*, Kokkos::Experimental::HIPSpace>
-hip_global_unique_token_locks(bool deallocate = false);
+Kokkos::View<uint32_t*, HIPSpace> hip_global_unique_token_locks(
+ bool deallocate = false);
}
namespace Experimental {
done_active = __ballot(done ? 1 : 0);
}
-// Make sure that all writes in the previous lock owner are visible to me
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+ // Make sure that all writes in the previous lock owner are visible to me
desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
return idx;
}
/// \brief release an acquired value
KOKKOS_INLINE_FUNCTION
void release(size_type idx) const noexcept {
-// Make sure my writes are visible to the next lock owner
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+ // Make sure my writes are visible to the next lock owner
desul::atomic_thread_fence(desul::MemoryOrderRelease(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
(void)Kokkos::atomic_exchange(m_locks.data() + idx, 0);
}
};
// The instance version will forward to protected constructor which creates
// a lock array per instance
UniqueToken()
- : UniqueToken<HIP, UniqueTokenScope::Global>(
- Kokkos::Experimental::HIP().concurrency()) {}
+ : UniqueToken<HIP, UniqueTokenScope::Global>(HIP().concurrency()) {}
explicit UniqueToken(execution_space const& arg)
- : UniqueToken<HIP, UniqueTokenScope::Global>(
- Kokkos::Experimental::HIP().concurrency(), arg) {}
+ : UniqueToken<HIP, UniqueTokenScope::Global>(HIP().concurrency(), arg) {}
explicit UniqueToken(size_type max_size)
: UniqueToken<HIP, UniqueTokenScope::Global>(max_size) {}
UniqueToken(size_type max_size, execution_space const& arg)
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HIP_VECTORIZATION_HPP
#define KOKKOS_HIP_VECTORIZATION_HPP
#include <Kokkos_Macros.hpp>
namespace Kokkos {
-namespace Experimental {
namespace Impl {
//----------------------------------------------------------------------------
template <class Scalar>
// requires _assignable_from_bits<Scalar>
__device__ inline std::enable_if_t<sizeof(Scalar) < sizeof(int)> operator()(
- Scalar& out, Scalar const& in, int lane_or_delta, int width) const
- noexcept {
+ Scalar& out, Scalar const& in, int lane_or_delta,
+ int width) const noexcept {
using shfl_type = int;
union conv_type {
Scalar orig;
template <class Scalar>
// requires _assignable_from_bits<Scalar>
__device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(int)> operator()(
- Scalar& out, Scalar const& in, int lane_or_delta, int width) const
- noexcept {
+ Scalar& out, Scalar const& in, int lane_or_delta,
+ int width) const noexcept {
reinterpret_cast<int&>(out) = self().do_shfl_op(
reinterpret_cast<int const&>(in), lane_or_delta, width);
}
template <class Scalar>
__device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(double)>
- operator()(Scalar& out, Scalar const& in, int lane_or_delta, int width) const
- noexcept {
+ operator()(Scalar& out, Scalar const& in, int lane_or_delta,
+ int width) const noexcept {
reinterpret_cast<double&>(out) = self().do_shfl_op(
*reinterpret_cast<double const*>(&in), lane_or_delta, width);
}
// sizeof(Scalar) > sizeof(double) case
template <typename Scalar>
__device__ inline std::enable_if_t<(sizeof(Scalar) > sizeof(double))>
- operator()(Scalar& out, const Scalar& val, int lane_or_delta, int width) const
- noexcept {
+ operator()(Scalar& out, const Scalar& val, int lane_or_delta,
+ int width) const noexcept {
using shuffle_as_t = int;
- int constexpr N = sizeof(Scalar) / sizeof(shuffle_as_t);
+ constexpr int N = sizeof(Scalar) / sizeof(shuffle_as_t);
for (int i = 0; i < N; ++i) {
reinterpret_cast<shuffle_as_t*>(&out)[i] = self().do_shfl_op(
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl(Args&&... args) noexcept {
- in_place_shfl_fn{}((Args &&) args...);
+ in_place_shfl_fn{}((Args&&)args...);
}
struct in_place_shfl_up_fn : in_place_shfl_op<in_place_shfl_up_fn> {
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_up(
Args&&... args) noexcept {
- in_place_shfl_up_fn{}((Args &&) args...);
+ in_place_shfl_up_fn{}((Args&&)args...);
}
struct in_place_shfl_down_fn : in_place_shfl_op<in_place_shfl_down_fn> {
template <class... Args>
__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_down(
Args&&... args) noexcept {
- in_place_shfl_down_fn{}((Args &&) args...);
+ in_place_shfl_down_fn{}((Args&&)args...);
}
} // namespace Impl
return rv;
}
-} // namespace Experimental
} // namespace Kokkos
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_WORKGRAPHPOLICY_HPP
+#define KOKKOS_HIP_WORKGRAPHPOLICY_HPP
+
+#include <HIP/Kokkos_HIP.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>, HIP> {
+ public:
+ using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+ using Self = ParallelFor<FunctorType, Policy, HIP>;
+
+ private:
+ Policy m_policy;
+ FunctorType m_functor;
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_one(
+ const std::int32_t w) const noexcept {
+ m_functor(w);
+ }
+
+ template <class TagType>
+ __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+ const std::int32_t w) const noexcept {
+ const TagType t{};
+ m_functor(t, w);
+ }
+
+ public:
+ __device__ inline void operator()() const noexcept {
+ // Spin until COMPLETED_TOKEN.
+ // END_TOKEN indicates no work is currently available.
+ for (std::int32_t w = Policy::END_TOKEN;
+ Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+ if (Policy::END_TOKEN != w) {
+ exec_one<typename Policy::work_tag>(w);
+ m_policy.completed_work(w);
+ }
+ }
+ }
+
+ inline void execute() {
+ const int warps_per_block = 4;
+ const dim3 grid(hip_internal_multiprocessor_count(), 1, 1);
+ const dim3 block(1, HIPTraits::WarpSize, warps_per_block);
+ const int shared = 0;
+
+ HIPParallelLaunch<Self>(*this, grid, block, shared,
+ HIP().impl_internal_space_instance(), false);
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* KOKKOS_HIP_WORKGRAPHPOLICY_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <HIP/Kokkos_HIP_ZeroMemset.hpp>
+#include <HIP/Kokkos_HIP_ParallelFor_Range.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// alternative to hipMemsetAsync; zeros the first `cnt` bytes of `dst` using a
+// HIP kernel
+void zero_with_hip_kernel(const HIP& exec_space, void* dst, size_t cnt) {
+ Kokkos::parallel_for(
+ "Kokkos::ZeroMemset via parallel_for",
+ Kokkos::RangePolicy<Kokkos::HIP>(exec_space, 0, cnt),
+ KOKKOS_LAMBDA(size_t i) { static_cast<char*>(dst)[i] = 0; });
+}
+
+} // namespace Impl
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#ifndef KOKKOS_HIP_ZEROMEMSET_HPP
+#define KOKKOS_HIP_ZEROMEMSET_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <HIP/Kokkos_HIP.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// alternative to hipMemsetAsync; zeros the first `cnt` bytes of `dst` using a
+// HIP kernel
+void zero_with_hip_kernel(const HIP& exec_space, void* dst, size_t cnt);
+
+template <>
+struct ZeroMemset<HIP> {
+ ZeroMemset(const HIP& exec_space, void* dst, size_t cnt) {
+ // in ROCm <= 6.2.0, hipMemsetAsync on a host-allocated pointer
+ // returns an invalid value error, but accessing the data via a
+ // GPU kernel works.
+#if defined(KOKKOS_IMPL_HIP_UNIFIED_MEMORY)
+ zero_with_hip_kernel(exec_space, dst, cnt);
+#else
+ KOKKOS_IMPL_HIP_SAFE_CALL(
+ hipMemsetAsync(dst, 0, cnt, exec_space.hip_stream()));
+#endif
+ }
+};
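+
+// Note: ZeroMemset is Kokkos' internal customization point for zero-filling
+// contiguous memory (e.g. deep_copy with a zero value may dispatch here);
+// the call sites themselves are outside this patch.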
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // !defined(KOKKOS_HIP_ZEROMEMSET_HPP)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#ifdef KOKKOS_ENABLE_HPX
+#include <HPX/Kokkos_HPX.hpp>
+
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <hpx/condition_variable.hpp>
+#include <hpx/init.hpp>
+#include <hpx/mutex.hpp>
+#include <hpx/runtime.hpp>
+#include <hpx/thread.hpp>
+#include <hpx/version.hpp>
+
+#include <atomic>
+#include <chrono>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+void hpx_thread_buffer::resize(const std::size_t num_threads,
+ const std::size_t size_per_thread,
+ const std::size_t extra_space) noexcept {
+ m_num_threads = num_threads;
+ m_size_per_thread = size_per_thread;
+ m_extra_space = extra_space;
+
+ pad_to_cache_line(m_size_per_thread);
+
+ std::size_t size_total_new =
+ m_num_threads * m_size_per_thread + m_extra_space;
+
+ if (m_size_total < size_total_new) {
+ // Don't use make_unique here as it value-initializes the elements of the
+ // array, which we have no use for, and can be very slow for large arrays.
+ m_data = std::unique_ptr<char[]>(new char[size_total_new]);
+ m_size_total = size_total_new;
+ }
+}
+
+void *hpx_thread_buffer::get(std::size_t thread_num) const noexcept {
+ KOKKOS_EXPECTS(thread_num < m_num_threads);
+ if (!m_data) {
+ return nullptr;
+ }
+ return &m_data[thread_num * m_size_per_thread];
+}
+
+void *hpx_thread_buffer::get_extra_space() const noexcept {
+ KOKKOS_EXPECTS(m_extra_space > 0);
+ if (!m_data) {
+ return nullptr;
+ }
+ return &m_data[m_num_threads * m_size_per_thread];
+}
+} // namespace Impl
+
+namespace Experimental {
+
+bool HPX::m_hpx_initialized = false;
+std::atomic<uint32_t> HPX::m_next_instance_id{HPX::impl_default_instance_id() +
+ 1};
+uint32_t HPX::m_active_parallel_region_count{0};
+hpx::spinlock HPX::m_active_parallel_region_count_mutex;
+hpx::condition_variable_any HPX::m_active_parallel_region_count_cond;
+HPX::instance_data HPX::m_default_instance_data;
+
+void HPX::print_configuration(std::ostream &os, const bool) const {
+ os << "Host Parallel Execution Space\n";
+ os << " KOKKOS_ENABLE_HPX: yes\n";
+ os << "HPX Options:\n";
+#if defined(KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH)
+ os << " KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH: yes\n";
+#else
+ os << " KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH: no\n";
+#endif
+ os << "\nHPX Runtime Configuration:\n";
+ os << "Worker threads: " << hpx::get_num_worker_threads() << '\n';
+ os << hpx::complete_version() << '\n';
+ os << hpx::configuration_string() << '\n';
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+bool &HPX::impl_get_in_parallel() noexcept {
+ static thread_local bool in_parallel = false;
+ return in_parallel;
+}
+
+HPX::impl_in_parallel_scope::impl_in_parallel_scope() noexcept {
+ KOKKOS_EXPECTS(!impl_get_in_parallel());
+ impl_get_in_parallel() = true;
+}
+
+HPX::impl_in_parallel_scope::~impl_in_parallel_scope() noexcept {
+ KOKKOS_EXPECTS(impl_get_in_parallel());
+ impl_get_in_parallel() = false;
+}
+
+HPX::impl_not_in_parallel_scope::impl_not_in_parallel_scope() noexcept {
+ KOKKOS_EXPECTS(impl_get_in_parallel());
+ impl_get_in_parallel() = false;
+}
+
+HPX::impl_not_in_parallel_scope::~impl_not_in_parallel_scope() noexcept {
+ KOKKOS_EXPECTS(!impl_get_in_parallel());
+ impl_get_in_parallel() = true;
+}
+#endif
+
+void HPX::impl_decrement_active_parallel_region_count() {
+ std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
+ if (--m_active_parallel_region_count == 0) {
+ l.unlock();
+ m_active_parallel_region_count_cond.notify_all();
+ }
+}
+
+void HPX::impl_increment_active_parallel_region_count() {
+ std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
+ ++m_active_parallel_region_count;
+}
+
+void HPX::impl_instance_fence_locked(const std::string &name) const {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<
+ Kokkos::Experimental::HPX>(
+ name,
+ Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+ impl_instance_id()},
+ [&]() {
+ auto &s = impl_get_sender();
+
+ hpx::this_thread::experimental::sync_wait(std::move(s));
+ s = hpx::execution::experimental::unique_any_sender<>(
+ hpx::execution::experimental::just());
+ });
+}
+
+void HPX::impl_instance_fence(const std::string &name) const {
+ std::lock_guard<hpx::spinlock> l(impl_get_sender_mutex());
+ impl_instance_fence_locked(name);
+}
+
+void HPX::impl_static_fence(const std::string &name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<
+ Kokkos::Experimental::HPX>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+ [&]() {
+ auto &s = HPX().impl_get_sender();
+
+ std::unique_lock<hpx::spinlock> l(HPX().impl_get_sender_mutex());
+
+ // This is a loose fence. Any work scheduled before this will be waited
+ // for, but work scheduled while waiting may also be waited for.
+ {
+ std::unique_lock<hpx::spinlock> l_count(
+ m_active_parallel_region_count_mutex);
+ m_active_parallel_region_count_cond.wait(
+ l_count, [&]() { return m_active_parallel_region_count == 0; });
+ }
+
+ hpx::this_thread::experimental::sync_wait(std::move(s));
+ s = hpx::execution::experimental::unique_any_sender<>(
+ hpx::execution::experimental::just());
+ });
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int HPX::concurrency() {
+#else
+int HPX::concurrency() const {
+#endif
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ if (rt == nullptr) {
+ return hpx::threads::hardware_concurrency();
+ } else {
+ if (hpx::threads::get_self_ptr() == nullptr) {
+ return hpx::resource::get_thread_pool(0).get_os_thread_count();
+ } else {
+ return hpx::this_thread::get_pool()->get_os_thread_count();
+ }
+ }
+}
+
+void HPX::impl_initialize(InitializationSettings const &settings) {
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ if (rt == nullptr) {
+ hpx::init_params i;
+ if (settings.has_num_threads()) {
+ i.cfg.emplace_back("hpx.os_threads=" +
+ std::to_string(settings.get_num_threads()));
+ }
+ int argc_hpx = 1;
+ char name[] = "kokkos_hpx";
+ char *argv_hpx[] = {name, nullptr};
+ hpx::start(nullptr, argc_hpx, argv_hpx, i);
+
+ m_hpx_initialized = true;
+ }
+}
+
+bool HPX::impl_is_initialized() noexcept {
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ return rt != nullptr;
+}
+
+void HPX::impl_finalize() {
+ if (m_hpx_initialized) {
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ if (rt != nullptr) {
+#if HPX_VERSION_FULL >= 0x010900
+ hpx::post([]() { hpx::finalize(); });
+#else
+ hpx::apply([]() { hpx::finalize(); });
+#endif
+ hpx::stop();
+ } else {
+ Kokkos::abort(
+ "Kokkos::Experimental::HPX::impl_finalize: Kokkos started "
+ "HPX but something else already stopped HPX\n");
+ }
+ }
+}
+
+int HPX::impl_thread_pool_size() noexcept {
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ if (rt == nullptr) {
+ return 0;
+ } else {
+ if (hpx::threads::get_self_ptr() == nullptr) {
+ return hpx::resource::get_thread_pool(0).get_os_thread_count();
+ } else {
+ return hpx::this_thread::get_pool()->get_os_thread_count();
+ }
+ }
+}
+
+int HPX::impl_thread_pool_rank() noexcept {
+ hpx::runtime *rt = hpx::get_runtime_ptr();
+ if (rt == nullptr) {
+ return 0;
+ } else {
+ if (hpx::threads::get_self_ptr() == nullptr) {
+ return 0;
+ } else {
+ return hpx::this_thread::get_pool()->get_pool_index();
+ }
+ }
+}
+
+int HPX::impl_thread_pool_size(int depth) {
+ if (depth == 0) {
+ return impl_thread_pool_size();
+ } else {
+ return 1;
+ }
+}
+
+template void HPX::impl_bulk_plain_erased<int>(
+ bool, bool, std::function<void(int)> &&, int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_plain_erased<unsigned int>(
+ bool, bool, std::function<void(unsigned int)> &&, unsigned int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_plain_erased<long>(
+ bool, bool, std::function<void(long)> &&, long const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_plain_erased<std::size_t>(
+ bool, bool, std::function<void(std::size_t)> &&, std::size_t const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_setup_finalize_erased<int>(
+ bool, bool, std::function<void(int)> &&, std::function<void()> &&,
+ std::function<void()> &&, int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_setup_finalize_erased<unsigned int>(
+ bool, bool, std::function<void(unsigned int)> &&, std::function<void()> &&,
+ std::function<void()> &&, unsigned int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_setup_finalize_erased<long>(
+ bool, bool, std::function<void(long)> &&, std::function<void()> &&,
+ std::function<void()> &&, long const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+template void HPX::impl_bulk_setup_finalize_erased<std::size_t>(
+ bool, bool, std::function<void(std::size_t)> &&, std::function<void()> &&,
+ std::function<void()> &&, std::size_t const,
+ hpx::threads::thread_stacksize stacksize) const;
+} // namespace Experimental
+
+namespace Impl {
+int g_hpx_space_factory_initialized =
+ initialize_space_factory<Kokkos::Experimental::HPX>("060_HPX");
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_HPX_PREVENT_LINK_ERROR() {}
+#endif // #ifdef KOKKOS_ENABLE_HPX
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_HPX_HPP
+#define KOKKOS_HPX_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+#include <hpx/barrier.hpp>
+#include <hpx/condition_variable.hpp>
+#include <hpx/execution.hpp>
+#include <hpx/future.hpp>
+#include <hpx/mutex.hpp>
+#include <hpx/thread.hpp>
+
+#include <Kokkos_UniqueToken.hpp>
+
+#include <cstddef>
+#include <iosfwd>
+#include <functional>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+namespace Kokkos {
+namespace Impl {
+class hpx_thread_buffer {
+ static constexpr std::size_t m_cache_line_size = 64;
+
+ std::size_t m_num_threads = 0;
+ std::size_t m_size_per_thread = 0;
+ std::size_t m_extra_space = 0;
+ std::size_t m_size_total = 0;
+ std::unique_ptr<char[]> m_data = nullptr;
+
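+ // Rounds `size` up to a multiple of the cache line size so that distinct
+ // threads' buffers never share a cache line (avoids false sharing).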
+ static constexpr void pad_to_cache_line(std::size_t &size) {
+ size = ((size + m_cache_line_size - 1) / m_cache_line_size) *
+ m_cache_line_size;
+ }
+
+ public:
+ hpx_thread_buffer() = default;
+ ~hpx_thread_buffer() = default;
+ hpx_thread_buffer(const hpx_thread_buffer &) = delete;
+ hpx_thread_buffer(hpx_thread_buffer &&) = delete;
+ hpx_thread_buffer &operator=(const hpx_thread_buffer &) = delete;
+ hpx_thread_buffer &operator=(hpx_thread_buffer) = delete;
+
+ void resize(const std::size_t num_threads, const std::size_t size_per_thread,
+ const std::size_t extra_space = 0) noexcept;
+ void *get(std::size_t thread_num) const noexcept;
+ void *get_extra_space() const noexcept;
+};
+
+template <typename T>
+struct hpx_range {
+ T begin;
+ T end;
+};
+
+template <typename T>
+constexpr T get_num_chunks(const T offset, const T chunk_size, const T max) {
+ return (max - offset + chunk_size - 1) / chunk_size;
+}
+
+template <typename T>
+constexpr hpx_range<T> get_chunk_range(const T i_chunk, const T offset,
+ const T chunk_size, const T max) {
+ const T begin = offset + i_chunk * chunk_size;
+ const T end = (std::min)(begin + chunk_size, max);
+ return {begin, end};
+}
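+
+// For example, with offset = 0, chunk_size = 100, and max = 250,
+// get_num_chunks returns (250 + 99) / 100 = 3 and get_chunk_range yields
+// [0, 100), [100, 200), and [200, 250); the last chunk is clamped to max.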
+
+template <typename Policy>
+constexpr bool is_light_weight_policy() {
+ constexpr Kokkos::Experimental::WorkItemProperty::HintLightWeight_t
+ light_weight = Kokkos::Experimental::WorkItemProperty::HintLightWeight;
+ return (typename Policy::work_item_property() & light_weight) == light_weight;
+}
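+
+// A policy opts in to the light-weight path via a work-item property hint,
+// e.g. (sketch):
+//   auto p = Kokkos::Experimental::require(
+//       Kokkos::RangePolicy<Kokkos::Experimental::HPX>(0, n),
+//       Kokkos::Experimental::WorkItemProperty::HintLightWeight);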
+} // namespace Impl
+
+namespace Experimental {
+class HPX {
+ public:
+ static constexpr uint32_t impl_default_instance_id() { return 1; }
+
+ private:
+ static bool m_hpx_initialized;
+ static std::atomic<uint32_t> m_next_instance_id;
+
+ public:
+ enum class instance_mode { default_, independent };
+
+ private:
+ static uint32_t m_active_parallel_region_count;
+ static hpx::spinlock m_active_parallel_region_count_mutex;
+ static hpx::condition_variable_any m_active_parallel_region_count_cond;
+
+ struct instance_data {
+ instance_data() = default;
+ ~instance_data() = default;
+ instance_data(uint32_t instance_id) : m_instance_id(instance_id) {}
+ instance_data(uint32_t instance_id,
+ hpx::execution::experimental::unique_any_sender<> &&sender)
+ : m_instance_id(instance_id), m_sender{std::move(sender)} {}
+
+ instance_data(const instance_data &) = delete;
+ instance_data(instance_data &&) = delete;
+ instance_data &operator=(const instance_data &) = delete;
+ instance_data &operator=(instance_data &&) = delete;
+
+ uint32_t m_instance_id{HPX::impl_default_instance_id()};
+ hpx::execution::experimental::unique_any_sender<> m_sender{
+ hpx::execution::experimental::just()};
+ Kokkos::Impl::hpx_thread_buffer m_buffer;
+ hpx::spinlock m_sender_mutex;
+ };
+
+ static void default_instance_deleter(instance_data *) {}
+ static instance_data m_default_instance_data;
+ Kokkos::Impl::HostSharedPtr<instance_data> m_instance_data;
+
+ public:
+ using execution_space = HPX;
+ using memory_space = HostSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using array_layout = LayoutRight;
+ using size_type = memory_space::size_type;
+ using scratch_memory_space = ScratchMemorySpace<HPX>;
+
+ HPX()
+ : m_instance_data(Kokkos::Impl::HostSharedPtr<instance_data>(
+ &m_default_instance_data, &default_instance_deleter)) {}
+ ~HPX() = default;
+ explicit HPX(instance_mode mode)
+ : m_instance_data(
+ mode == instance_mode::independent
+ ? (Kokkos::Impl::HostSharedPtr<instance_data>(
+ new instance_data(m_next_instance_id++)))
+ : Kokkos::Impl::HostSharedPtr<instance_data>(
+ &m_default_instance_data, &default_instance_deleter)) {}
+ explicit HPX(hpx::execution::experimental::unique_any_sender<> &&sender)
+ : m_instance_data(Kokkos::Impl::HostSharedPtr<instance_data>(
+ new instance_data(m_next_instance_id++, std::move(sender)))) {}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "HPX execution space should be constructed explicitly.")
+ HPX(instance_mode mode)
+ : HPX(mode) {}
+
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "HPX execution space should be constructed explicitly.")
+ HPX(hpx::execution::experimental::unique_any_sender<> &&sender)
+ : HPX(std::move(sender)) {}
+#endif
+
+ HPX(HPX &&other) = default;
+ HPX(const HPX &other) = default;
+
+ HPX &operator=(HPX &&) = default;
+ HPX &operator=(const HPX &) = default;
+
+ void print_configuration(std::ostream &os, bool /*verbose*/ = false) const;
+ instance_data &impl_get_instance_data() const noexcept {
+ KOKKOS_EXPECTS(m_instance_data.get());
+ return *m_instance_data.get();
+ }
+ uint32_t impl_instance_id() const noexcept {
+ return impl_get_instance_data().m_instance_id;
+ }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ static bool &impl_get_in_parallel() noexcept;
+
+ struct impl_in_parallel_scope {
+ impl_in_parallel_scope() noexcept;
+ ~impl_in_parallel_scope() noexcept;
+ impl_in_parallel_scope(impl_in_parallel_scope &&) = delete;
+ impl_in_parallel_scope(impl_in_parallel_scope const &) = delete;
+ impl_in_parallel_scope &operator=(impl_in_parallel_scope &&) = delete;
+ impl_in_parallel_scope &operator=(impl_in_parallel_scope const &) = delete;
+ };
+
+ struct impl_not_in_parallel_scope {
+ impl_not_in_parallel_scope() noexcept;
+ ~impl_not_in_parallel_scope() noexcept;
+ impl_not_in_parallel_scope(impl_not_in_parallel_scope &&) = delete;
+ impl_not_in_parallel_scope(impl_not_in_parallel_scope const &) = delete;
+ impl_not_in_parallel_scope &operator=(impl_not_in_parallel_scope &&) =
+ delete;
+ impl_not_in_parallel_scope &operator=(impl_not_in_parallel_scope const &) =
+ delete;
+ };
+
+ KOKKOS_DEPRECATED static bool in_parallel(HPX const & = HPX()) noexcept {
+ return impl_get_in_parallel();
+ }
+#endif
+
+ static void impl_decrement_active_parallel_region_count();
+ static void impl_increment_active_parallel_region_count();
+
+ void impl_instance_fence_locked(const std::string &name) const;
+ void impl_instance_fence(const std::string &name) const;
+ static void impl_static_fence(const std::string &name);
+
+ void fence(
+ const std::string &name =
+ "Kokkos::Experimental::HPX::fence: Unnamed Instance Fence") const {
+ impl_instance_fence(name);
+ }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED static bool is_asynchronous(HPX const & = HPX()) noexcept {
+#if defined(KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH)
+ return true;
+#else
+ return false;
+#endif
+ }
+#endif
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ static int concurrency();
+#else
+ int concurrency() const;
+#endif
+ static void impl_initialize(InitializationSettings const &);
+ static bool impl_is_initialized() noexcept;
+ static void impl_finalize();
+ static int impl_thread_pool_size() noexcept;
+ static int impl_thread_pool_rank() noexcept;
+ static int impl_thread_pool_size(int depth);
+
+ static int impl_max_hardware_threads() noexcept {
+ return hpx::threads::hardware_concurrency();
+ }
+
+ static int impl_hardware_thread_id() noexcept {
+ return hpx::get_worker_thread_num();
+ }
+
+ Kokkos::Impl::hpx_thread_buffer &impl_get_buffer() const noexcept {
+ return impl_get_instance_data().m_buffer;
+ }
+
+ hpx::execution::experimental::unique_any_sender<> &impl_get_sender()
+ const noexcept {
+ return impl_get_instance_data().m_sender;
+ }
+
+ hpx::execution::experimental::any_sender<> get_sender() const noexcept {
+ std::lock_guard l(impl_get_sender_mutex());
+ auto &s = impl_get_sender();
+ auto split_s = hpx::execution::experimental::split(std::move(s));
+ s = split_s;
+ return hpx::execution::experimental::any_sender<>{split_s};
+ }
+
+ hpx::future<void> impl_get_future() const noexcept {
+ return hpx::execution::experimental::make_future(get_sender());
+ }
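+ // Usage sketch (illustrative): the future above accepts ordinary HPX
+ // continuations, e.g.
+ //   instance.impl_get_future().then(
+ //       [](hpx::future<void> &&) { /* runs after previously submitted
+ //       work on this instance has completed */ });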
+
+ hpx::spinlock &impl_get_sender_mutex() const noexcept {
+ return impl_get_instance_data().m_sender_mutex;
+ }
+
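+ // Dispatch model: each instance owns a single unique_any_sender chain.
+ // The bulk helpers below append new work to that chain while holding the
+ // sender mutex and eagerly start it with ensure_started(), so regions
+ // submitted to one instance run in submission order while the submitting
+ // thread returns immediately (unless a synchronous fence is forced).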
+ template <typename I>
+ void impl_bulk_plain_erased(
+ [[maybe_unused]] bool force_synchronous, bool is_light_weight_policy,
+ std::function<void(I)> &&f, I const n,
+ hpx::threads::thread_stacksize stacksize =
+ hpx::threads::thread_stacksize::default_) const {
+ Kokkos::Experimental::HPX::impl_increment_active_parallel_region_count();
+
+ namespace ex = hpx::execution::experimental;
+
+ auto &sen = impl_get_sender();
+ auto &mut = impl_get_sender_mutex();
+
+ std::lock_guard<hpx::spinlock> l(mut);
+ hpx::util::ignore_lock(&mut);
+
+ {
+ if (n == 1 && is_light_weight_policy &&
+ (hpx::threads::get_self_ptr() != nullptr)) {
+ sen = std::move(sen) | ex::then(hpx::bind_front(std::move(f), 0)) |
+ ex::then(Kokkos::Experimental::HPX::
+ impl_decrement_active_parallel_region_count) |
+ ex::ensure_started();
+ } else {
+ sen = std::move(sen) |
+ ex::transfer(
+ ex::with_stacksize(ex::thread_pool_scheduler{}, stacksize)) |
+ ex::bulk(n, std::move(f)) |
+ ex::then(Kokkos::Experimental::HPX::
+ impl_decrement_active_parallel_region_count) |
+ ex::ensure_started();
+ }
+ }
+
+#if defined(KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH)
+ if (force_synchronous)
+#endif
+ {
+ impl_instance_fence_locked(
+ "Kokkos::Experimental::HPX: fence due to forced synchronizations");
+ }
+ }
+
+ template <typename Functor, typename Index>
+ void impl_bulk_plain(bool force_synchronous, bool is_light_weight_policy,
+ Functor const &functor, Index const n,
+ hpx::threads::thread_stacksize stacksize =
+ hpx::threads::thread_stacksize::default_) const {
+ impl_bulk_plain_erased(force_synchronous, is_light_weight_policy,
+ {[functor](Index i) {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ impl_in_parallel_scope p;
+#endif
+ functor.execute_range(i);
+ }},
+ n, stacksize);
+ }
+
+ template <typename Index>
+ void impl_bulk_setup_finalize_erased(
+ [[maybe_unused]] bool force_synchronous, bool is_light_weight_policy,
+ std::function<void(Index)> &&f, std::function<void()> &&f_setup,
+ std::function<void()> &&f_finalize, Index const n,
+ hpx::threads::thread_stacksize stacksize =
+ hpx::threads::thread_stacksize::default_) const {
+ Kokkos::Experimental::HPX::impl_increment_active_parallel_region_count();
+
+ namespace ex = hpx::execution::experimental;
+ using hpx::threads::thread_stacksize;
+
+ auto &sen = impl_get_sender();
+ auto &mut = impl_get_sender_mutex();
+
+ std::lock_guard<hpx::spinlock> l(mut);
+ hpx::util::ignore_lock(&mut);
+
+ {
+ if (n == 1 && is_light_weight_policy &&
+ (hpx::threads::get_self_ptr() != nullptr)) {
+ sen = std::move(sen) | ex::then(std::move(f_setup)) |
+ ex::then(hpx::bind_front(std::move(f), 0)) |
+ ex::then(std::move(f_finalize)) |
+ ex::then(Kokkos::Experimental::HPX::
+ impl_decrement_active_parallel_region_count) |
+ ex::ensure_started();
+ } else {
+ sen = std::move(sen) |
+ ex::transfer(
+ ex::with_stacksize(ex::thread_pool_scheduler{}, stacksize)) |
+ ex::then(std::move(f_setup)) | ex::bulk(n, std::move(f)) |
+ ex::then(std::move(f_finalize)) |
+ ex::then(Kokkos::Experimental::HPX::
+ impl_decrement_active_parallel_region_count) |
+ ex::ensure_started();
+ }
+ }
+
+#if defined(KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH)
+ if (force_synchronous)
+#endif
+ {
+ impl_instance_fence_locked(
+ "Kokkos::Experimental::HPX: fence due to forced syncronizations");
+ }
+ }
+
+ template <typename Functor, typename Index>
+ void impl_bulk_setup_finalize(
+ bool force_synchronous, bool is_light_weight_policy,
+ Functor const &functor, Index const n,
+ hpx::threads::thread_stacksize stacksize =
+ hpx::threads::thread_stacksize::default_) const {
+ impl_bulk_setup_finalize_erased(force_synchronous, is_light_weight_policy,
+ {[functor](Index i) {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ impl_in_parallel_scope p;
+#endif
+ functor.execute_range(i);
+ }},
+ {[functor]() {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ impl_in_parallel_scope p;
+#endif
+ functor.setup();
+ }},
+ {[functor]() {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ impl_in_parallel_scope p;
+#endif
+ functor.finalize();
+ }},
+ n, stacksize);
+ }
+
+ static constexpr const char *name() noexcept { return "HPX"; }
+
+ private:
+ friend bool operator==(HPX const &lhs, HPX const &rhs) {
+ return lhs.impl_instance_id() == rhs.impl_instance_id();
+ }
+ friend bool operator!=(HPX const &lhs, HPX const &rhs) {
+ return !(lhs == rhs);
+ }
+};
+
+template <typename... Args>
+std::vector<HPX> partition_space(HPX const &, Args... args) {
+ std::vector<HPX> instances(sizeof...(args));
+ for (auto &in : instances) in = HPX(HPX::instance_mode::independent);
+ return instances;
+}
+
+template <typename T>
+std::vector<HPX> partition_space(HPX const &, std::vector<T> const &weights) {
+ std::vector<HPX> instances(weights.size());
+ for (auto &in : instances) in = HPX(HPX::instance_mode::independent);
+ return instances;
+}
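+
+// Usage sketch: both overloads ignore the argument values and simply create
+// one independent instance per requested partition, e.g.
+//   auto instances = Kokkos::Experimental::partition_space(
+//       Kokkos::Experimental::HPX(), 1, 1);  // two independent instances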
+
+extern template void HPX::impl_bulk_plain_erased<int>(
+ bool, bool, std::function<void(int)> &&, int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_plain_erased<unsigned int>(
+ bool, bool, std::function<void(unsigned int)> &&, unsigned int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_plain_erased<long>(
+ bool, bool, std::function<void(long)> &&, long const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_plain_erased<std::size_t>(
+ bool, bool, std::function<void(std::size_t)> &&, std::size_t const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_setup_finalize_erased<int>(
+ bool, bool, std::function<void(int)> &&, std::function<void()> &&,
+ std::function<void()> &&, int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_setup_finalize_erased<unsigned int>(
+ bool, bool, std::function<void(unsigned int)> &&, std::function<void()> &&,
+ std::function<void()> &&, unsigned int const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_setup_finalize_erased<long>(
+ bool, bool, std::function<void(long)> &&, std::function<void()> &&,
+ std::function<void()> &&, long const,
+ hpx::threads::thread_stacksize stacksize) const;
+
+extern template void HPX::impl_bulk_setup_finalize_erased<std::size_t>(
+ bool, bool, std::function<void(std::size_t)> &&, std::function<void()> &&,
+ std::function<void()> &&, std::size_t const,
+ hpx::threads::thread_stacksize stacksize) const;
+} // namespace Experimental
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Kokkos::Experimental::HPX> {
+ static constexpr DeviceType id = DeviceType::HPX;
+ static int device_id(const Kokkos::Experimental::HPX &) { return 0; }
+};
+} // namespace Experimental
+} // namespace Tools
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HPX::memory_space,
+ Kokkos::Experimental::HPX::scratch_memory_space> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = false };
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+template <>
+class UniqueToken<HPX, UniqueTokenScope::Instance> {
+ private:
+ using buffer_type = Kokkos::View<uint32_t *, Kokkos::HostSpace>;
+ int m_count;
+ buffer_type m_buffer_view;
+ uint32_t volatile *m_buffer;
+
+ public:
+ using execution_space = HPX;
+ using size_type = int;
+
+ /// \brief Create an object sized for the concurrency of the given instance.
+ ///
+ /// This object should not be shared between instances.
+ UniqueToken(execution_space const & = execution_space()) noexcept
+ : m_count(execution_space::impl_max_hardware_threads()),
+ m_buffer_view(buffer_type()),
+ m_buffer(nullptr) {}
+
+ UniqueToken(size_type max_size, execution_space const & = execution_space())
+ : m_count(max_size > execution_space::impl_max_hardware_threads()
+ ? execution_space::impl_max_hardware_threads()
+ : max_size),
+ m_buffer_view(
+ max_size > execution_space::impl_max_hardware_threads()
+ ? buffer_type()
+ : buffer_type("UniqueToken::m_buffer_view",
+ ::Kokkos::Impl::concurrent_bitset::buffer_bound(
+ m_count))),
+ m_buffer(m_buffer_view.data()) {}
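+
+ // Two modes are used above: with no explicit size, m_buffer stays null and
+ // the hardware thread id itself serves as the token; with a requested size
+ // no larger than the hardware concurrency, tokens are handed out through a
+ // concurrent bitset backed by m_buffer_view.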
+
+ /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+ KOKKOS_INLINE_FUNCTION
+ int size() const noexcept { return m_count; }
+
+ /// \brief acquire value such that 0 <= value < size()
+ KOKKOS_INLINE_FUNCTION
+ int acquire() const noexcept {
+ KOKKOS_IF_ON_HOST((
+ if (m_buffer == nullptr) {
+ return execution_space::impl_hardware_thread_id();
+ } else {
+ const ::Kokkos::pair<int, int> result =
+ ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
+ m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
+
+ if (result.first < 0) {
+ ::Kokkos::abort(
+ "UniqueToken<HPX> failure to acquire tokens, no tokens "
+ "available");
+ }
+ return result.first;
+ }))
+
+ KOKKOS_IF_ON_DEVICE((return 0;))
+ }
+
+ /// \brief release a value acquired by generate
+ KOKKOS_INLINE_FUNCTION
+ void release(int i) const noexcept {
+ KOKKOS_IF_ON_HOST((if (m_buffer != nullptr) {
+ ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+ }))
+
+ KOKKOS_IF_ON_DEVICE(((void)i;))
+ }
+};
+
+template <>
+class UniqueToken<HPX, UniqueTokenScope::Global> {
+ public:
+ using execution_space = HPX;
+ using size_type = int;
+ UniqueToken(execution_space const & = execution_space()) noexcept {}
+
+ // NOTE: Currently this assumes that there is no oversubscription.
+ // hpx::get_num_worker_threads can't be used directly because it may yield
+ // its task (problematic if called after hpx::get_worker_thread_num).
+ int size() const noexcept { return HPX::impl_max_hardware_threads(); }
+ int acquire() const noexcept { return HPX::impl_hardware_thread_id(); }
+ void release(int) const noexcept {}
+};
+} // namespace Experimental
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+struct HPXTeamMember {
+ public:
+ using execution_space = Kokkos::Experimental::HPX;
+ using scratch_memory_space =
+ Kokkos::ScratchMemorySpace<Kokkos::Experimental::HPX>;
+ using team_handle = HPXTeamMember;
+
+ private:
+ scratch_memory_space m_team_shared;
+
+ int m_league_size;
+ int m_league_rank;
+ int m_team_size;
+ int m_team_rank;
+
+ public:
+ KOKKOS_INLINE_FUNCTION
+ const scratch_memory_space &team_shmem() const {
+ return m_team_shared.set_team_thread_mode(0, 1, 0);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const execution_space::scratch_memory_space &team_scratch(const int) const {
+ return m_team_shared.set_team_thread_mode(0, 1, 0);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const execution_space::scratch_memory_space &thread_scratch(const int) const {
+ return m_team_shared.set_team_thread_mode(0, team_size(), team_rank());
+ }
+
+ KOKKOS_INLINE_FUNCTION int league_rank() const noexcept {
+ return m_league_rank;
+ }
+
+ KOKKOS_INLINE_FUNCTION int league_size() const noexcept {
+ return m_league_size;
+ }
+
+ KOKKOS_INLINE_FUNCTION int team_rank() const noexcept { return m_team_rank; }
+ KOKKOS_INLINE_FUNCTION int team_size() const noexcept { return m_team_size; }
+
+ template <class... Properties>
+ constexpr KOKKOS_INLINE_FUNCTION HPXTeamMember(
+ const TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
+ &policy,
+ const int team_rank, const int league_rank, void *scratch,
+ size_t scratch_size) noexcept
+ : m_team_shared(scratch, scratch_size, scratch, scratch_size),
+ m_league_size(policy.league_size()),
+ m_league_rank(league_rank),
+ m_team_size(policy.team_size()),
+ m_team_rank(team_rank) {}
+
+ KOKKOS_INLINE_FUNCTION
+ void team_barrier() const {}
+
+ template <class ValueType>
+ KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType &, const int &) const {}
+
+ template <class Closure, class ValueType>
+ KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure &closure,
+ ValueType &value,
+ const int &) const {
+ closure(value);
+ }
+
+ template <class ValueType, class JoinOp>
+ KOKKOS_INLINE_FUNCTION ValueType team_reduce(const ValueType &value,
+ const JoinOp &) const {
+ return value;
+ }
+
+ template <class ReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ team_reduce(const ReducerType &) const {}
+
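+ // Since team_size is always 1 on this backend, the exclusive prefix for
+ // the single team member is 0; only the optional global accumulator is
+ // updated (atomically) with this thread's contribution.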
+ template <typename Type>
+ KOKKOS_INLINE_FUNCTION Type
+ team_scan(const Type &value, Type *const global_accum = nullptr) const {
+ if (global_accum) {
+ Kokkos::atomic_fetch_add(global_accum, value);
+ }
+
+ return 0;
+ }
+};
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
+ : public PolicyTraits<Properties...> {
+ public:
+ using traits = PolicyTraits<Properties...>;
+
+ //! Tag this class as a kokkos execution policy
+ using execution_policy = TeamPolicyInternal;
+
+ using member_type = HPXTeamMember;
+
+ //! Execution space of this execution policy:
+ using execution_space = Kokkos::Experimental::HPX;
+
+ private:
+ typename traits::execution_space m_space{};
+ int m_league_size;
+ int m_team_size;
+ std::size_t m_team_scratch_size[2];
+ std::size_t m_thread_scratch_size[2];
+ int m_chunk_size;
+
+ public:
+ // NOTE: Max size is 1 for simplicity. In most cases more than 1 is not
+ // necessary on CPU. Implement later if there is a need.
+ template <class FunctorType>
+ inline static int team_size_max(const FunctorType &) {
+ return 1;
+ }
+
+ template <class FunctorType>
+ inline static int team_size_recommended(const FunctorType &) {
+ return 1;
+ }
+
+ template <class FunctorType>
+ inline static int team_size_recommended(const FunctorType &, const int &) {
+ return 1;
+ }
+
+ template <class FunctorType>
+ int team_size_max(const FunctorType &, const ParallelForTag &) const {
+ return 1;
+ }
+
+ template <class FunctorType>
+ int team_size_max(const FunctorType &, const ParallelReduceTag &) const {
+ return 1;
+ }
+
+ template <class FunctorType, class ReducerType>
+ int team_size_max(const FunctorType &, const ReducerType &,
+ const ParallelReduceTag &) const {
+ return 1;
+ }
+
+ template <class FunctorType>
+ int team_size_recommended(const FunctorType &, const ParallelForTag &) const {
+ return 1;
+ }
+
+ template <class FunctorType>
+ int team_size_recommended(const FunctorType &,
+ const ParallelReduceTag &) const {
+ return 1;
+ }
+
+ template <class FunctorType, class ReducerType>
+ int team_size_recommended(const FunctorType &, const ReducerType &,
+ const ParallelReduceTag &) const {
+ return 1;
+ }
+
+ static int vector_length_max() { return 1; }
+
+ inline int impl_vector_length() noexcept { return 1; }
+ inline bool impl_auto_team_size() noexcept { return false; }
+ inline bool impl_auto_vector_length() noexcept { return false; }
+ inline void impl_set_vector_length(int) noexcept {}
+ inline void impl_set_team_size(int) noexcept {}
+
+ private:
+ inline void init(const int league_size_request, const int team_size_request) {
+ m_league_size = league_size_request;
+ const int max_team_size = 1; // TODO: Can't use team_size_max(...) because
+ // it requires a functor as argument.
+ m_team_size =
+ team_size_request > max_team_size ? max_team_size : team_size_request;
+
+ if (m_chunk_size > 0) {
+ if (!Impl::is_integral_power_of_two(m_chunk_size))
+ Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+ } else {
+ int new_chunk_size = 1;
+ while (new_chunk_size * 4 * m_space.concurrency() < m_league_size) {
+ new_chunk_size *= 2;
+ }
+
+ if (new_chunk_size < 128) {
+ new_chunk_size = 1;
+ while ((new_chunk_size * m_space.concurrency() < m_league_size) &&
+ (new_chunk_size < 128))
+ new_chunk_size *= 2;
+ }
+
+ m_chunk_size = new_chunk_size;
+ }
+ }
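+
+ // For example, with concurrency() == 8 and a league size of 100 the
+ // heuristic above first doubles the chunk size while
+ // chunk * 4 * concurrency < league_size (stopping at 4); since 4 < 128 it
+ // restarts with the weaker bound, yielding a chunk size of 16, i.e.
+ // ceil(100 / 16) = 7 chunks for 8 workers.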
+
+ public:
+ inline int team_size() const { return m_team_size; }
+ inline int league_size() const { return m_league_size; }
+
+ size_t scratch_size(const int &level, int team_size_ = -1) const {
+ if (team_size_ < 0) {
+ team_size_ = m_team_size;
+ }
+ return m_team_scratch_size[level] +
+ team_size_ * m_thread_scratch_size[level];
+ }
+
+ inline static int scratch_size_max(int level) {
+ return (level == 0 ? 1024 * 32 : // Roughly L1 size
+ 20 * 1024 * 1024); // Limit to keep compatibility with CUDA
+ }
+
+ public:
+ template <class ExecSpace, class... OtherProperties>
+ friend class TeamPolicyInternal;
+
+ const typename traits::execution_space &space() const { return m_space; }
+
+ template <class... OtherProperties>
+ TeamPolicyInternal(const TeamPolicyInternal<Kokkos::Experimental::HPX,
+ OtherProperties...> &p) {
+ m_space = p.m_space;
+ m_league_size = p.m_league_size;
+ m_team_size = p.m_team_size;
+ m_team_scratch_size[0] = p.m_team_scratch_size[0];
+ m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+ m_team_scratch_size[1] = p.m_team_scratch_size[1];
+ m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+ m_chunk_size = p.m_chunk_size;
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space &space,
+ int league_size_request, int team_size_request,
+ int /* vector_length_request */ = 1)
+ : m_space{space},
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space &space,
+ int league_size_request, const Kokkos::AUTO_t &,
+ int /* vector_length_request */ = 1)
+ : m_space{space},
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, 1);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space &space,
+ int league_size_request,
+ const Kokkos::AUTO_t &, /* team_size_request */
+ const Kokkos::AUTO_t & /* vector_length_request */)
+ : m_space{space},
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, 1);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space &space,
+ int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t & /* vector_length_request */
+ )
+ : m_space{space},
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request);
+ }
+
+ TeamPolicyInternal(int league_size_request,
+ const Kokkos::AUTO_t &, /* team_size_request */
+ const Kokkos::AUTO_t & /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, 1);
+ }
+
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t & /* vector_length_request */
+ )
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request);
+ }
+
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ int /* vector_length_request */ = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request);
+ }
+
+ TeamPolicyInternal(int league_size_request, const Kokkos::AUTO_t &,
+ int /* vector_length_request */ = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(0) {
+ init(league_size_request, 1);
+ }
+
+ inline int chunk_size() const { return m_chunk_size; }
+
+ inline TeamPolicyInternal &set_chunk_size(
+ typename traits::index_type chunk_size_) {
+ m_chunk_size = chunk_size_;
+ return *this;
+ }
+
+ inline TeamPolicyInternal &set_scratch_size(const int &level,
+ const PerTeamValue &per_team) {
+ m_team_scratch_size[level] = per_team.value;
+ return *this;
+ }
+
+ inline TeamPolicyInternal &set_scratch_size(
+ const int &level, const PerThreadValue &per_thread) {
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ inline TeamPolicyInternal &set_scratch_size(
+ const int &level, const PerTeamValue &per_team,
+ const PerThreadValue &per_thread) {
+ m_team_scratch_size[level] = per_team.value;
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ public:
+ void execute_range(const Member i_chunk) const {
+ const auto r = get_chunk_range(i_chunk, m_policy.begin(),
+ m_policy.chunk_size(), m_policy.end());
+ for (Member i = r.begin; i < r.end; ++i) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor(i);
+ } else {
+ m_functor(WorkTag{}, i);
+ }
+ }
+ }
+
+ void execute() const {
+ const Member num_chunks =
+ get_num_chunks(m_policy.begin(), m_policy.chunk_size(), m_policy.end());
+ m_policy.space().impl_bulk_plain(false, is_light_weight_policy<Policy>(),
+ *this, num_chunks,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ inline ParallelFor(const FunctorType &arg_functor, Policy arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using WorkTag = typename MDRangePolicy::work_tag;
+ using Member = typename Policy::member_type;
+ using iterate_type =
+ typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+ WorkTag, void>;
+
+ const iterate_type m_iter;
+ const Policy m_policy;
+
+ public:
+ void execute_range(const Member i_chunk) const {
+ const auto r = get_chunk_range(i_chunk, m_policy.begin(),
+ m_policy.chunk_size(), m_policy.end());
+ for (Member i = r.begin; i < r.end; ++i) {
+ m_iter(i);
+ }
+ }
+
+ void execute() const {
+ const Member num_chunks =
+ get_num_chunks(m_policy.begin(), m_policy.chunk_size(), m_policy.end());
+ m_iter.m_rp.space().impl_bulk_plain(
+ false, is_light_weight_policy<MDRangePolicy>(), *this, num_chunks,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ inline ParallelFor(const FunctorType &arg_functor, MDRangePolicy arg_policy)
+ : m_iter(arg_policy, arg_functor),
+ m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)) {}
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy &, const Functor &) {
+ /**
+ * 1024 here is just our guess for a reasonable max tile size,
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using value_type = typename ReducerType::value_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_force_synchronous;
+
+ public:
+ void setup() const {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ const std::size_t value_size = reducer.value_size();
+ const int num_worker_threads = m_policy.space().concurrency();
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, value_size);
+
+ for (int t = 0; t < num_worker_threads; ++t) {
+ reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+ }
+ }
+
+ void execute_range(const Member i_chunk) const {
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ reference_type update =
+ ReducerType::reference(reinterpret_cast<pointer_type>(
+ buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id())));
+ const auto r = get_chunk_range(i_chunk, m_policy.begin(),
+ m_policy.chunk_size(), m_policy.end());
+ for (Member i = r.begin; i < r.end; ++i) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor_reducer.get_functor()(i, update);
+ } else {
+ m_functor_reducer.get_functor()(WorkTag{}, i, update);
+ }
+ }
+ }
+
+ void finalize() const {
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ const int num_worker_threads = m_policy.space().concurrency();
+ for (int i = 1; i < num_worker_threads; ++i) {
+ reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
+ reinterpret_cast<pointer_type>(buffer.get(i)));
+ }
+
+ pointer_type final_value_ptr =
+ reinterpret_cast<pointer_type>(buffer.get(0));
+
+ reducer.final(final_value_ptr);
+
+ if (m_result_ptr != nullptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = final_value_ptr[j];
+ }
+ }
+ }
+
+ void execute() const {
+ if (m_policy.end() <= m_policy.begin()) {
+ if (m_result_ptr) {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ return;
+ }
+
+ const Member num_chunks =
+ get_num_chunks(m_policy.begin(), m_policy.chunk_size(), m_policy.end());
+ m_policy.space().impl_bulk_setup_finalize(
+ m_force_synchronous, is_light_weight_policy<Policy>(), *this,
+ num_chunks, hpx::threads::thread_stacksize::nostack);
+ }
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ Policy arg_policy, const ViewType &arg_view)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_view.data()),
+ m_force_synchronous(!arg_view.impl_track().has_record()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "HPX reduce result must be a View accessible from HostSpace");
+ }
+};
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using WorkTag = typename MDRangePolicy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, CombinedFunctorReducerType, WorkTag, reference_type>;
+
+ const iterate_type m_iter;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_force_synchronous;
+
+ public:
+ void setup() const {
+ const ReducerType &reducer = m_iter.m_func.get_reducer();
+ const std::size_t value_size = reducer.value_size();
+ const int num_worker_threads = m_policy.space().concurrency();
+
+ hpx_thread_buffer &buffer = m_iter.m_rp.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, value_size);
+
+ for (int t = 0; t < num_worker_threads; ++t) {
+ reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+ }
+ }
+
+ void execute_range(const Member i_chunk) const {
+ hpx_thread_buffer &buffer = m_iter.m_rp.space().impl_get_buffer();
+ reference_type update =
+ ReducerType::reference(reinterpret_cast<pointer_type>(
+ buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id())));
+ const auto r = get_chunk_range(i_chunk, m_policy.begin(),
+ m_policy.chunk_size(), m_policy.end());
+ for (Member i = r.begin; i < r.end; ++i) {
+ m_iter(i, update);
+ }
+ }
+
+ void finalize() const {
+ hpx_thread_buffer &buffer = m_iter.m_rp.space().impl_get_buffer();
+ const ReducerType &reducer = m_iter.m_func.get_reducer();
+ const int num_worker_threads = m_policy.space().concurrency();
+ for (int i = 1; i < num_worker_threads; ++i) {
+ reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
+ reinterpret_cast<pointer_type>(buffer.get(i)));
+ }
+
+ pointer_type final_value_ptr =
+ reinterpret_cast<pointer_type>(buffer.get(0));
+
+ reducer.final(final_value_ptr);
+
+ if (m_result_ptr != nullptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = final_value_ptr[j];
+ }
+ }
+ }
+
+ void execute() const {
+ const Member num_chunks =
+ get_num_chunks(m_policy.begin(), m_policy.chunk_size(), m_policy.end());
+ m_iter.m_rp.space().impl_bulk_setup_finalize(
+ m_force_synchronous, is_light_weight_policy<MDRangePolicy>(), *this,
+ num_chunks, hpx::threads::thread_stacksize::nostack);
+ }
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ MDRangePolicy arg_policy, const ViewType &arg_view)
+ : m_iter(arg_policy, arg_functor_reducer),
+ m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)),
+ m_result_ptr(arg_view.data()),
+ m_force_synchronous(!arg_view.impl_track().has_record()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "HPX reduce result must be a View accessible from HostSpace");
+ }
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy &, const Functor &) {
+ /**
+ * 1024 here is just our guess for a reasonable max tile size,
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+ using Analysis =
+ FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType, void>;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+ using value_type = typename Analysis::value_type;
+ using barrier_type = hpx::barrier<>;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ public:
+ void setup() const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ const std::size_t value_size = Analysis::value_size(m_functor);
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, 2 * value_size, sizeof(barrier_type));
+
+ new (buffer.get_extra_space()) barrier_type(num_worker_threads);
+ }
+
+ void execute_chunk(const Member i_begin, const Member i_end,
+ reference_type update, const bool final) const {
+ for (Member i = i_begin; i < i_end; ++i) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor(i, update, final);
+ } else {
+ m_functor(WorkTag{}, i, update, final);
+ }
+ }
+ }
+
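+ // The scan proceeds in three phases per worker: (1) each worker scans its
+ // own range into its buffer slot (final == false); (2) after a barrier,
+ // worker 0 combines the per-worker totals into exclusive offsets stored in
+ // the second half of each slot; (3) after another barrier, each worker
+ // rescans its range starting from its offset (final == true).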
+ void execute_range(int t) const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ const int value_count = Analysis::value_count(m_functor);
+ const std::size_t value_size = Analysis::value_size(m_functor);
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ typename Analysis::Reducer final_reducer(m_functor);
+ barrier_type &barrier =
+ *static_cast<barrier_type *>(buffer.get_extra_space());
+ reference_type update_sum =
+ final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+
+ const WorkRange range(m_policy, t, num_worker_threads);
+ execute_chunk(range.begin(), range.end(), update_sum, false);
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ {
+ // Since arrive_and_wait may yield and resume on another worker thread we
+ // set in_parallel = false on the current thread before suspending and set
+ // it again to true when we resume.
+ Kokkos::Experimental::HPX::impl_not_in_parallel_scope p;
+ barrier.arrive_and_wait();
+ }
+#else
+ barrier.arrive_and_wait();
+#endif
+
+ if (t == 0) {
+ final_reducer.init(reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(0)) + value_size));
+
+ for (int i = 1; i < num_worker_threads; ++i) {
+ pointer_type ptr_1_prev =
+ reinterpret_cast<pointer_type>(buffer.get(i - 1));
+ pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(i - 1)) + value_size);
+ pointer_type ptr_2 = reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(i)) + value_size);
+
+ for (int j = 0; j < value_count; ++j) {
+ ptr_2[j] = ptr_2_prev[j];
+ }
+
+ final_reducer.join(ptr_2, ptr_1_prev);
+ }
+ }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ {
+ // Since arrive_and_wait may yield and resume on another worker thread we
+ // set in_parallel = false on the current thread before suspending and set
+ // it again to true when we resume.
+ Kokkos::Experimental::HPX::impl_not_in_parallel_scope p;
+ barrier.arrive_and_wait();
+ }
+#else
+ barrier.arrive_and_wait();
+#endif
+
+ reference_type update_base =
+ Analysis::Reducer::reference(reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(t)) + value_size));
+
+ execute_chunk(range.begin(), range.end(), update_base, true);
+ }
+
+ void finalize() const {
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ static_cast<barrier_type *>(buffer.get_extra_space())->~barrier_type();
+ }
+
+ void execute() const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ m_policy.space().impl_bulk_setup_finalize(
+ false, is_light_weight_policy<Policy>(), *this, num_worker_threads,
+ hpx::threads::thread_stacksize::small_);
+ }
+
+ inline ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+ ReturnType, Kokkos::Experimental::HPX> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+ using Analysis = FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ReturnType>;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+ using value_type = typename Analysis::value_type;
+ using barrier_type = hpx::barrier<>;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+ pointer_type m_result_ptr;
+
+ public:
+ void setup() const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ const std::size_t value_size = Analysis::value_size(m_functor);
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, 2 * value_size, sizeof(barrier_type));
+
+ new (buffer.get_extra_space()) barrier_type(num_worker_threads);
+ }
+
+ void execute_chunk(const Member i_begin, const Member i_end,
+ reference_type update, const bool final) const {
+ for (Member i = i_begin; i < i_end; ++i) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor(i, update, final);
+ } else {
+ m_functor(WorkTag{}, i, update, final);
+ }
+ }
+ }
+
+ void execute_range(int t) const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ const int value_count = Analysis::value_count(m_functor);
+ const std::size_t value_size = Analysis::value_size(m_functor);
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ typename Analysis::Reducer final_reducer(m_functor);
+ barrier_type &barrier =
+ *static_cast<barrier_type *>(buffer.get_extra_space());
+ reference_type update_sum =
+ final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+
+ const WorkRange range(m_policy, t, num_worker_threads);
+ execute_chunk(range.begin(), range.end(), update_sum, false);
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ {
+ // Since arrive_and_wait may yield and resume on another worker thread we
+ // set in_parallel = false on the current thread before suspending and set
+ // it again to true when we resume.
+ Kokkos::Experimental::HPX::impl_not_in_parallel_scope p;
+ barrier.arrive_and_wait();
+ }
+#else
+ barrier.arrive_and_wait();
+#endif
+
+ if (t == 0) {
+ final_reducer.init(reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(0)) + value_size));
+
+ for (int i = 1; i < num_worker_threads; ++i) {
+ pointer_type ptr_1_prev =
+ reinterpret_cast<pointer_type>(buffer.get(i - 1));
+ pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(i - 1)) + value_size);
+ pointer_type ptr_2 = reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(i)) + value_size);
+
+ for (int j = 0; j < value_count; ++j) {
+ ptr_2[j] = ptr_2_prev[j];
+ }
+
+ final_reducer.join(ptr_2, ptr_1_prev);
+ }
+ }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ {
+ // Since arrive_and_wait may yield and resume on another worker thread we
+ // set in_parallel = false on the current thread before suspending and set
+ // it again to true when we resume.
+ Kokkos::Experimental::HPX::impl_not_in_parallel_scope p;
+ barrier.arrive_and_wait();
+ }
+#else
+ barrier.arrive_and_wait();
+#endif
+
+ reference_type update_base =
+ Analysis::Reducer::reference(reinterpret_cast<pointer_type>(
+ static_cast<char *>(buffer.get(t)) + value_size));
+
+ execute_chunk(range.begin(), range.end(), update_base, true);
+
+ if (t == num_worker_threads - 1) {
+ *m_result_ptr = update_base;
+ }
+ }
+
+ void finalize() const {
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ static_cast<barrier_type *>(buffer.get_extra_space())->~barrier_type();
+ }
+
+ void execute() const {
+ const int num_worker_threads = m_policy.space().concurrency();
+ m_policy.space().impl_bulk_setup_finalize(
+ false, is_light_weight_policy<Policy>(), *this, num_worker_threads,
+ hpx::threads::thread_stacksize::small_);
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType &arg_functor,
+ const Policy &arg_policy,
+ const ViewType &arg_result_view)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::HPX parallel_scan result must be host-accessible!");
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+ using memory_space = Kokkos::HostSpace;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+ const int m_league;
+ const std::size_t m_shared;
+
+ public:
+ void setup() const {
+ const int num_worker_threads = m_policy.space().concurrency();
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, m_shared);
+ }
+
+ void execute_range(const int i) const {
+ const int t = Kokkos::Experimental::HPX::impl_hardware_thread_id();
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ const auto r =
+ get_chunk_range(i, 0, m_policy.chunk_size(), m_policy.league_size());
+ for (int league_rank = r.begin; league_rank < r.end; ++league_rank) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor(Member(m_policy, 0, league_rank, buffer.get(t), m_shared));
+ } else {
+ m_functor(WorkTag{},
+ Member(m_policy, 0, league_rank, buffer.get(t), m_shared));
+ }
+ }
+ }
+
+ void finalize() const {}
+
+ void execute() const {
+ const int num_chunks =
+ get_num_chunks(0, m_policy.chunk_size(), m_policy.league_size());
+ m_policy.space().impl_bulk_setup_finalize(
+ false, is_light_weight_policy<Policy>(), *this, num_chunks,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_league(arg_policy.league_size()),
+ m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor, arg_policy.team_size())) {}
+};
+
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Member = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+ using value_type = typename ReducerType::value_type;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const int m_league;
+ const Policy m_policy;
+ pointer_type m_result_ptr;
+ const std::size_t m_shared;
+ const bool m_force_synchronous;
+
+ public:
+ void setup() const {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ const std::size_t value_size = reducer.value_size();
+ const int num_worker_threads = m_policy.space().concurrency();
+
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ buffer.resize(num_worker_threads, value_size + m_shared);
+
+ for (int t = 0; t < num_worker_threads; ++t) {
+ reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+ }
+ }
+
+ void execute_range(const int i) const {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ const std::size_t value_size = reducer.value_size();
+ std::size_t t = Kokkos::Experimental::HPX::impl_hardware_thread_id();
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ reference_type update =
+ ReducerType::reference(reinterpret_cast<pointer_type>(buffer.get(t)));
+ const auto r =
+ get_chunk_range(i, 0, m_policy.chunk_size(), m_policy.league_size());
+ char *local_buffer = static_cast<char *>(buffer.get(t)) + value_size;
+ for (int league_rank = r.begin; league_rank < r.end; ++league_rank) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor_reducer.get_functor()(
+ Member(m_policy, 0, league_rank, local_buffer, m_shared), update);
+ } else {
+ m_functor_reducer.get_functor()(
+ WorkTag{}, Member(m_policy, 0, league_rank, local_buffer, m_shared),
+ update);
+ }
+ }
+ }
+
+ void finalize() const {
+ hpx_thread_buffer &buffer = m_policy.space().impl_get_buffer();
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ const int num_worker_threads = m_policy.space().concurrency();
+ const pointer_type ptr = reinterpret_cast<pointer_type>(buffer.get(0));
+ for (int t = 1; t < num_worker_threads; ++t) {
+ reducer.join(ptr, reinterpret_cast<pointer_type>(buffer.get(t)));
+ }
+
+ reducer.final(ptr);
+
+ if (m_result_ptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = ptr[j];
+ }
+ }
+ }
+
+ void execute() const {
+ if (m_policy.league_size() * m_policy.team_size() == 0) {
+ if (m_result_ptr) {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ return;
+ }
+
+ const int num_chunks =
+ get_num_chunks(0, m_policy.chunk_size(), m_policy.league_size());
+ m_policy.space().impl_bulk_setup_finalize(
+ m_force_synchronous, is_light_weight_policy<Policy>(), *this,
+ num_chunks, hpx::threads::thread_stacksize::nostack);
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ const Policy &arg_policy, const ViewType &arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_league(arg_policy.league_size()),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ m_functor_reducer.get_functor(), arg_policy.team_size())),
+ m_force_synchronous(!arg_result.impl_track().has_record()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "HPX reduce result must be a View accessible from HostSpace");
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ TeamThreadRange(const Impl::HPXTeamMember &thread, const iType &count) {
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+TeamThreadRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+ const iType2 &i_end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, iType(i_begin), iType(i_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ TeamVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+TeamVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+ const iType2 &i_end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, iType(i_begin), iType(i_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
+ return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+ const iType2 &i_end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+ thread, iType(i_begin), iType(i_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::HPXTeamMember> PerTeam(
+ const Impl::HPXTeamMember &thread) {
+ return Impl::ThreadSingleStruct<Impl::HPXTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::HPXTeamMember> PerThread(
+ const Impl::HPXTeamMember &thread) {
+ return Impl::VectorSingleStruct<Impl::HPXTeamMember>(thread);
+}
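+
+// Usage sketch inside a team functor (names are illustrative):
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, n),
+//                        [&](const int i) { /* ... */ });
+//   Kokkos::single(Kokkos::PerTeam(member), [&] { /* ... */ });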
+
+/** \brief Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda) {
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment)
+ lambda(i);
+}
+
+/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType,
+ typename = std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda, ValueType &result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HPXTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, value);
+ }
+
+ wrapped_reducer.final(&value);
+ result = value;
+}
+
+/** \brief Intra-thread vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda) {
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i);
+ }
+}
+
+/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType,
+ typename = std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda, ValueType &result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HPXTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type value;
+ wrapped_reducer.init(&value);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, value);
+ }
+ wrapped_reducer.final(&value);
+ result = value;
+}
+
+template <typename iType, class Lambda, typename ReducerType,
+ typename = std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda, const ReducerType &reducer) {
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HPXTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
+
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, value);
+ }
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
+}
+
+template <typename iType, class Lambda, typename ReducerType,
+ typename = std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const Lambda &lambda, const ReducerType &reducer) {
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::HPXTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, value);
+ }
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
+}
+
+template <typename iType, class FunctorType, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember> const
+ &loop_boundaries,
+ const FunctorType &lambda, ValueType &return_val) {
+ using functor_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same_v<functor_value_type, ValueType>,
+ "Non-matching value types of functor and return type");
+
+ ValueType scan_val{};
+
+ // Intra-member scan
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, scan_val, false);
+ }
+
+ // 'scan_val' output is the exclusive prefix sum
+ scan_val = loop_boundaries.thread.team_scan(scan_val);
+
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, scan_val, true);
+ }
+
+ return_val = scan_val;
+}
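+
+/* Usage sketch (illustrative only; `member`, `m`, and the Views `counts` and
+ * `offsets` are assumed to come from the enclosing team lambda):
+ *
+ * \code
+ * int total = 0;
+ * Kokkos::parallel_scan(
+ *     Kokkos::TeamThreadRange(member, m),
+ *     [&](const int i, int &partial, const bool final) {
+ *       if (final) offsets(i) = partial;  // exclusive prefix sum
+ *       partial += counts(i);
+ *     },
+ *     total);
+ * \endcode
+ */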
+
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_bounds,
+ const FunctorType &lambda) {
+ // Extract value_type from lambda
+ using value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+
+ value_type scan_val;
+ parallel_scan(loop_bounds, lambda, scan_val);
+}
+
+/** \brief Intra-thread vector parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
+ * operation is performed. Depending on the target execution space the operator
+ * might be called twice: once with final=false and once with final=true. When
+ * final==true val contains the prefix sum value. The contribution of this "i"
+ * needs to be added to val no matter whether final==true or not. In a serial
+ * execution (i.e. team_size==1) the operator is only called once with
+ * final==true. scan_val will be set to the final sum value over all vector
+ * lanes.
+ */
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const FunctorType &lambda) {
+ using value_type =
+ typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Experimental::HPX>, FunctorType,
+ void>::value_type;
+
+ value_type scan_val = value_type();
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, scan_val, true);
+ }
+}
+
+/** \brief Intra-thread vector parallel scan with a reducer. */
+template <typename iType, class FunctorType, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const FunctorType &lambda, const ReducerType &reducer) {
+ typename ReducerType::value_type scan_val;
+ reducer.init(scan_val);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+ i += loop_boundaries.increment) {
+ lambda(i, scan_val, true);
+ }
+ reducer.reference() = scan_val;
+}
+
+template <typename iType, class FunctorType, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+ &loop_boundaries,
+ const FunctorType &lambda, ValueType &return_val) {
+ // Extract ValueType from FunctorType
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same<closure_value_type, ValueType>::value,
+ "Non-matching value types of closure and return type");
+
+ ValueType accum;
+ parallel_scan(loop_boundaries, lambda, Kokkos::Sum<ValueType>(accum));
+
+ return_val = accum;
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
+ const FunctorType &lambda) {
+ lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
+ const FunctorType &lambda) {
+ lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
+ const FunctorType &lambda, ValueType &val) {
+ lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
+ const FunctorType &lambda, ValueType &val) {
+ lambda(val);
+}
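+
+/* Usage sketch (illustrative only; `member` comes from the enclosing team
+ * lambda and `compute_once` is a user-provided function): PerTeam runs the
+ * lambda once per team, PerThread once per calling thread; the overload
+ * taking `val` broadcasts the result to all callers.
+ *
+ * \code
+ * int root_value;
+ * Kokkos::single(
+ *     Kokkos::PerTeam(member), [&](int &v) { v = compute_once(); },
+ *     root_value);
+ * \endcode
+ */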
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#include <HPX/Kokkos_HPX_Task.hpp>
+#endif
+
+#endif /* #if defined( KOKKOS_ENABLE_HPX ) */
+#endif /* #ifndef KOKKOS_HPX_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HPX_MDRANGEPOLICY_HPP_
+#define KOKKOS_HPX_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Kokkos::Experimental::HPX,
+ ThreadAndVector>
+ : HostBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::Experimental::HPX,
+ Kokkos::Experimental::HPX::memory_space>;
+
+} // namespace Impl
+} // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_HPX_TASK_PREVENT_LINK_ERROR() {}
+#endif // #if defined( KOKKOS_ENABLE_HPX ) && defined( KOKKOS_ENABLE_TASKDAG )
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HPX_TASK_HPP
+#define KOKKOS_HPX_TASK_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Atomic.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+
+#include <HPX/Kokkos_HPX.hpp>
+
+#include <impl/Kokkos_TaskTeamMember.hpp>
+
+#include <hpx/execution.hpp>
+#include <hpx/future.hpp>
+
+#include <type_traits>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+template <class QueueType>
+class TaskQueueSpecialization<
+ SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>> {
+ public:
+ void setup() const {
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+
+ hpx_thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+ buffer.resize(num_worker_threads, 512);
+ }
+
+ void execute_range(int t) const {
+ // NOTE: This implementation has been simplified based on the
+ // assumption that team_size = 1. The HPX backend currently only
+ // supports a team size of 1.
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+
+ hpx_thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+
+ buffer.get(t);
+ HPXTeamMember member(
+ TeamPolicyInternal<Kokkos::Experimental::HPX>(
+ Kokkos::Experimental::HPX(), num_worker_threads, 1),
+ 0, t, buffer.get(t), 512);
+
+ member_type single_exec(*scheduler, member);
+ member_type &team_exec = single_exec;
+
+ auto &queue = scheduler->queue();
+ auto &team_scheduler = team_exec.scheduler();
+
+ using task_base_type = typename scheduler_type::task_base_type;
+ auto current_task = OptionalRef<task_base_type>(nullptr);
+
+ while (!queue.is_done()) {
+ current_task = queue.pop_ready_task(team_scheduler.team_scheduler_info());
+
+ if (current_task) {
+ KOKKOS_EXPECTS(current_task->is_single_runnable() ||
+ current_task->is_team_runnable());
+ current_task->as_runnable_task().run(single_exec);
+ queue.complete((*std::move(current_task)).as_runnable_task(),
+ team_scheduler.team_scheduler_info());
+ }
+ }
+ }
+
+ void finalize() const {}
+
+ using execution_space = Kokkos::Experimental::HPX;
+ using scheduler_type =
+ SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>;
+ using member_type =
+ TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
+ using memory_space = Kokkos::HostSpace;
+
+ static void execute(scheduler_type const &scheduler) {
+ // NOTE: We create an instance so that we can use impl_bulk_setup_finalize.
+ // This is not necessarily the most efficient, but can be improved later.
+ TaskQueueSpecialization<scheduler_type> task_queue;
+ task_queue.scheduler = &scheduler;
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+ Kokkos::Experimental::HPX().impl_bulk_setup_finalize(
+ true, false, task_queue, num_worker_threads,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ static uint32_t get_max_team_count(execution_space const &espace) {
+ return static_cast<uint32_t>(espace.concurrency());
+ }
+
+ template <typename TaskType>
+ static void get_function_pointer(typename TaskType::function_type &ptr,
+ typename TaskType::destroy_type &dtor) {
+ ptr = TaskType::apply;
+ dtor = TaskType::destroy;
+ }
+
+ private:
+ const scheduler_type *scheduler;
+};
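+
+/* Usage sketch (illustrative only; the tasking interface is experimental and
+ * the scheduler/memory-pool construction is elided; `make_scheduler` and
+ * `MyTask` stand in for user-provided code):
+ *
+ * \code
+ * using scheduler_type =
+ *     Kokkos::SimpleTaskScheduler<Kokkos::Experimental::HPX>;
+ * scheduler_type scheduler = make_scheduler(); // assumed helper
+ * auto future = Kokkos::host_spawn(Kokkos::TaskSingle(scheduler), MyTask{});
+ * Kokkos::wait(scheduler);
+ * \endcode
+ */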
+
+template <class Scheduler>
+class TaskQueueSpecializationConstrained<
+ Scheduler,
+ std::enable_if_t<std::is_same<typename Scheduler::execution_space,
+ Kokkos::Experimental::HPX>::value>> {
+ public:
+ void setup() const {
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+
+ hpx_thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+ buffer.resize(num_worker_threads, 512);
+
+ auto &queue = scheduler->queue();
+ queue.initialize_team_queues(num_worker_threads);
+ }
+
+ void execute_range(int t) const {
+ // NOTE: This implementation has been simplified based on the
+ // assumption that team_size = 1. The HPX backend currently only
+ // supports a team size of 1.
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+
+ hpx_thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+
+ buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id());
+ HPXTeamMember member(
+ TeamPolicyInternal<Kokkos::Experimental::HPX>(
+ Kokkos::Experimental::HPX(), num_worker_threads, 1),
+ 0, t, buffer.get(t), 512);
+
+ using task_base_type = typename scheduler_type::task_base;
+ using queue_type = typename scheduler_type::queue_type;
+
+ static task_base_type *const end = (task_base_type *)task_base_type::EndTag;
+ constexpr task_base_type *no_more_tasks_sentinel = nullptr;
+
+ member_type single_exec(*scheduler, member);
+ member_type &team_exec = single_exec;
+
+ auto &team_queue = team_exec.scheduler().queue();
+ task_base_type *task = no_more_tasks_sentinel;
+
+ do {
+ if (task != no_more_tasks_sentinel && task != end) {
+ team_queue.complete(task);
+ }
+
+ if (desul::atomic_load(&team_queue.m_ready_count,
+ desul::MemoryOrderAcquire(),
+ desul::MemoryScopeDevice()) > 0) {
+ task = end;
+ for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+ for (int j = 0; j < 2 && end == task; ++j) {
+ task = queue_type::pop_ready_task(&team_queue.m_ready[i][j]);
+ }
+ }
+ } else {
+ task = team_queue.attempt_to_steal_task();
+ }
+
+ if (task != no_more_tasks_sentinel && task != end) {
+ (*task->m_apply)(task, &single_exec);
+ }
+ } while (task != no_more_tasks_sentinel);
+ }
+
+ void finalize() const {}
+
+ using execution_space = Kokkos::Experimental::HPX;
+ using scheduler_type = Scheduler;
+ using member_type =
+ TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
+ using memory_space = Kokkos::HostSpace;
+
+ static void iff_single_thread_recursive_execute(
+ scheduler_type const &scheduler) {
+ using task_base_type = typename scheduler_type::task_base;
+ using queue_type = typename scheduler_type::queue_type;
+
+ if (1 == Kokkos::Experimental::HPX().concurrency()) {
+ task_base_type *const end = (task_base_type *)task_base_type::EndTag;
+ task_base_type *task = end;
+
+ HPXTeamMember member(TeamPolicyInternal<Kokkos::Experimental::HPX>(
+ Kokkos::Experimental::HPX(), 1, 1),
+ 0, 0, nullptr, 0);
+ member_type single_exec(scheduler, member);
+
+ do {
+ task = end;
+
+ // Loop by priority and then type
+ for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+ for (int j = 0; j < 2 && end == task; ++j) {
+ task =
+ queue_type::pop_ready_task(&scheduler.m_queue->m_ready[i][j]);
+ }
+ }
+
+ if (end == task) break;
+
+ (*task->m_apply)(task, &single_exec);
+
+ scheduler.m_queue->complete(task);
+
+ } while (true);
+ }
+ }
+
+ static void execute(scheduler_type const &scheduler) {
+ // NOTE: We create an instance so that we can use impl_bulk_setup_finalize.
+ // This is not necessarily the most efficient, but can be improved later.
+ TaskQueueSpecializationConstrained<scheduler_type> task_queue;
+ task_queue.scheduler = &scheduler;
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+ Kokkos::Experimental::HPX().impl_bulk_setup_finalize(
+ true, false, task_queue, num_worker_threads,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ template <typename TaskType>
+ static void get_function_pointer(typename TaskType::function_type &ptr,
+ typename TaskType::destroy_type &dtor) {
+ ptr = TaskType::apply;
+ dtor = TaskType::destroy;
+ }
+
+ private:
+ const scheduler_type *scheduler;
+};
+
+extern template class TaskQueue<
+ Kokkos::Experimental::HPX,
+ typename Kokkos::Experimental::HPX::memory_space>;
+
+} // namespace Impl
+} // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_HPX_TASK_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HPX_WORKGRAPHPOLICY_HPP
+#define KOKKOS_HPX_WORKGRAPHPOLICY_HPP
+
+#include <HPX/Kokkos_HPX.hpp>
+
+#include <hpx/execution.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+ Kokkos::Experimental::HPX> {
+ private:
+ using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+
+ Policy m_policy;
+ FunctorType m_functor;
+
+ public:
+ void execute_range(int) const {
+ std::int32_t w = m_policy.pop_work();
+ while (w != Policy::COMPLETED_TOKEN) {
+ if (w != Policy::END_TOKEN) {
+ if constexpr (std::is_same_v<WorkTag, void>) {
+ m_functor(w);
+ } else {
+ m_functor(WorkTag{}, w);
+ }
+ m_policy.completed_work(w);
+ }
+
+ w = m_policy.pop_work();
+ }
+ }
+
+ void execute() const {
+ const int num_worker_threads = Kokkos::Experimental::HPX().concurrency();
+ Kokkos::Experimental::HPX().impl_bulk_plain(
+ true, is_light_weight_policy<Policy>(), *this, num_worker_threads,
+ hpx::threads::thread_stacksize::nostack);
+ }
+
+ inline ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
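+
+/* Usage sketch (illustrative only; `graph` is assumed to be a Kokkos::Crs
+ * describing work-item dependencies and `process` a user-provided function):
+ *
+ * \code
+ * Kokkos::WorkGraphPolicy<Kokkos::Experimental::HPX, std::int32_t> policy(
+ *     graph);
+ * Kokkos::parallel_for("workgraph", policy,
+ *                      KOKKOS_LAMBDA(const std::int32_t w) { process(w); });
+ * \endcode
+ */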
+
+#endif /* #define KOKKOS_HPX_WORKGRAPHPOLICY_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CORE_EXP_INTEROP_HPP
#define KOKKOS_CORE_EXP_INTEROP_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
#define KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
#include <impl/KokkosExp_Host_IterateTile.hpp>
#include <Kokkos_ExecPolicy.hpp>
#include <type_traits>
+#include <cmath>
namespace Kokkos {
// NOTE the comparison below is encapsulated to silence warnings about
// pointless comparison of an unsigned integer with zero
template <class T>
-constexpr std::enable_if_t<!std::is_signed<T>::value, bool>
+constexpr std::enable_if_t<!std::is_signed_v<T>, bool>
is_less_than_value_initialized_variable(T) {
return false;
}
template <class T>
-constexpr std::enable_if_t<std::is_signed<T>::value, bool>
+constexpr std::enable_if_t<std::is_signed_v<T>, bool>
is_less_than_value_initialized_variable(T arg) {
return arg < T{};
}
// Checked narrowing conversion that calls abort if the cast changes the value
template <class To, class From>
-constexpr To checked_narrow_cast(From arg) {
+constexpr To checked_narrow_cast(From arg, std::size_t idx) {
constexpr const bool is_different_signedness =
- (std::is_signed<To>::value != std::is_signed<From>::value);
+ (std::is_signed_v<To> != std::is_signed_v<From>);
auto const ret = static_cast<To>(arg);
if (static_cast<From>(ret) != arg ||
(is_different_signedness &&
is_less_than_value_initialized_variable(arg) !=
is_less_than_value_initialized_variable(ret))) {
- Kokkos::abort("unsafe narrowing conversion");
+ auto msg =
+ "Kokkos::MDRangePolicy bound type error: an unsafe implicit conversion "
+ "is performed on a bound (" +
+ std::to_string(arg) + ") in dimension (" + std::to_string(idx) +
+ "), which may not preserve its original value.\n";
+ Kokkos::abort(msg.c_str());
}
return ret;
}
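+// Behavior sketch (illustrative only): checked_narrow_cast<std::int8_t>(300, 0)
+// would abort, since static_cast<std::int8_t>(300) does not round-trip back
+// to 300; the message names the offending bound (300) and its dimension (0).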
using T = typename Array::value_type;
Array a{};
constexpr std::size_t N = a.size();
- static_assert(M <= N, "");
+ static_assert(M <= N);
auto* ptr = a.data();
// NOTE equivalent to
// std::transform(std::begin(init), std::end(init), a.data(),
// [](U x) { return static_cast<T>(x); });
// except that std::transform is not constexpr.
- for (auto x : init) {
- *ptr++ = checked_narrow_cast<T>(x);
- (void)checked_narrow_cast<IndexType>(x); // see note above
+ for (std::size_t i = 0; i < M; ++i) {
+ *ptr++ = checked_narrow_cast<T>(init[i], i);
+ (void)checked_narrow_cast<IndexType>(init[i], i); // see note above
}
return a;
}
using T = typename NVCC_WONT_LET_ME_CALL_YOU_Array::value_type;
NVCC_WONT_LET_ME_CALL_YOU_Array a{};
constexpr std::size_t N = a.size();
- static_assert(M <= N, "");
+ static_assert(M <= N);
for (std::size_t i = 0; i < M; ++i) {
- a[i] = checked_narrow_cast<T>(other[i]);
- (void)checked_narrow_cast<IndexType>(other[i]); // see note above
+ a[i] = checked_narrow_cast<T>(other[i], i);
+ (void)checked_narrow_cast<IndexType>(other[i], i); // see note above
}
return a;
}
// multi-dimensional iteration pattern
template <typename... Properties>
-struct MDRangePolicy : public Kokkos::Impl::PolicyTraits<Properties...> {
- using traits = Kokkos::Impl::PolicyTraits<Properties...>;
- using range_policy = RangePolicy<Properties...>;
+struct MDRangePolicy;
+
+// Note: If MDRangePolicy were given a primary template, some compilers would
+// generate implicit CTAD deduction guides that deduce MDRangePolicy<>, which
+// is incorrect. Declaring it as a template specialization instead suppresses
+// the implicit guides. This works because at least one property (the
+// Rank<...>) must always be specified; otherwise we would hit the
+// static_assert "Kokkos Error: MD iteration pattern not defined". The
+// specialization uses <P, Properties...> consistently throughout.
+template <typename P, typename... Properties>
+struct MDRangePolicy<P, Properties...>
+ : public Kokkos::Impl::PolicyTraits<P, Properties...> {
+ using traits = Kokkos::Impl::PolicyTraits<P, Properties...>;
+ using range_policy = RangePolicy<P, Properties...>;
typename traits::execution_space m_space;
typename traits::schedule_type, typename traits::index_type>;
using execution_policy =
- MDRangePolicy<Properties...>; // needed for is_execution_space
- // interrogation
+ MDRangePolicy<P, Properties...>; // needed for is_execution_policy
+ // interrogation
template <class... OtherProperties>
friend struct MDRangePolicy;
- static_assert(!std::is_void<typename traits::iteration_pattern>::value,
+ static_assert(!std::is_void_v<typename traits::iteration_pattern>,
"Kokkos Error: MD iteration pattern not defined");
using iteration_pattern = typename traits::iteration_pattern;
using member_type = typename range_policy::member_type;
static constexpr int rank = iteration_pattern::rank;
+ static_assert(rank < 7, "Kokkos MDRangePolicy Error: Unsupported rank...");
using index_type = typename traits::index_type;
using array_index_type = std::int64_t;
template <typename LT, std::size_t LN, typename UT, std::size_t UN,
typename TT = array_index_type, std::size_t TN = rank,
- typename = std::enable_if_t<std::is_integral<LT>::value &&
- std::is_integral<UT>::value &&
- std::is_integral<TT>::value>>
+ typename = std::enable_if_t<std::is_integral_v<LT> &&
+ std::is_integral_v<UT> &&
+ std::is_integral_v<TT>>>
MDRangePolicy(const LT (&lower)[LN], const UT (&upper)[UN],
const TT (&tile)[TN] = {})
: MDRangePolicy(
template <typename LT, std::size_t LN, typename UT, std::size_t UN,
typename TT = array_index_type, std::size_t TN = rank,
- typename = std::enable_if_t<std::is_integral<LT>::value &&
- std::is_integral<UT>::value &&
- std::is_integral<TT>::value>>
+ typename = std::enable_if_t<std::is_integral_v<LT> &&
+ std::is_integral_v<UT> &&
+ std::is_integral_v<TT>>>
MDRangePolicy(const typename traits::execution_space& work_space,
const LT (&lower)[LN], const UT (&upper)[UN],
const TT (&tile)[TN] = {})
}
template <typename T, std::size_t NT = rank,
- typename = std::enable_if_t<std::is_integral<T>::value>>
+ typename = std::enable_if_t<std::is_integral_v<T>>>
MDRangePolicy(Kokkos::Array<T, rank> const& lower,
Kokkos::Array<T, rank> const& upper,
Kokkos::Array<T, NT> const& tile = Kokkos::Array<T, NT>{})
: MDRangePolicy(typename traits::execution_space(), lower, upper, tile) {}
template <typename T, std::size_t NT = rank,
- typename = std::enable_if_t<std::is_integral<T>::value>>
+ typename = std::enable_if_t<std::is_integral_v<T>>>
MDRangePolicy(const typename traits::execution_space& work_space,
Kokkos::Array<T, rank> const& lower,
Kokkos::Array<T, rank> const& upper,
}
bool impl_tune_tile_size() const { return m_tune_tile_size; }
+ tile_type tile_size_recommended() const {
+ tile_type rec_tile_sizes = {};
+
+ for (std::size_t i = 0; i < rec_tile_sizes.size(); ++i) {
+ rec_tile_sizes[i] = tile_size_recommended(i);
+ }
+ return rec_tile_sizes;
+ }
+
+ int max_total_tile_size() const {
+ return Impl::get_tile_size_properties(m_space).max_total_tile_size;
+ }
+
private:
+ int tile_size_recommended(const int tile_rank) const {
+ auto properties = Impl::get_tile_size_properties(m_space);
+ int last_rank = (inner_direction == Iterate::Right) ? rank - 1 : 0;
+ int rank_acc =
+ (inner_direction == Iterate::Right) ? tile_rank + 1 : tile_rank - 1;
+ int rec_tile_size = (std::pow(properties.default_tile_size, rank_acc) <
+ properties.max_total_tile_size)
+ ? properties.default_tile_size
+ : 1;
+
+ if (tile_rank == last_rank) {
+ rec_tile_size = tile_size_last_rank(
+ properties, m_upper[last_rank] - m_lower[last_rank]);
+ }
+ return rec_tile_size;
+ }
+
+ int tile_size_last_rank(const Impl::TileSizeProperties properties,
+ const index_type length) const {
+ return properties.default_largest_tile_size == 0
+ ? std::max<int>(length, 1)
+ : properties.default_largest_tile_size;
+ }
+
void init_helper(Impl::TileSizeProperties properties) {
m_prod_tile_dims = 1;
int increment = 1;
rank_start = rank - 1;
rank_end = -1;
}
+
for (int i = rank_start; i != rank_end; i += increment) {
const index_type length = m_upper[i] - m_lower[i];
+
+ if (m_upper[i] < m_lower[i]) {
+ std::string msg =
+ "Kokkos::MDRangePolicy bounds error: The lower bound (" +
+ std::to_string(m_lower[i]) + ") is greater than its upper bound (" +
+ std::to_string(m_upper[i]) + ") in dimension " + std::to_string(i) +
+ ".\n";
+#if !defined(KOKKOS_ENABLE_DEPRECATED_CODE_4)
+ Kokkos::abort(msg.c_str());
+#elif defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS)
+ Kokkos::Impl::log_warning(msg);
+#endif
+ }
+
if (m_tile[i] <= 0) {
m_tune_tile_size = true;
if ((inner_direction == Iterate::Right && (i < rank - 1)) ||
m_tile[i] = 1;
}
} else {
- m_tile[i] = properties.default_largest_tile_size == 0
- ? std::max<int>(length, 1)
- : properties.default_largest_tile_size;
+ m_tile[i] = tile_size_last_rank(properties, length);
}
}
m_tile_end[i] =
}
};
-} // namespace Kokkos
+template <typename LT, size_t N, typename UT>
+MDRangePolicy(const LT (&)[N], const UT (&)[N]) -> MDRangePolicy<Rank<N>>;
+
+template <typename LT, size_t N, typename UT, typename TT, size_t TN>
+MDRangePolicy(const LT (&)[N], const UT (&)[N], const TT (&)[TN])
+ -> MDRangePolicy<Rank<N>>;
+
+template <typename LT, size_t N, typename UT>
+MDRangePolicy(DefaultExecutionSpace const&, const LT (&)[N], const UT (&)[N])
+ -> MDRangePolicy<Rank<N>>;
+
+template <typename LT, size_t N, typename UT, typename TT, size_t TN>
+MDRangePolicy(DefaultExecutionSpace const&, const LT (&)[N], const UT (&)[N],
+ const TT (&)[TN]) -> MDRangePolicy<Rank<N>>;
+
+template <typename ES, typename LT, size_t N, typename UT,
+ typename = std::enable_if_t<is_execution_space_v<ES>>>
+MDRangePolicy(ES const&, const LT (&)[N], const UT (&)[N])
+ -> MDRangePolicy<ES, Rank<N>>;
+
+template <typename ES, typename LT, size_t N, typename UT, typename TT,
+ size_t TN, typename = std::enable_if_t<is_execution_space_v<ES>>>
+MDRangePolicy(ES const&, const LT (&)[N], const UT (&)[N], const TT (&)[TN])
+ -> MDRangePolicy<ES, Rank<N>>;
+
+template <typename T, size_t N>
+MDRangePolicy(Array<T, N> const&, Array<T, N> const&) -> MDRangePolicy<Rank<N>>;
+
+template <typename T, size_t N, size_t NT>
+MDRangePolicy(Array<T, N> const&, Array<T, N> const&, Array<T, NT> const&)
+ -> MDRangePolicy<Rank<N>>;
+
+template <typename T, size_t N>
+MDRangePolicy(DefaultExecutionSpace const&, Array<T, N> const&,
+ Array<T, N> const&) -> MDRangePolicy<Rank<N>>;
+
+template <typename T, size_t N, size_t NT>
+MDRangePolicy(DefaultExecutionSpace const&, Array<T, N> const&,
+ Array<T, N> const&, Array<T, NT> const&)
+ -> MDRangePolicy<Rank<N>>;
+
+template <typename ES, typename T, size_t N,
+ typename = std::enable_if_t<is_execution_space_v<ES>>>
+MDRangePolicy(ES const&, Array<T, N> const&, Array<T, N> const&)
+ -> MDRangePolicy<ES, Rank<N>>;
+
+template <typename ES, typename T, size_t N, size_t NT,
+ typename = std::enable_if_t<is_execution_space_v<ES>>>
+MDRangePolicy(ES const&, Array<T, N> const&, Array<T, N> const&,
+ Array<T, NT> const&) -> MDRangePolicy<ES, Rank<N>>;
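+
+// Deduction sketch (illustrative only; `n` and `m` are assumed integral
+// bounds and `exec` an execution space instance): with the guides above,
+//   Kokkos::MDRangePolicy({0, 0}, {n, m})       deduces MDRangePolicy<Rank<2>>;
+//   Kokkos::MDRangePolicy(exec, {0, 0}, {n, m}) deduces
+//   MDRangePolicy<decltype(exec), Rank<2>>.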
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-// For backward compatibility
-namespace Kokkos {
-namespace Experimental {
-using Iterate KOKKOS_DEPRECATED = Kokkos::Iterate;
-template <typename... Properties>
-using MDRangePolicy KOKKOS_DEPRECATED = Kokkos::MDRangePolicy<Properties...>;
-template <unsigned N, Kokkos::Iterate OuterDir = Kokkos::Iterate::Default,
- Kokkos::Iterate InnerDir = Kokkos::Iterate::Default>
-using Rank KOKKOS_DEPRECATED = Kokkos::Rank<N, OuterDir, InnerDir>;
-} // namespace Experimental
} // namespace Kokkos
-#endif
#endif // KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_ABORT_HPP
+#define KOKKOS_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Printf.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+#include <Cuda/Kokkos_Cuda_abort.hpp>
+#endif
+#ifdef KOKKOS_ENABLE_HIP
+#include <HIP/Kokkos_HIP_Abort.hpp>
+#endif
+#ifdef KOKKOS_ENABLE_SYCL
+#include <SYCL/Kokkos_SYCL_Abort.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+[[noreturn]] void host_abort(const char *const);
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+// required to workaround failures in random number generator unit tests with
+// pre-volta architectures
+#define KOKKOS_IMPL_ABORT_NORETURN
+#else
+// cuda_abort aborts when building for platforms other than macOS
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#endif
+
+#elif defined(KOKKOS_COMPILER_NVHPC)
+
+#define KOKKOS_IMPL_ABORT_NORETURN
+
+#elif defined(KOKKOS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)
+// HIP aborts
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+// FIXME_SYCL SYCL doesn't abort
+#define KOKKOS_IMPL_ABORT_NORETURN
+#elif !defined(KOKKOS_ENABLE_OPENMPTARGET) && !defined(KOKKOS_ENABLE_OPENACC)
+// Host aborts
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#else
+// Everything else does not abort
+#define KOKKOS_IMPL_ABORT_NORETURN
+#endif
+
+// FIXME_SYCL
+// Accommodate the host pass for device functions that are not [[noreturn]]
+#if defined(KOKKOS_ENABLE_SYCL) || \
+ (defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK))
+#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE
+#else
+#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE KOKKOS_IMPL_ABORT_NORETURN
+#endif
+
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
+ defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_OPENMPTARGET) || \
+ defined(KOKKOS_ENABLE_OPENACC)
+KOKKOS_IMPL_ABORT_NORETURN_DEVICE inline KOKKOS_IMPL_DEVICE_FUNCTION void
+device_abort(const char *const msg) {
+#if defined(KOKKOS_ENABLE_CUDA)
+ ::Kokkos::Impl::cuda_abort(msg);
+#elif defined(KOKKOS_ENABLE_HIP)
+ ::Kokkos::Impl::hip_abort(msg);
+#elif defined(KOKKOS_ENABLE_SYCL)
+ ::Kokkos::Impl::sycl_abort(msg);
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET) || defined(KOKKOS_ENABLE_OPENACC)
+ printf("%s", msg); // FIXME_OPENMPTARGET FIXME_OPENACC
+#else
+#error faulty logic
+#endif
+}
+#endif
+} // namespace Impl
+
+KOKKOS_IMPL_ABORT_NORETURN KOKKOS_INLINE_FUNCTION void abort(
+ const char *const message) {
+ KOKKOS_IF_ON_HOST(::Kokkos::Impl::host_abort(message);)
+ KOKKOS_IF_ON_DEVICE(::Kokkos::Impl::device_abort(message);)
+}
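+
+// Usage sketch (illustrative only; `v` is a user-provided View): abort() can
+// be called from host or device code, e.g. inside a kernel:
+//
+//   KOKKOS_LAMBDA(const int i) {
+//     if (v(i) < 0) Kokkos::abort("unexpected negative entry");
+//   }
+//
+// On backends flagged above as non-aborting the call may return, which is
+// why KOKKOS_IMPL_ABORT_NORETURN is conditionally empty.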
+
+#undef KOKKOS_IMPL_ABORT_NORETURN
+
+} // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_ABORT_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
+#define KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_UniqueToken.hpp>
+namespace Kokkos {
+namespace Experimental {
+
+template <typename TeamPolicy>
+KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::AcquireTeamUniqueToken(
+ AcquireTeamUniqueToken<TeamPolicy>::token_type t, team_member_type team)
+ : my_token(t), my_team_acquired_val(team.team_scratch(0)), my_team(team) {
+ Kokkos::single(Kokkos::PerTeam(my_team),
+ [&]() { my_team_acquired_val() = my_token.acquire(); });
+ my_team.team_barrier();
+
+ my_acquired_val = my_team_acquired_val();
+}
+
+template <typename TeamPolicy>
+KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::~AcquireTeamUniqueToken() {
+ my_team.team_barrier();
+ Kokkos::single(Kokkos::PerTeam(my_team),
+ [&]() { my_token.release(my_acquired_val); });
+ my_team.team_barrier();
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif // KOKKOS_UNIQUE_TOKEN_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_ANONYMOUSSPACE_HPP
#define KOKKOS_ANONYMOUSSPACE_HPP
using device_type = Kokkos::Device<execution_space, memory_space>;
/**\brief Default memory space instance */
- AnonymousSpace() = default;
- AnonymousSpace(AnonymousSpace &&rhs) = default;
- AnonymousSpace(const AnonymousSpace &rhs) = default;
- AnonymousSpace &operator=(AnonymousSpace &&) = default;
+ AnonymousSpace() = default;
+ AnonymousSpace(AnonymousSpace &&rhs) = default;
+ AnonymousSpace(const AnonymousSpace &rhs) = default;
+ AnonymousSpace &operator=(AnonymousSpace &&) = default;
AnonymousSpace &operator=(const AnonymousSpace &) = default;
~AnonymousSpace() = default;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_ARRAY_HPP
#define KOKKOS_ARRAY_HPP
#endif
#include <Kokkos_Macros.hpp>
+#include <Kokkos_Swap.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_StringManipulation.hpp>
#include <type_traits>
#include <algorithm>
#include <utility>
-#include <limits>
#include <cstddef>
namespace Kokkos {
#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
namespace Impl {
-template <typename Integral, bool Signed = std::is_signed<Integral>::value>
+template <typename Integral, bool Signed = std::is_signed_v<Integral>>
struct ArrayBoundsCheck;
template <typename Integral>
/**\brief Derived from the C++17 'std::array'.
* Dropping the iterator interface.
*/
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
template <class T = void, size_t N = KOKKOS_INVALID_INDEX, class Proxy = void>
+#else
+template <class T, size_t N>
+#endif
struct Array {
public:
/**
KOKKOS_INLINE_FUNCTION constexpr const_pointer data() const {
return &m_internal_implementation_private_member_data[0];
}
+
+ friend KOKKOS_FUNCTION constexpr bool operator==(Array const& lhs,
+ Array const& rhs) noexcept {
+ for (size_t i = 0; i != N; ++i)
+ if (lhs[i] != rhs[i]) return false;
+ return true;
+ }
+
+ friend KOKKOS_FUNCTION constexpr bool operator!=(Array const& lhs,
+ Array const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+ private:
+ template <class U = T>
+ friend KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+ Impl::is_swappable<U>::value>
+ kokkos_swap(Array<T, N>& a,
+ Array<T, N>& b) noexcept(Impl::is_nothrow_swappable_v<U>) {
+ for (std::size_t i = 0; i < N; ++i) {
+ kokkos_swap(a[i], b[i]);
+ }
+ }
};
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
template <class T, class Proxy>
struct Array<T, 0, Proxy> {
+#else
+template <class T>
+struct Array<T, 0> {
+#endif
public:
using reference = T&;
using const_reference = std::add_const_t<T>&;
return *reinterpret_cast<const_pointer>(-1);
}
- KOKKOS_INLINE_FUNCTION pointer data() { return pointer(0); }
- KOKKOS_INLINE_FUNCTION const_pointer data() const { return const_pointer(0); }
+ KOKKOS_INLINE_FUNCTION constexpr pointer data() { return nullptr; }
+ KOKKOS_INLINE_FUNCTION constexpr const_pointer data() const {
+ return nullptr;
+ }
- KOKKOS_DEFAULTED_FUNCTION ~Array() = default;
- KOKKOS_DEFAULTED_FUNCTION Array() = default;
- KOKKOS_DEFAULTED_FUNCTION Array(const Array&) = default;
- KOKKOS_DEFAULTED_FUNCTION Array& operator=(const Array&) = default;
+ friend KOKKOS_FUNCTION constexpr bool operator==(Array const&,
+ Array const&) noexcept {
+ return true;
+ }
+ friend KOKKOS_FUNCTION constexpr bool operator!=(Array const&,
+ Array const&) noexcept {
+ return false;
+ }
- // Some supported compilers are not sufficiently C++11 compliant
- // for default move constructor and move assignment operator.
- // Array( Array && ) = default ;
- // Array & operator = ( Array && ) = default ;
+ private:
+ friend KOKKOS_INLINE_FUNCTION constexpr void kokkos_swap(
+ Array<T, 0>&, Array<T, 0>&) noexcept {}
};
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+namespace Impl {
+struct KokkosArrayContiguous {};
+struct KokkosArrayStrided {};
+} // namespace Impl
+
template <>
-struct Array<void, KOKKOS_INVALID_INDEX, void> {
- struct contiguous {};
- struct strided {};
+struct KOKKOS_DEPRECATED Array<void, KOKKOS_INVALID_INDEX, void> {
+ using contiguous = Impl::KokkosArrayContiguous;
+ using strided = Impl::KokkosArrayStrided;
};
template <class T>
-struct Array<T, KOKKOS_INVALID_INDEX, Array<>::contiguous> {
+struct KOKKOS_DEPRECATED
+ Array<T, KOKKOS_INVALID_INDEX, Impl::KokkosArrayContiguous> {
private:
T* m_elem;
size_t m_size;
using const_pointer = std::add_const_t<T>*;
KOKKOS_INLINE_FUNCTION constexpr size_type size() const { return m_size; }
- KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 != m_size; }
+ KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 == m_size; }
KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return m_size; }
template <typename iType>
KOKKOS_INLINE_FUNCTION
Array& operator=(const Array& rhs) {
- const size_t n = std::min(m_size, rhs.size());
+ const size_t n = size() < rhs.size() ? size() : rhs.size();
for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
return *this;
}
template <size_t N, class P>
KOKKOS_INLINE_FUNCTION Array& operator=(const Array<T, N, P>& rhs) {
- const size_t n = std::min(m_size, rhs.size());
+ const size_t n = size() < rhs.size() ? size() : rhs.size();
for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
return *this;
}
};
template <class T>
-struct Array<T, KOKKOS_INVALID_INDEX, Array<>::strided> {
+struct KOKKOS_DEPRECATED
+ Array<T, KOKKOS_INVALID_INDEX, Impl::KokkosArrayStrided> {
private:
T* m_elem;
size_t m_size;
using const_pointer = std::add_const_t<T>*;
KOKKOS_INLINE_FUNCTION constexpr size_type size() const { return m_size; }
- KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 != m_size; }
+ KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 == m_size; }
KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return m_size; }
template <typename iType>
KOKKOS_INLINE_FUNCTION
Array& operator=(const Array& rhs) {
- const size_t n = std::min(m_size, rhs.size());
- for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+ const size_t n = size() < rhs.size() ? size() : rhs.size();
+ for (size_t i = 0; i < n; ++i) m_elem[i * m_stride] = rhs[i];
return *this;
}
template <size_t N, class P>
KOKKOS_INLINE_FUNCTION Array& operator=(const Array<T, N, P>& rhs) {
- const size_t n = std::min(m_size, rhs.size());
- for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+ const size_t n = size() < rhs.size() ? size() : rhs.size();
+ for (size_t i = 0; i < n; ++i) m_elem[i * m_stride] = rhs[i];
return *this;
}
size_type arg_stride)
: m_elem(arg_ptr), m_size(arg_size), m_stride(arg_stride) {}
};
+#endif
+
+template <typename T, typename... Us>
+Array(T, Us...) -> Array<T, 1 + sizeof...(Us)>;
+
+namespace Impl {
+
+template <typename T, size_t N, size_t... I>
+KOKKOS_FUNCTION constexpr Array<std::remove_cv_t<T>, N> to_array_impl(
+ T (&a)[N], std::index_sequence<I...>) {
+ return {{a[I]...}};
+}
+
+template <typename T, size_t N, size_t... I>
+KOKKOS_FUNCTION constexpr Array<std::remove_cv_t<T>, N> to_array_impl(
+ T (&&a)[N], std::index_sequence<I...>) {
+ return {{std::move(a[I])...}};
+}
+
+} // namespace Impl
+
+template <typename T, size_t N>
+KOKKOS_FUNCTION constexpr auto to_array(T (&a)[N]) {
+ return Impl::to_array_impl(a, std::make_index_sequence<N>{});
+}
+
+template <typename T, size_t N>
+KOKKOS_FUNCTION constexpr auto to_array(T (&&a)[N]) {
+ return Impl::to_array_impl(std::move(a), std::make_index_sequence<N>{});
+}
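+
+// Usage sketch (illustrative only):
+//   int raw[3] = {1, 2, 3};
+//   auto a = Kokkos::to_array(raw);        // Kokkos::Array<int, 3>, copied
+//   auto b = Kokkos::to_array({4, 5, 6});  // deduced from a temporary array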
} // namespace Kokkos
//<editor-fold desc="Support for structured binding">
-// guarding against bogus error 'specialization in different namespace' with
-// older GCC that do not support C++17 anyway
-#if !defined(KOKKOS_COMPILER_GNU) || (KOKKOS_COMPILER_GNU >= 710)
-#if defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 800
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmismatched-tags"
-#endif
template <class T, std::size_t N>
struct std::tuple_size<Kokkos::Array<T, N>>
: std::integral_constant<std::size_t, N> {};
template <std::size_t I, class T, std::size_t N>
struct std::tuple_element<I, Kokkos::Array<T, N>> {
+ static_assert(I < N);
using type = T;
};
-#if defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 800
-#pragma clang diagnostic pop
-#endif
-#endif
namespace Kokkos {
template <std::size_t I, class T, std::size_t N>
KOKKOS_FUNCTION constexpr T& get(Array<T, N>& a) noexcept {
+ static_assert(I < N);
return a[I];
}
template <std::size_t I, class T, std::size_t N>
KOKKOS_FUNCTION constexpr T const& get(Array<T, N> const& a) noexcept {
+ static_assert(I < N);
return a[I];
}
template <std::size_t I, class T, std::size_t N>
KOKKOS_FUNCTION constexpr T&& get(Array<T, N>&& a) noexcept {
+ static_assert(I < N);
return std::move(a[I]);
}
template <std::size_t I, class T, std::size_t N>
KOKKOS_FUNCTION constexpr T const&& get(Array<T, N> const&& a) noexcept {
+ static_assert(I < N);
return std::move(a[I]);
}
} // namespace Kokkos
//</editor-fold>
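+
+// Usage sketch (illustrative only): the tuple_size/tuple_element
+// specializations and get() overloads above enable structured bindings:
+//
+//   Kokkos::Array<int, 2> p{{3, 4}};
+//   auto [x, y] = p;  // x == 3, y == 4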
+//<editor-fold desc="Support for range-based for loop">
+namespace Kokkos {
+
+template <class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T const* begin(Array<T, N> const& a) noexcept {
+ return a.data();
+}
+
+template <class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T* begin(Array<T, N>& a) noexcept {
+ return a.data();
+}
+
+template <class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T const* end(Array<T, N> const& a) noexcept {
+ return a.data() + a.size();
+}
+
+template <class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T* end(Array<T, N>& a) noexcept {
+ return a.data() + a.size();
+}
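+
+// Example (illustrative sketch): with these free begin()/end() overloads,
+// Kokkos::Array works in range-based for loops:
+//
+//   Kokkos::Array<int, 3> a = {1, 2, 3};
+//   int sum = 0;
+//   for (int v : a) sum += v;  // sum == 6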
+
+} // namespace Kokkos
+//</editor-fold>
+
#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ARRAY
#undef KOKKOS_IMPL_PUBLIC_INCLUDE
#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ARRAY
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_ASSERT_HPP
+#define KOKKOS_ASSERT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Abort.hpp>
+
+#if !defined(NDEBUG) || defined(KOKKOS_ENFORCE_CONTRACTS) || \
+ defined(KOKKOS_ENABLE_DEBUG)
+#define KOKKOS_EXPECTS(...) \
+ { \
+ if (!bool(__VA_ARGS__)) { \
+ ::Kokkos::abort( \
+ "Kokkos contract violation:\n " \
+ " Expected precondition `" #__VA_ARGS__ \
+ "` evaluated false.\n" \
+ "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+ __LINE__) " \n"); \
+ } \
+ }
+#define KOKKOS_ENSURES(...) \
+ { \
+ if (!bool(__VA_ARGS__)) { \
+ ::Kokkos::abort( \
+ "Kokkos contract violation:\n " \
+ " Ensured postcondition `" #__VA_ARGS__ \
+ "` evaluated false.\n" \
+ "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+ __LINE__) " \n"); \
+ } \
+ }
+#define KOKKOS_ASSERT(...) \
+ { \
+ if (!bool(__VA_ARGS__)) { \
+ ::Kokkos::abort( \
+ "Kokkos contract violation:\n " \
+ " Asserted condition `" #__VA_ARGS__ \
+ "` evaluated false.\n" \
+ "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+ __LINE__) " \n"); \
+ } \
+ }
+#else // not debug mode
+#define KOKKOS_EXPECTS(...)
+#define KOKKOS_ENSURES(...)
+#ifndef KOKKOS_ASSERT
+#define KOKKOS_ASSERT(...)
+#endif // ifndef KOKKOS_ASSERT
+#endif // end debug mode ifdefs
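+
+// Example (illustrative sketch): the three macros express preconditions,
+// postconditions, and plain assertions; all of them compile away unless one
+// of the debug/contract-checking options above is enabled.
+//
+//   KOKKOS_FUNCTION void pop(int& size) {  // hypothetical function
+//     KOKKOS_EXPECTS(size > 0);   // precondition: must be non-empty
+//     --size;
+//     KOKKOS_ENSURES(size >= 0);  // postcondition: stays non-negative
+//   }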
+
+#endif /* #ifndef KOKKOS_ASSERT_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/// \file Kokkos_Atomic.hpp
+/// \brief Atomic functions
+///
+/// This header file defines prototypes for the following atomic functions:
+/// - exchange
+/// - compare and exchange
+/// - add
+///
+/// Supported types include:
+/// - signed and unsigned 4- and 8-byte integers
+/// - float
+/// - double
+///
+/// They are implemented through GCC-compatible intrinsics, OpenMP
+/// directives, and native CUDA intrinsics.
+///
+/// Including this header file requires one of the following
+/// compilers:
+/// - NVCC (for CUDA device code only)
+/// - GCC (for host code only)
+/// - Intel (for host code only)
+/// - A compiler that supports OpenMP 3.1 (for host code only)
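+///
+/// Example (illustrative sketch): each function operates on a memory location
+/// through a pointer; e.g., incrementing a shared counter from many threads
+/// and retrieving the prior value ('counter' is a hypothetical int shared by
+/// the calling threads):
+///
+///   int old = Kokkos::atomic_fetch_add(&counter, 1);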
+
+#ifndef KOKKOS_ATOMIC_HPP
+#define KOKKOS_ATOMIC_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Atomics_Desul_Wrapper.hpp>
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#endif
+#endif /* KOKKOS_ATOMIC_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
+#define KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
+#include <Kokkos_Macros.hpp>
+#include <desul/atomics.hpp>
+
+#include <impl/Kokkos_Utilities.hpp> // identity_type
+#include <impl/Kokkos_Volatile_Load.hpp>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+KOKKOS_DEPRECATED inline const char* atomic_query_version() {
+ return "KOKKOS_DESUL_ATOMICS";
+}
+#endif
+
+#if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
+ !defined(__CUDA_ARCH__)
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) __builtin_prefetch(addr, 0, 0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) __builtin_prefetch(addr, 1, 0)
+
+#else
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
+
+#endif
+// ============================================================
+
+#ifdef KOKKOS_ENABLE_ATOMICS_BYPASS
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeCaller()
+#else
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeDevice()
+#endif
+
+namespace Impl {
+template <class T>
+using not_deduced_atomic_t =
+ std::add_const_t<std::remove_volatile_t<type_identity_t<T>>>;
+
+template <class T, class R>
+using enable_if_atomic_t =
+ std::enable_if_t<!std::is_reference_v<T> && !std::is_const_v<T>,
+ std::remove_volatile_t<R>>;
+} // namespace Impl
+
+// clang-format off
+
+// fences
+KOKKOS_INLINE_FUNCTION void memory_fence() { desul::atomic_thread_fence(desul::MemoryOrderSeqCst(), KOKKOS_DESUL_MEM_SCOPE); }
+KOKKOS_INLINE_FUNCTION void load_fence() { desul::atomic_thread_fence(desul::MemoryOrderAcquire(), KOKKOS_DESUL_MEM_SCOPE); }
+KOKKOS_INLINE_FUNCTION void store_fence() { desul::atomic_thread_fence(desul::MemoryOrderRelease(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// load/store
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_load (T const* ptr) { return desul::atomic_load (const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_store(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_store(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template<class T> KOKKOS_DEPRECATED_WITH_COMMENT("Use atomic_store() instead!") KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_assign(T* ptr, Impl::not_deduced_atomic_t<T> val) { atomic_store(ptr, val); }
+#endif
+
+// atomic_fetch_op
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_add(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_add(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_sub(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_sub(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_max(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_max(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_min(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_min(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_mul(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_mul(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_div(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_div(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_mod(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_mod(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_and(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_and(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_or (T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_or (const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_xor(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_xor(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_nand(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_nand(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_lshift(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_lshift(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_rshift(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_fetch_rshift(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_inc(T* ptr) { return desul::atomic_fetch_inc(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_fetch_dec(T* ptr) { return desul::atomic_fetch_dec(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// atomic_op_fetch
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_add_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_add_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_sub_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_sub_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_max_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_max_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_min_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_min_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_mul_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_mul_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_div_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_div_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_mod_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_mod_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_and_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_and_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_or_fetch (T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_or_fetch (const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_xor_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_xor_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_nand_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_nand_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_lshift_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_lshift_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_rshift_fetch(T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_rshift_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_inc_fetch(T* ptr) { return desul::atomic_inc_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_dec_fetch(T* ptr) { return desul::atomic_dec_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// atomic_op
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_add(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_add(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_sub(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_sub(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_max(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_max(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_min(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_min(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_mul(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_mul(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_div(T* ptr, Impl::not_deduced_atomic_t<T> val) { desul::atomic_div(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_mod(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_mod(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_and(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_and(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_or (T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_or (const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_xor(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_xor(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_nand(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_nand_fetch(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_lshift(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_lshift(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_rshift(T* ptr, Impl::not_deduced_atomic_t<T> val) { (void)desul::atomic_fetch_rshift(const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_inc(T* ptr) { desul::atomic_inc(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_dec(T* ptr) { desul::atomic_dec(const_cast<std::remove_volatile_t<T>*>(ptr), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template<class T> KOKKOS_DEPRECATED_WITH_COMMENT("Use atomic_inc() instead!") KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_increment(T* ptr) { atomic_inc(ptr); }
+template<class T> KOKKOS_DEPRECATED_WITH_COMMENT("Use atomic_dec() instead!") KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, void> atomic_decrement(T* ptr) { atomic_dec(ptr); }
+#endif
+
+// exchange
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_exchange (T* ptr, Impl::not_deduced_atomic_t<T> val) { return desul::atomic_exchange (const_cast<std::remove_volatile_t<T>*>(ptr), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+template<class T> KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, T> atomic_compare_exchange(T* ptr, Impl::not_deduced_atomic_t<T> expected, Impl::not_deduced_atomic_t<T> desired) { return desul::atomic_compare_exchange(const_cast<std::remove_volatile_t<T>*>(ptr), expected, desired, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template<class T> KOKKOS_DEPRECATED_WITH_COMMENT("Use atomic_compare_exchange() instead!") KOKKOS_FUNCTION Impl::enable_if_atomic_t<T, bool> atomic_compare_exchange_strong(T* ptr, Impl::not_deduced_atomic_t<T> expected, Impl::not_deduced_atomic_t<T> desired) { return expected == atomic_compare_exchange(ptr, expected, desired); }
+#endif
+
+// clang-format on
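+
+// Example (illustrative sketch): atomic_compare_exchange() returns the value
+// it observed at *ptr, so a lock-free read-modify-write loop can be written
+// as follows (with f a hypothetical user-supplied update function):
+//
+//   T old = Kokkos::atomic_load(ptr);
+//   T assumed;
+//   do {
+//     assumed = old;
+//     old     = Kokkos::atomic_compare_exchange(ptr, assumed, f(assumed));
+//   } while (old != assumed);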
+} // namespace Kokkos
+
+namespace Kokkos::Impl {
+
+template <class T, class MemOrderSuccess, class MemOrderFailure>
+KOKKOS_FUNCTION bool atomic_compare_exchange_strong(T* const dest, T& expected,
+ const T desired,
+ MemOrderSuccess succ,
+ MemOrderFailure fail) {
+ return desul::atomic_compare_exchange_strong(dest, expected, desired, succ,
+ fail, KOKKOS_DESUL_MEM_SCOPE);
+}
+
+template <class T, class MemoryOrder>
+KOKKOS_FUNCTION T atomic_load(const T* const src, MemoryOrder order) {
+ return desul::atomic_load(src, order, KOKKOS_DESUL_MEM_SCOPE);
+}
+
+template <class T, class MemoryOrder>
+KOKKOS_FUNCTION void atomic_store(T* const src, const T val,
+ MemoryOrder order) {
+ return desul::atomic_store(src, val, order, KOKKOS_DESUL_MEM_SCOPE);
+}
+
+} // namespace Kokkos::Impl
+
+#undef KOKKOS_DESUL_MEM_SCOPE
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_BIT_MANIPULATION_HPP
+#define KOKKOS_BIT_MANIPULATION_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_NumericTraits.hpp>
+#include <climits> // CHAR_BIT
+#include <cstring> // memcpy
+#include <type_traits>
+
+namespace Kokkos::Impl {
+
+template <class T>
+KOKKOS_FUNCTION constexpr T byteswap_fallback(T x) {
+ if constexpr (sizeof(T) > 1) {
+ using U = std::make_unsigned_t<T>;
+
+ size_t shift = CHAR_BIT * (sizeof(T) - 1);
+
+ U lo_mask = static_cast<unsigned char>(~0);
+ U hi_mask = lo_mask << shift;
+
+ U val = x;
+
+ for (size_t i = 0; i < sizeof(T) / 2; ++i) {
+ U lo_val = val & lo_mask;
+ U hi_val = val & hi_mask;
+
+ val = (val & ~lo_mask) | (hi_val >> shift);
+ val = (val & ~hi_mask) | (lo_val << shift);
+
+ lo_mask <<= CHAR_BIT;
+ hi_mask >>= CHAR_BIT;
+
+ shift -= 2 * CHAR_BIT;
+ }
+ return val;
+ }
+ // sizeof(T) == 1
+ return x;
+}
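+
+// Example (illustrative): the loop above swaps the outermost byte pair and
+// walks inward, so for a 4-byte value 0x11223344 the result is 0x44332211.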
+
+template <class T>
+KOKKOS_FUNCTION constexpr int countl_zero_fallback(T x) {
+ // From Hacker's Delight (2nd edition) section 5-3
+ unsigned int y = 0;
+ using ::Kokkos::Experimental::digits_v;
+ int n = digits_v<T>;
+ int c = digits_v<T> / 2;
+ do {
+ y = x >> c;
+ if (y != 0) {
+ n -= c;
+ x = y;
+ }
+ c >>= 1;
+ } while (c != 0);
+ return n - static_cast<int>(x);
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr int countr_zero_fallback(T x) {
+ using ::Kokkos::Experimental::digits_v;
+ return digits_v<T> - countl_zero_fallback(static_cast<T>(
+ static_cast<T>(~x) & static_cast<T>(x - 1)));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr int popcount_fallback(T x) {
+ int c = 0;
+ for (; x != 0; x &= x - 1) {
+ ++c;
+ }
+ return c;
+}
+
+template <class T>
+inline constexpr bool is_standard_unsigned_integer_type_v =
+ std::is_same_v<T, unsigned char> || std::is_same_v<T, unsigned short> ||
+ std::is_same_v<T, unsigned int> || std::is_same_v<T, unsigned long> ||
+ std::is_same_v<T, unsigned long long>;
+
+} // namespace Kokkos::Impl
+
+namespace Kokkos {
+
+//<editor-fold desc="[bit.cast], bit_cast">
+#if defined(KOKKOS_ENABLE_SYCL) && defined(__INTEL_LLVM_COMPILER) && \
+ __INTEL_LLVM_COMPILER < 20240000
+using sycl::detail::bit_cast;
+#else
+template <class To, class From>
+KOKKOS_FUNCTION std::enable_if_t<sizeof(To) == sizeof(From) &&
+ std::is_trivially_copyable_v<To> &&
+ std::is_trivially_copyable_v<From>,
+ To>
+bit_cast(From const& from) noexcept {
+#if defined(KOKKOS_ENABLE_SYCL) && defined(__INTEL_LLVM_COMPILER) && \
+ __INTEL_LLVM_COMPILER >= 20240000
+ return sycl::bit_cast<To>(from);
+#else
+ To to;
+ memcpy(static_cast<void*>(&to), static_cast<const void*>(&from), sizeof(To));
+ return to;
+#endif
+}
+#endif
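+
+// Example (illustrative sketch): reinterpret the bits of a float as an
+// unsigned integer of equal size without undefined behavior:
+//
+//   float f = 1.0f;
+//   auto bits = Kokkos::bit_cast<unsigned int>(f);  // 0x3f800000 on IEEE-754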
+//</editor-fold>
+
+//<editor-fold desc="[bit.byteswap], byteswap">
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<std::is_integral_v<T>, T> byteswap(
+ T value) noexcept {
+ return Impl::byteswap_fallback(value);
+}
+//</editor-fold>
+
+//<editor-fold desc="[bit.count], counting">
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, int>
+countl_zero(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ if (x == 0) return digits_v<T>;
+ // TODO use compiler intrinsics when available
+ return Impl::countl_zero_fallback(x);
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, int>
+countl_one(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ using ::Kokkos::Experimental::finite_max_v;
+ if (x == finite_max_v<T>) return digits_v<T>;
+ return countl_zero(static_cast<T>(~x));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, int>
+countr_zero(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ if (x == 0) return digits_v<T>;
+ // TODO use compiler intrinsics when available
+ return Impl::countr_zero_fallback(x);
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, int>
+countr_one(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ using ::Kokkos::Experimental::finite_max_v;
+ if (x == finite_max_v<T>) return digits_v<T>;
+ return countr_zero(static_cast<T>(~x));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, int>
+popcount(T x) noexcept {
+ if (x == 0) return 0;
+ // TODO use compiler intrinsics when available
+ return Impl::popcount_fallback(x);
+}
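+
+// Example (illustrative): for x = static_cast<unsigned char>(0b00010100),
+//   countl_zero(x) == 3, countr_zero(x) == 2, and popcount(x) == 2.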
+//</editor-fold>
+
+//<editor-fold desc="[bit.pow.two], integral powers of 2">
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, bool>
+has_single_bit(T x) noexcept {
+ return x != 0 && (((x & (x - 1)) == 0));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, T>
+bit_ceil(T x) noexcept {
+ if (x <= 1) return 1;
+ using ::Kokkos::Experimental::digits_v;
+ return T{1} << (digits_v<T> - countl_zero(static_cast<T>(x - 1)));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, T>
+bit_floor(T x) noexcept {
+ if (x == 0) return 0;
+ using ::Kokkos::Experimental::digits_v;
+ return T{1} << (digits_v<T> - 1 - countl_zero(x));
+}
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, T>
+bit_width(T x) noexcept {
+ if (x == 0) return 0;
+ using ::Kokkos::Experimental::digits_v;
+ return digits_v<T> - countl_zero(x);
+}
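+
+// Example (illustrative): for x = 5u,
+//   has_single_bit(5u) == false, bit_ceil(5u) == 8u,
+//   bit_floor(5u) == 4u, and bit_width(5u) == 3u.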
+//</editor-fold>
+
+//<editor-fold desc="[bit.rotate], rotating">
+template <class T>
+[[nodiscard]] KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, T>
+rotl(T x, int s) noexcept {
+ using Experimental::digits_v;
+ constexpr auto dig = digits_v<T>;
+ int const rem = s % dig;
+ if (rem == 0) return x;
+ if (rem > 0) return (x << rem) | (x >> ((dig - rem) % dig));
+ return (x >> -rem) | (x << ((dig + rem) % dig)); // rotr(x, -rem)
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FUNCTION constexpr std::enable_if_t<
+ Impl::is_standard_unsigned_integer_type_v<T>, T>
+rotr(T x, int s) noexcept {
+ using Experimental::digits_v;
+ constexpr auto dig = digits_v<T>;
+ int const rem = s % dig;
+ if (rem == 0) return x;
+ if (rem > 0) return (x >> rem) | (x << ((dig - rem) % dig));
+ return (x << -rem) | (x >> ((dig + rem) % dig)); // rotl(x, -rem)
+}
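+
+// Example (illustrative): for an 8-bit value x = 0b00011101,
+//   rotl(x, 1) == 0b00111010 and rotr(x, 1) == 0b10001110;
+// a negative shift count rotates in the opposite direction.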
+//</editor-fold>
+
+} // namespace Kokkos
+
+namespace Kokkos::Impl {
+
+#if defined(KOKKOS_COMPILER_CLANG) || defined(KOKKOS_COMPILER_INTEL_LLVM) || \
+ defined(KOKKOS_COMPILER_GNU)
+#define KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+#endif
+
+template <class T>
+KOKKOS_IMPL_DEVICE_FUNCTION T byteswap_builtin_device(T x) noexcept {
+ return byteswap_fallback(x);
+}
+
+template <class T>
+KOKKOS_IMPL_HOST_FUNCTION T byteswap_builtin_host(T x) noexcept {
+#ifdef KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+ if constexpr (sizeof(T) == 1) {
+ return x;
+ } else if constexpr (sizeof(T) == 2) {
+ return __builtin_bswap16(x);
+ } else if constexpr (sizeof(T) == 4) {
+ return __builtin_bswap32(x);
+ } else if constexpr (sizeof(T) == 8) {
+ return __builtin_bswap64(x);
+ } else if constexpr (sizeof(T) == 16) {
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_bswap128)
+ return __builtin_bswap128(x);
+#endif
+#endif
+ return (__builtin_bswap64(x >> 64) |
+ (static_cast<T>(__builtin_bswap64(x)) << 64));
+ }
+#endif
+
+ return byteswap_fallback(x);
+}
+
+template <class T>
+KOKKOS_IMPL_DEVICE_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ countl_zero_builtin_device(T x) noexcept {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+ if constexpr (sizeof(T) == sizeof(long long int))
+ return __clzll(reinterpret_cast<long long int&>(x));
+ if constexpr (sizeof(T) == sizeof(int))
+ return __clz(reinterpret_cast<int&>(x));
+ using ::Kokkos::Experimental::digits_v;
+ constexpr int shift = digits_v<unsigned int> - digits_v<T>;
+ return __clz(x) - shift;
+#elif defined(KOKKOS_ENABLE_SYCL)
+ return sycl::clz(x);
+#else
+ return countl_zero_fallback(x);
+#endif
+}
+
+template <class T>
+KOKKOS_IMPL_HOST_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ countl_zero_builtin_host(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ if (x == 0) return digits_v<T>;
+#ifdef KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+ if constexpr (std::is_same_v<T, unsigned long long>) {
+ return __builtin_clzll(x);
+ } else if constexpr (std::is_same_v<T, unsigned long>) {
+ return __builtin_clzl(x);
+ } else if constexpr (std::is_same_v<T, unsigned int>) {
+ return __builtin_clz(x);
+ } else {
+ constexpr int shift = digits_v<unsigned int> - digits_v<T>;
+ return __builtin_clz(x) - shift;
+ }
+#else
+ return countl_zero_fallback(x);
+#endif
+}
+
+template <class T>
+KOKKOS_IMPL_DEVICE_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ countr_zero_builtin_device(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ if (x == 0) return digits_v<T>;
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+ if constexpr (sizeof(T) == sizeof(long long int))
+ return __ffsll(reinterpret_cast<long long int&>(x)) - 1;
+ return __ffs(reinterpret_cast<int&>(x)) - 1;
+#elif defined(KOKKOS_ENABLE_SYCL)
+ return sycl::ctz(x);
+#else
+ return countr_zero_fallback(x);
+#endif
+}
+
+template <class T>
+KOKKOS_IMPL_HOST_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ countr_zero_builtin_host(T x) noexcept {
+ using ::Kokkos::Experimental::digits_v;
+ if (x == 0) return digits_v<T>;
+#ifdef KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+ if constexpr (std::is_same_v<T, unsigned long long>) {
+ return __builtin_ctzll(x);
+ } else if constexpr (std::is_same_v<T, unsigned long>) {
+ return __builtin_ctzl(x);
+ } else {
+ return __builtin_ctz(x);
+ }
+#else
+ return countr_zero_fallback(x);
+#endif
+}
+
+template <class T>
+KOKKOS_IMPL_DEVICE_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ popcount_builtin_device(T x) noexcept {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+ if constexpr (sizeof(T) == sizeof(long long int)) return __popcll(x);
+ return __popc(x);
+#elif defined(KOKKOS_ENABLE_SYCL)
+ return sycl::popcount(x);
+#else
+ return popcount_fallback(x);
+#endif
+}
+
+template <class T>
+KOKKOS_IMPL_HOST_FUNCTION
+ std::enable_if_t<is_standard_unsigned_integer_type_v<T>, int>
+ popcount_builtin_host(T x) noexcept {
+#ifdef KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+ if constexpr (std::is_same_v<T, unsigned long long>) {
+ return __builtin_popcountll(x);
+ } else if constexpr (std::is_same_v<T, unsigned long>) {
+ return __builtin_popcountl(x);
+ } else {
+ return __builtin_popcount(x);
+ }
+#else
+ return popcount_fallback(x);
+#endif
+}
+
+#undef KOKKOS_IMPL_USE_GCC_BUILT_IN_FUNCTIONS
+
+} // namespace Kokkos::Impl
+
+namespace Kokkos::Experimental {
+
+template <class To, class From>
+KOKKOS_FUNCTION std::enable_if_t<sizeof(To) == sizeof(From) &&
+ std::is_trivially_copyable_v<To> &&
+ std::is_trivially_copyable_v<From>,
+ To>
+bit_cast_builtin(From const& from) noexcept {
+ // qualify the call to avoid ADL
+  return Kokkos::bit_cast<To>(from);  // no benefit in calling the _builtin variant
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<std::is_integral_v<T>, T> byteswap_builtin(
+ T x) noexcept {
+ KOKKOS_IF_ON_DEVICE((return ::Kokkos::Impl::byteswap_builtin_device(x);))
+ KOKKOS_IF_ON_HOST((return ::Kokkos::Impl::byteswap_builtin_host(x);))
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, int>
+countl_zero_builtin(T x) noexcept {
+ KOKKOS_IF_ON_DEVICE((return ::Kokkos::Impl::countl_zero_builtin_device(x);))
+ KOKKOS_IF_ON_HOST((return ::Kokkos::Impl::countl_zero_builtin_host(x);))
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, int>
+countl_one_builtin(T x) noexcept {
+ if (x == finite_max_v<T>) return digits_v<T>;
+ return countl_zero_builtin(static_cast<T>(~x));
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, int>
+countr_zero_builtin(T x) noexcept {
+ KOKKOS_IF_ON_DEVICE((return ::Kokkos::Impl::countr_zero_builtin_device(x);))
+ KOKKOS_IF_ON_HOST((return ::Kokkos::Impl::countr_zero_builtin_host(x);))
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, int>
+countr_one_builtin(T x) noexcept {
+ if (x == finite_max_v<T>) return digits_v<T>;
+ return countr_zero_builtin(static_cast<T>(~x));
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, int>
+popcount_builtin(T x) noexcept {
+ KOKKOS_IF_ON_DEVICE((return ::Kokkos::Impl::popcount_builtin_device(x);))
+ KOKKOS_IF_ON_HOST((return ::Kokkos::Impl::popcount_builtin_host(x);))
+}
+
+template <class T>
+KOKKOS_FUNCTION std::enable_if_t<
+ ::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, bool>
+has_single_bit_builtin(T x) noexcept {
+  return has_single_bit(x);  // no benefit in calling the _builtin variant
+}
+
+template <class T>
+KOKKOS_FUNCTION
+ std::enable_if_t<::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, T>
+ bit_ceil_builtin(T x) noexcept {
+ if (x <= 1) return 1;
+ return T{1} << (digits_v<T> - countl_zero_builtin(static_cast<T>(x - 1)));
+}
+
+template <class T>
+KOKKOS_FUNCTION
+ std::enable_if_t<::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, T>
+ bit_floor_builtin(T x) noexcept {
+ if (x == 0) return 0;
+ return T{1} << (digits_v<T> - 1 - countl_zero_builtin(x));
+}
+
+template <class T>
+KOKKOS_FUNCTION
+ std::enable_if_t<::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, T>
+ bit_width_builtin(T x) noexcept {
+ if (x == 0) return 0;
+ return digits_v<T> - countl_zero_builtin(x);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FUNCTION
+ std::enable_if_t<::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, T>
+ rotl_builtin(T x, int s) noexcept {
+  return rotl(x, s);  // no benefit in calling the _builtin variant
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FUNCTION
+ std::enable_if_t<::Kokkos::Impl::is_standard_unsigned_integer_type_v<T>, T>
+ rotr_builtin(T x, int s) noexcept {
+  return rotr(x, s);  // no benefit in calling the _builtin variant
+}
+
+} // namespace Kokkos::Experimental
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CLAMP_HPP
+#define KOKKOS_CLAMP_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+
+template <class T>
+constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
+ const T& hi) {
+ KOKKOS_EXPECTS(!(hi < lo));
+ return (value < lo) ? lo : (hi < value) ? hi : value;
+}
+
+template <class T, class ComparatorType>
+constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
+ const T& hi,
+ ComparatorType comp) {
+ KOKKOS_EXPECTS(!comp(hi, lo));
+ return comp(value, lo) ? lo : comp(hi, value) ? hi : value;
+}
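+
+// Example (illustrative sketch): clamp() restricts a value to the closed
+// interval [lo, hi] and returns a reference to one of its arguments:
+//
+//   Kokkos::clamp(5, 0, 10);   // 5  (already inside the interval)
+//   Kokkos::clamp(-3, 0, 10);  // 0  (raised to the lower bound)
+//   Kokkos::clamp(42, 0, 10);  // 10 (lowered to the upper bound)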
+
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_COMPLEX_HPP
#define KOKKOS_COMPLEX_HPP
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Atomic.hpp>
#include <Kokkos_MathematicalFunctions.hpp>
#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_ReductionIdentity.hpp>
#include <impl/Kokkos_Error.hpp>
#include <complex>
#include <type_traits>
#include <iosfwd>
+#include <tuple>
namespace Kokkos {
alignas(2 * sizeof(RealType))
#endif
complex {
+ static_assert(std::is_floating_point_v<RealType> &&
+ std::is_same_v<RealType, std::remove_cv_t<RealType>>,
+ "Kokkos::complex can only be instantiated for a cv-unqualified "
+ "floating point type");
+
private:
RealType re_{};
RealType im_{};
complex& operator=(const complex&) noexcept = default;
/// \brief Conversion constructor from compatible RType
- template <
- class RType,
- std::enable_if_t<std::is_convertible<RType, RealType>::value, int> = 0>
+ template <class RType,
+ std::enable_if_t<std::is_convertible_v<RType, RealType>, int> = 0>
KOKKOS_INLINE_FUNCTION complex(const complex<RType>& other) noexcept
// Intentionally do the conversions implicitly here so that users don't
// get any warnings about narrowing, etc., that they would expect to get
return *this;
}
- //---------------------------------------------------------------------------
- // TODO: refactor Kokkos reductions to remove dependency on
- // volatile member overloads since they are being deprecated in c++20
- //---------------------------------------------------------------------------
+ template <size_t I, typename RT>
+ friend constexpr const RT& get(const complex<RT>&) noexcept;
+ template <size_t I, typename RT>
+ friend constexpr const RT&& get(const complex<RT>&&) noexcept;
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
//! Copy constructor from volatile.
- template <
- class RType,
- std::enable_if_t<std::is_convertible<RType, RealType>::value, int> = 0>
- KOKKOS_INLINE_FUNCTION complex(const volatile complex<RType>& src) noexcept
+ template <class RType,
+ std::enable_if_t<std::is_convertible_v<RType, RealType>, int> = 0>
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION
+ complex(const volatile complex<RType>& src) noexcept
// Intentionally do the conversions implicitly here so that users don't
// get any warnings about narrowing, etc., that they would expect to get
// otherwise.
// vl = r;
// vl = cr;
template <class Complex,
- std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
- KOKKOS_INLINE_FUNCTION void operator=(const Complex& src) volatile noexcept {
+ std::enable_if_t<std::is_same_v<Complex, complex>, int> = 0>
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator=(
+ const Complex& src) volatile noexcept {
re_ = src.re_;
im_ = src.im_;
// We deliberately do not return anything here. See explanation
// vl = vr;
// vl = cvr;
template <class Complex,
- std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
- KOKKOS_INLINE_FUNCTION volatile complex& operator=(
+ std::enable_if_t<std::is_same_v<Complex, complex>, int> = 0>
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION volatile complex& operator=(
const volatile Complex& src) volatile noexcept {
re_ = src.re_;
im_ = src.im_;
// l = cvr;
//
template <class Complex,
- std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
- KOKKOS_INLINE_FUNCTION complex& operator=(
+ std::enable_if_t<std::is_same_v<Complex, complex>, int> = 0>
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION complex& operator=(
const volatile Complex& src) noexcept {
re_ = src.re_;
im_ = src.im_;
// RealType RHS versions.
//! Assignment operator (from a volatile real number).
- KOKKOS_INLINE_FUNCTION void operator=(const volatile RealType& val) noexcept {
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator=(
+ const volatile RealType& val) noexcept {
re_ = val;
im_ = RealType(0);
// We deliberately do not return anything here. See explanation
}
//! Assignment operator volatile LHS and non-volatile RHS
- KOKKOS_INLINE_FUNCTION complex& operator=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION complex& operator=(
const RealType& val) volatile noexcept {
re_ = val;
im_ = RealType(0);
//! Assignment operator volatile LHS and volatile RHS
// TODO Should this return void like the other volatile assignment operators?
- KOKKOS_INLINE_FUNCTION complex& operator=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION complex& operator=(
const volatile RealType& val) volatile noexcept {
re_ = val;
im_ = RealType(0);
}
//! The imaginary part of this complex number (volatile overload).
- KOKKOS_INLINE_FUNCTION
- volatile RealType& imag() volatile noexcept { return im_; }
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION volatile RealType&
+ imag() volatile noexcept {
+ return im_;
+ }
//! The real part of this complex number (volatile overload).
- KOKKOS_INLINE_FUNCTION
- volatile RealType& real() volatile noexcept { return re_; }
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION volatile RealType&
+ real() volatile noexcept {
+ return re_;
+ }
//! The imaginary part of this complex number (volatile overload).
- KOKKOS_INLINE_FUNCTION
- RealType imag() const volatile noexcept { return im_; }
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION RealType imag() const
+ volatile noexcept {
+ return im_;
+ }
//! The real part of this complex number (volatile overload).
- KOKKOS_INLINE_FUNCTION
- RealType real() const volatile noexcept { return re_; }
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION RealType real() const
+ volatile noexcept {
+ return re_;
+ }
- KOKKOS_INLINE_FUNCTION void operator+=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator+=(
const volatile complex<RealType>& src) volatile noexcept {
re_ += src.re_;
im_ += src.im_;
}
- KOKKOS_INLINE_FUNCTION void operator+=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator+=(
const volatile RealType& src) volatile noexcept {
re_ += src;
}
- KOKKOS_INLINE_FUNCTION void operator*=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator*=(
const volatile complex<RealType>& src) volatile noexcept {
const RealType realPart = re_ * src.re_ - im_ * src.im_;
const RealType imagPart = re_ * src.im_ + im_ * src.re_;
im_ = imagPart;
}
- KOKKOS_INLINE_FUNCTION void operator*=(
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION void operator*=(
const volatile RealType& src) volatile noexcept {
re_ *= src;
im_ *= src;
}
+#endif // KOKKOS_ENABLE_DEPRECATED_CODE_4
+};
- // TODO DSH 2019-10-7 why are there no volatile /= and friends?
+} // namespace Kokkos
+
+// Tuple protocol for complex based on https://wg21.link/P2819R2 (voted into
+// the C++26 working draft in November 2023)
+
+template <typename RealType>
+struct std::tuple_size<Kokkos::complex<RealType>>
+ : std::integral_constant<size_t, 2> {};
+
+template <size_t I, typename RealType>
+struct std::tuple_element<I, Kokkos::complex<RealType>> {
+ static_assert(I < 2);
+ using type = RealType;
};
+namespace Kokkos {
+
+// get<...>(...) defined here so as not to be hidden friends, as per P2819R2
+
+template <size_t I, typename RealType>
+KOKKOS_FUNCTION constexpr RealType& get(complex<RealType>& z) noexcept {
+ static_assert(I < 2);
+ if constexpr (I == 0)
+ return z.real();
+ else
+ return z.imag();
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
+
+template <size_t I, typename RealType>
+KOKKOS_FUNCTION constexpr RealType&& get(complex<RealType>&& z) noexcept {
+ static_assert(I < 2);
+ if constexpr (I == 0)
+ return std::move(z.real());
+ else
+ return std::move(z.imag());
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
+
+template <size_t I, typename RealType>
+KOKKOS_FUNCTION constexpr const RealType& get(
+ const complex<RealType>& z) noexcept {
+ static_assert(I < 2);
+ if constexpr (I == 0)
+ return z.re_;
+ else
+ return z.im_;
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
+
+template <size_t I, typename RealType>
+KOKKOS_FUNCTION constexpr const RealType&& get(
+ const complex<RealType>&& z) noexcept {
+ static_assert(I < 2);
+ if constexpr (I == 0)
+ return std::move(z.re_);
+ else
+ return std::move(z.im_);
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
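+
+// Example (illustrative sketch): with the tuple protocol above, a
+// Kokkos::complex decomposes via structured bindings:
+//
+//   Kokkos::complex<double> z(1.0, 2.0);
+//   auto [re, im] = z;  // re == 1.0, im == 2.0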
+
//==============================================================================
// <editor-fold desc="Equality and inequality"> {{{1
template <
class RealType1, class RealType2,
    // Constraints to avoid participation in operator==() for every possible RHS
- std::enable_if_t<std::is_convertible<RealType2, RealType1>::value, int> = 0>
+ std::enable_if_t<std::is_convertible_v<RealType2, RealType1>, int> = 0>
KOKKOS_INLINE_FUNCTION bool operator==(complex<RealType1> const& x,
RealType2 const& y) noexcept {
using common_type = std::common_type_t<RealType1, RealType2>;
template <
class RealType1, class RealType2,
    // Constraints to avoid participation in operator==() for every possible RHS
- std::enable_if_t<std::is_convertible<RealType1, RealType2>::value, int> = 0>
+ std::enable_if_t<std::is_convertible_v<RealType1, RealType2>, int> = 0>
KOKKOS_INLINE_FUNCTION bool operator==(RealType1 const& x,
complex<RealType2> const& y) noexcept {
using common_type = std::common_type_t<RealType1, RealType2>;
template <
class RealType1, class RealType2,
    // Constraints to avoid participation in operator!=() for every possible RHS
- std::enable_if_t<std::is_convertible<RealType2, RealType1>::value, int> = 0>
+ std::enable_if_t<std::is_convertible_v<RealType2, RealType1>, int> = 0>
KOKKOS_INLINE_FUNCTION bool operator!=(complex<RealType1> const& x,
RealType2 const& y) noexcept {
using common_type = std::common_type_t<RealType1, RealType2>;
template <
class RealType1, class RealType2,
    // Constraints to avoid participation in operator!=() for every possible RHS
- std::enable_if_t<std::is_convertible<RealType1, RealType2>::value, int> = 0>
+ std::enable_if_t<std::is_convertible_v<RealType1, RealType2>, int> = 0>
KOKKOS_INLINE_FUNCTION bool operator!=(RealType1 const& x,
complex<RealType2> const& y) noexcept {
using common_type = std::common_type_t<RealType1, RealType2>;
return x == T() ? T() : exp(y * log(x));
}
-template <class T, class U,
- class = std::enable_if_t<std::is_arithmetic<T>::value>>
+template <class T, class U, class = std::enable_if_t<std::is_arithmetic_v<T>>>
KOKKOS_INLINE_FUNCTION complex<Impl::promote_2_t<T, U>> pow(
const T& x, const complex<U>& y) {
using type = Impl::promote_2_t<T, U>;
return pow(type(x), complex<type>(y));
}
-template <class T, class U,
- class = std::enable_if_t<std::is_arithmetic<U>::value>>
+template <class T, class U, class = std::enable_if_t<std::is_arithmetic_v<U>>>
KOKKOS_INLINE_FUNCTION complex<Impl::promote_2_t<T, U>> pow(const complex<T>& x,
const U& y) {
using type = Impl::promote_2_t<T, U>;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_CORE_CONCEPTS_HPP
#define KOKKOS_CORE_CONCEPTS_HPP
// Schedule Wrapper Type
template <class T>
struct Schedule {
- static_assert(std::is_same<T, Static>::value ||
- std::is_same<T, Dynamic>::value,
+ static_assert(std::is_same_v<T, Static> || std::is_same_v<T, Dynamic>,
"Kokkos: Invalid Schedule<> type.");
using schedule_type = Schedule;
using type = T;
// Specify Iteration Index Type
template <typename T>
struct IndexType {
- static_assert(std::is_integral<T>::value, "Kokkos: Invalid IndexType<>.");
+ static_assert(std::is_integral_v<T>, "Kokkos: Invalid IndexType<>.");
using index_type = IndexType;
using type = T;
};
ImplWorkItemProperty<4>();
constexpr static const ImplWorkItemProperty<8> HintIrregular =
ImplWorkItemProperty<8>();
- using None_t = ImplWorkItemProperty<0>;
- using HintLightWeight_t = ImplWorkItemProperty<1>;
- using HintHeavyWeight_t = ImplWorkItemProperty<2>;
- using HintRegular_t = ImplWorkItemProperty<4>;
- using HintIrregular_t = ImplWorkItemProperty<8>;
+ constexpr static const ImplWorkItemProperty<16> ImplForceGlobalLaunch =
+ ImplWorkItemProperty<16>();
+ using None_t = ImplWorkItemProperty<0>;
+ using HintLightWeight_t = ImplWorkItemProperty<1>;
+ using HintHeavyWeight_t = ImplWorkItemProperty<2>;
+ using HintRegular_t = ImplWorkItemProperty<4>;
+ using HintIrregular_t = ImplWorkItemProperty<8>;
+ using ImplForceGlobalLaunch_t = ImplWorkItemProperty<16>;
};
template <unsigned long pv1, unsigned long pv2>
struct LaunchBounds {
using launch_bounds = LaunchBounds;
using type = LaunchBounds<maxT, minB>;
- static unsigned int constexpr maxTperB{maxT};
- static unsigned int constexpr minBperSM{minB};
+ static constexpr unsigned int maxTperB{maxT};
+ static constexpr unsigned int minBperSM{minB};
};
} // namespace Kokkos
\
public: \
static constexpr bool value = \
- std::is_base_of<detected_t<have_t, T>, T>::value || \
- std::is_base_of<detected_t<have_type_t, T>, T>::value; \
+ std::is_base_of_v<detected_t<have_t, T>, T> || \
+ std::is_base_of_v<detected_t<have_type_t, T>, T>; \
constexpr operator bool() const noexcept { return value; } \
- };
+ }; \
+ template <typename T> \
+ inline constexpr bool is_##CONCEPT##_v = is_##CONCEPT<T>::value;
// Public concept:
KOKKOS_IMPL_IS_CONCEPT(execution_policy)
KOKKOS_IMPL_IS_CONCEPT(array_layout)
KOKKOS_IMPL_IS_CONCEPT(reducer)
+KOKKOS_IMPL_IS_CONCEPT(team_handle)
namespace Experimental {
KOKKOS_IMPL_IS_CONCEPT(work_item_property)
KOKKOS_IMPL_IS_CONCEPT(hooks_policy)
namespace Impl {
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-// For backward compatibility:
-
-template <typename T>
-using is_array_layout KOKKOS_DEPRECATED = Kokkos::is_array_layout<T>;
-template <typename T>
-using is_execution_policy KOKKOS_DEPRECATED = Kokkos::is_execution_policy<T>;
-template <typename T>
-using is_execution_space KOKKOS_DEPRECATED = Kokkos::is_execution_space<T>;
-template <typename T>
-using is_memory_space KOKKOS_DEPRECATED = Kokkos::is_memory_space<T>;
-template <typename T>
-using is_memory_traits KOKKOS_DEPRECATED = Kokkos::is_memory_traits<T>;
-#endif
-
// Implementation concept:
KOKKOS_IMPL_IS_CONCEPT(thread_team_member)
template <typename T>
using is_device = typename Impl::is_device_helper<std::remove_cv_t<T>>::type;
+template <typename T>
+inline constexpr bool is_device_v = is_device<T>::value;
+
//----------------------------------------------------------------------------
template <typename T>
using execution_space = typename is_exe::space;
using memory_space = typename is_mem::space;
-
- // For backward compatibility, deprecated in favor of
- // Kokkos::Impl::HostMirror<S>::host_mirror_space
-
- private:
- // The actual definitions for host_memory_space and host_execution_spaces are
- // in do_not_use_host_memory_space and do_not_use_host_execution_space to be
- // able to use them within this class without deprecation warnings.
- using do_not_use_host_memory_space = std::conditional_t<
- std::is_same<memory_space, Kokkos::HostSpace>::value
-#if defined(KOKKOS_ENABLE_CUDA)
- || std::is_same<memory_space, Kokkos::CudaUVMSpace>::value ||
- std::is_same<memory_space, Kokkos::CudaHostPinnedSpace>::value
-#elif defined(KOKKOS_ENABLE_HIP)
- || std::is_same<memory_space,
- Kokkos::Experimental::HIPHostPinnedSpace>::value ||
- std::is_same<memory_space,
- Kokkos::Experimental::HIPManagedSpace>::value
-#elif defined(KOKKOS_ENABLE_SYCL)
- || std::is_same<memory_space,
- Kokkos::Experimental::SYCLSharedUSMSpace>::value ||
- std::is_same<memory_space,
- Kokkos::Experimental::SYCLHostUSMSpace>::value
-#endif
- ,
- memory_space, Kokkos::HostSpace>;
-
- using do_not_use_host_execution_space = std::conditional_t<
-#if defined(KOKKOS_ENABLE_CUDA)
- std::is_same<execution_space, Kokkos::Cuda>::value ||
-#elif defined(KOKKOS_ENABLE_HIP)
- std::is_same<execution_space, Kokkos::Experimental::HIP>::value ||
-#elif defined(KOKKOS_ENABLE_SYCL)
- std::is_same<execution_space, Kokkos::Experimental::SYCL>::value ||
-#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
- std::is_same<execution_space,
- Kokkos::Experimental::OpenMPTarget>::value ||
-#endif
- false,
- Kokkos::DefaultHostExecutionSpace, execution_space>;
-
- public:
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- using host_memory_space KOKKOS_DEPRECATED = do_not_use_host_memory_space;
- using host_execution_space KOKKOS_DEPRECATED =
- do_not_use_host_execution_space;
- using host_mirror_space KOKKOS_DEPRECATED = std::conditional_t<
- std::is_same<execution_space, do_not_use_host_execution_space>::value &&
- std::is_same<memory_space, do_not_use_host_memory_space>::value,
- T,
- Kokkos::Device<do_not_use_host_execution_space,
- do_not_use_host_memory_space>>;
-#endif
};
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-// For backward compatibility
-
-namespace Impl {
-
-template <typename T>
-using is_space KOKKOS_DEPRECATED = Kokkos::is_space<T>;
-
-}
-#endif
-
} // namespace Kokkos
//----------------------------------------------------------------------------
* 2. All execution spaces that can access DstMemorySpace can also access
* SrcMemorySpace.
*/
- enum { assignable = std::is_same<DstMemorySpace, SrcMemorySpace>::value };
+ enum { assignable = std::is_same_v<DstMemorySpace, SrcMemorySpace> };
/**\brief For all DstExecSpace::memory_space == DstMemorySpace
* DstExecSpace can access SrcMemorySpace.
// If same memory space or not accessible use the AccessSpace
// else construct a device with execution space and memory space.
using space = std::conditional_t<
- std::is_same<typename AccessSpace::memory_space, MemorySpace>::value ||
+ std::is_same_v<typename AccessSpace::memory_space, MemorySpace> ||
!exe_access::accessible,
AccessSpace,
Kokkos::Device<typename AccessSpace::execution_space, MemorySpace>>;
} // namespace Kokkos
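// Illustrative usage, not from the patch: SpaceAccessibility answers at
// compile time whether AccessSpace can reach MemorySpace; e.g. host
// execution spaces can always reach HostSpace.
static_assert(Kokkos::SpaceAccessibility<Kokkos::DefaultHostExecutionSpace,
                                         Kokkos::HostSpace>::accessible,
              "host execution spaces can access HostSpace");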
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-namespace Kokkos {
-namespace Impl {
-
-// For backward compatibility
-template <typename AccessSpace, typename MemorySpace>
-using SpaceAccessibility KOKKOS_DEPRECATED =
- Kokkos::SpaceAccessibility<AccessSpace, MemorySpace>;
-
-} // namespace Impl
-} // namespace Kokkos
-#endif
-
//----------------------------------------------------------------------------
#endif // KOKKOS_CORE_CONCEPTS_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_COPYVIEWS_HPP_
#define KOKKOS_COPYVIEWS_HPP_
#include <string>
+#include <sstream>
#include <Kokkos_Parallel.hpp>
#include <KokkosExp_MDRangePolicy.hpp>
#include <Kokkos_Layout.hpp>
+#include <impl/Kokkos_HostSpace_ZeroMemset.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
const ExecSpace& space)
: a(a_), val(val_) {
+ // MDRangePolicy is not supported for 7D views
+ // Iterate separately over extent(2)
Kokkos::parallel_for("Kokkos::ViewFill-7D",
policy_type(space, {0, 0, 0, 0, 0, 0},
- {a.extent(0), a.extent(1), a.extent(2),
- a.extent(3), a.extent(5), a.extent(6)}),
+ {a.extent(0), a.extent(1), a.extent(3),
+ a.extent(4), a.extent(5), a.extent(6)}),
*this);
}
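  // Illustrative sketch, not from the patch: the matching rank-7 operator()
  // receives the six policy indices and loops over the omitted dimension 2,
  // consistent with the bounds set above.
  KOKKOS_INLINE_FUNCTION
  void operator()(const iType& i0, const iType& i1, const iType& i3,
                  const iType& i4, const iType& i5, const iType& i6) const {
    for (iType i2 = 0; i2 < iType(a.extent(2)); i2++)
      a(i0, i1, i2, i3, i4, i5, i6) = val;
  }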
ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
const ExecSpace& space)
: a(a_), val(val_) {
+ // MDRangePolicy is not supported for 8D views
+ // Iterate separately over extent(2) and extent(4)
Kokkos::parallel_for("Kokkos::ViewFill-8D",
policy_type(space, {0, 0, 0, 0, 0, 0},
{a.extent(0), a.extent(1), a.extent(3),
ViewTypeA a;
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<2, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<3, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<4, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<5, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
const ExecSpace space = ExecSpace())
: a(a_), b(b_) {
+ // MDRangePolicy is not supported for 7D views
+ // Iterate separately over extent(2)
Kokkos::parallel_for("Kokkos::ViewCopy-7D",
policy_type(space, {0, 0, 0, 0, 0, 0},
{a.extent(0), a.extent(1), a.extent(3),
ViewTypeB b;
static const Kokkos::Iterate outer_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::outer_iteration_pattern;
static const Kokkos::Iterate inner_iteration_pattern =
- Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+ Kokkos::Impl::layout_iterate_type_selector<
+ Layout>::inner_iteration_pattern;
using iterate_type =
Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
using policy_type =
ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
const ExecSpace space = ExecSpace())
: a(a_), b(b_) {
+ // MDRangePolicy is not supported for 8D views
+ // Iterate separately over extent(2) and extent(4)
Kokkos::parallel_for("Kokkos::ViewCopy-8D",
policy_type(space, {0, 0, 0, 0, 0, 0},
{a.extent(0), a.extent(1), a.extent(3),
"Kokkos::Impl::view_copy called with invalid execution space");
} else {
// Figure out iteration order in case we need it
- int64_t strides[DstType::Rank + 1];
+ int64_t strides[DstType::rank + 1];
dst.stride(strides);
Kokkos::Iterate iterate;
- if (Kokkos::is_layouttiled<typename DstType::array_layout>::value) {
- iterate = Kokkos::layout_iterate_type_selector<
- typename DstType::array_layout>::outer_iteration_pattern;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutRight>::value) {
+ if (std::is_same_v<typename DstType::array_layout, Kokkos::LayoutRight>) {
iterate = Kokkos::Iterate::Right;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutLeft>::value) {
+ } else if (std::is_same_v<typename DstType::array_layout,
+ Kokkos::LayoutLeft>) {
iterate = Kokkos::Iterate::Left;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutStride>::value) {
- if (strides[0] > strides[DstType::Rank - 1])
+ } else if (std::is_same_v<typename DstType::array_layout,
+ Kokkos::LayoutStride>) {
+ if (strides[0] > strides[DstType::rank - 1])
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
} else {
- if (std::is_same<typename DstType::execution_space::array_layout,
- Kokkos::LayoutRight>::value)
+ if (std::is_same_v<typename DstType::execution_space::array_layout,
+ Kokkos::LayoutRight>)
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
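      // Editorial note, not from the patch, giving a concrete instance of the
      // stride test above: a rank-2 LayoutStride view of extents {N0, N1}
      // mapped like LayoutLeft has strides {1, N0}, so
      // strides[0] < strides[rank - 1] and Iterate::Left (leftmost index
      // fastest) is chosen; the transposed mapping selects Iterate::Right.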
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, ExecutionSpace, DstType::Rank, int64_t>(
+ Kokkos::LayoutRight, ExecutionSpace, DstType::rank, int64_t>(
dst, src, space);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, ExecutionSpace, DstType::Rank, int64_t>(
+ Kokkos::LayoutLeft, ExecutionSpace, DstType::rank, int64_t>(
dst, src, space);
} else {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, ExecutionSpace, DstType::Rank, int>(dst, src,
+ Kokkos::LayoutRight, ExecutionSpace, DstType::rank, int>(dst, src,
space);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, ExecutionSpace, DstType::Rank, int>(dst, src,
+ Kokkos::LayoutLeft, ExecutionSpace, DstType::rank, int>(dst, src,
space);
}
}
};
if (!DstExecCanAccessSrc && !SrcExecCanAccessDst) {
- std::string message(
- "Error: Kokkos::deep_copy with no available copy mechanism: ");
- message += src.label();
- message += " to ";
- message += dst.label();
- Kokkos::Impl::throw_runtime_exception(message);
+ std::ostringstream ss;
+ ss << "Error: Kokkos::deep_copy with no available copy mechanism: "
+ << "from source view (\"" << src.label() << "\") to destination view (\""
+ << dst.label() << "\").\n"
+ << "There is no common execution space that can access both source's "
+ "space\n"
+ << "(" << src_memory_space().name() << ") and destination's space ("
+ << dst_memory_space().name() << "), "
+ << "so source and destination\n"
+ << "must be contiguous and have the same layout.\n";
+ Kokkos::Impl::throw_runtime_exception(ss.str());
}
// Figure out iteration order in case we need it
- int64_t strides[DstType::Rank + 1];
+ int64_t strides[DstType::rank + 1];
dst.stride(strides);
Kokkos::Iterate iterate;
- if (Kokkos::is_layouttiled<typename DstType::array_layout>::value) {
- iterate = Kokkos::layout_iterate_type_selector<
- typename DstType::array_layout>::outer_iteration_pattern;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutRight>::value) {
+ if (std::is_same_v<typename DstType::array_layout, Kokkos::LayoutRight>) {
iterate = Kokkos::Iterate::Right;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutLeft>::value) {
+ } else if (std::is_same_v<typename DstType::array_layout,
+ Kokkos::LayoutLeft>) {
iterate = Kokkos::Iterate::Left;
- } else if (std::is_same<typename DstType::array_layout,
- Kokkos::LayoutStride>::value) {
- if (strides[0] > strides[DstType::Rank - 1])
+ } else if (std::is_same_v<typename DstType::array_layout,
+ Kokkos::LayoutStride>) {
+ if (strides[0] > strides[DstType::rank - 1])
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
} else {
- if (std::is_same<typename DstType::execution_space::array_layout,
- Kokkos::LayoutRight>::value)
+ if (std::is_same_v<typename DstType::execution_space::array_layout,
+ Kokkos::LayoutRight>)
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, dst_execution_space, DstType::Rank, int64_t>(
+ Kokkos::LayoutRight, dst_execution_space, DstType::rank, int64_t>(
dst, src);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, dst_execution_space, DstType::Rank, int64_t>(
+ Kokkos::LayoutLeft, dst_execution_space, DstType::rank, int64_t>(
dst, src);
} else {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, src_execution_space, DstType::Rank, int64_t>(
+ Kokkos::LayoutRight, src_execution_space, DstType::rank, int64_t>(
dst, src);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, src_execution_space, DstType::Rank, int64_t>(
+ Kokkos::LayoutLeft, src_execution_space, DstType::rank, int64_t>(
dst, src);
}
} else {
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, dst_execution_space, DstType::Rank, int>(dst,
+ Kokkos::LayoutRight, dst_execution_space, DstType::rank, int>(dst,
src);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, dst_execution_space, DstType::Rank, int>(dst,
+ Kokkos::LayoutLeft, dst_execution_space, DstType::rank, int>(dst,
src);
} else {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutRight, src_execution_space, DstType::Rank, int>(dst,
+ Kokkos::LayoutRight, src_execution_space, DstType::rank, int>(dst,
src);
else
Kokkos::Impl::ViewCopy<
typename DstType::uniform_runtime_nomemspace_type,
typename SrcType::uniform_runtime_const_nomemspace_type,
- Kokkos::LayoutLeft, src_execution_space, DstType::Rank, int>(dst,
+ Kokkos::LayoutLeft, src_execution_space, DstType::rank, int>(dst,
src);
}
}
template <class DstType, class SrcType,
class ExecSpace = typename DstType::execution_space,
- int Rank = DstType::Rank>
+ int Rank = DstType::rank>
struct ViewRemap;
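// Illustrative sketch, not from the patch: what the rank-specific
// specializations below do, expressed with the public subview API. When
// extents differ, deep_copy copies only the common index range, conceptually:
//   auto ext1 = std::make_pair(std::size_t(0),
//                              std::min(dst.extent(1), src.extent(1)));
//   auto dst_sub = Kokkos::subview(dst, Kokkos::ALL, ext1);
//   auto src_sub = Kokkos::subview(src, Kokkos::ALL, ext1);
//   view_copy(dst_sub, src_sub);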
template <class DstType, class SrcType, class ExecSpace>
} else {
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 2, Kokkos::Impl::ALL_t, p_type>;
+ CommonSubview<DstType, SrcType, 2, Kokkos::ALL_t, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
if (dst.extent(1) == src.extent(1)) {
p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 2, p_type, Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 2, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
if (dst.extent(2) == src.extent(2)) {
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 3, Kokkos::Impl::ALL_t, p_type,
- Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 3, Kokkos::ALL_t, p_type,
+ Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1,
Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 3, Kokkos::Impl::ALL_t, p_type,
- p_type>;
+ CommonSubview<DstType, SrcType, 3, Kokkos::ALL_t, p_type, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
if (dst.extent(2) == src.extent(2)) {
p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
- using sv_adapter_type = CommonSubview<DstType, SrcType, 3, p_type,
- p_type, Kokkos::Impl::ALL_t>;
+ using sv_adapter_type =
+ CommonSubview<DstType, SrcType, 3, p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 4, Kokkos::Impl::ALL_t, p_type,
- p_type, Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 4, Kokkos::ALL_t, p_type, p_type,
+ Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2,
Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 4, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type>;
+ CommonSubview<DstType, SrcType, 4, Kokkos::ALL_t, p_type, p_type,
+ p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
- using sv_adapter_type =
- CommonSubview<DstType, SrcType, 4, p_type, p_type, p_type,
- Kokkos::Impl::ALL_t>;
+ using sv_adapter_type = CommonSubview<DstType, SrcType, 4, p_type,
+ p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
common_subview.src_sub);
p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 5, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 5, Kokkos::ALL_t, p_type, p_type,
+ p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 5, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type>;
+ CommonSubview<DstType, SrcType, 5, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
using sv_adapter_type =
CommonSubview<DstType, SrcType, 5, p_type, p_type, p_type, p_type,
- Kokkos::Impl::ALL_t>;
+ Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3,
Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 6, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 6, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 6, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, p_type>;
+ CommonSubview<DstType, SrcType, 6, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, ext5);
view_copy(exec_space..., common_subview.dst_sub,
using sv_adapter_type =
CommonSubview<DstType, SrcType, 6, p_type, p_type, p_type, p_type,
- p_type, Kokkos::Impl::ALL_t>;
+ p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 7, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 7, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, ext5, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 7, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, p_type, p_type>;
+ CommonSubview<DstType, SrcType, 7, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, p_type, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, ext5, ext6);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
using sv_adapter_type =
CommonSubview<DstType, SrcType, 7, p_type, p_type, p_type, p_type,
- p_type, p_type, Kokkos::Impl::ALL_t>;
+ p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
ext5, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 8, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, p_type, p_type,
- Kokkos::Impl::ALL_t>;
+ CommonSubview<DstType, SrcType, 8, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, ext5, ext6, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
p_type ext7(0, std::min(dst.extent(7), src.extent(7)));
using sv_adapter_type =
- CommonSubview<DstType, SrcType, 8, Kokkos::Impl::ALL_t, p_type,
- p_type, p_type, p_type, p_type, p_type, p_type>;
+ CommonSubview<DstType, SrcType, 8, Kokkos::ALL_t, p_type, p_type,
+ p_type, p_type, p_type, p_type, p_type>;
sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
ext4, ext5, ext6, ext7);
view_copy(exec_space..., common_subview.dst_sub,
p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
using sv_adapter_type =
CommonSubview<DstType, SrcType, 8, p_type, p_type, p_type, p_type,
- p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+ p_type, p_type, p_type, Kokkos::ALL_t>;
sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
ext5, ext6, Kokkos::ALL);
view_copy(exec_space..., common_subview.dst_sub,
using ViewTypeFlat = Kokkos::View<
typename ViewType::value_type*, Kokkos::LayoutRight,
Kokkos::Device<typename ViewType::execution_space,
- std::conditional_t<ViewType::Rank == 0,
+ std::conditional_t<ViewType::rank == 0,
typename ViewType::memory_space,
Kokkos::AnonymousSpace>>,
Kokkos::MemoryTraits<0>>;
ViewTypeFlat dst_flat(dst.data(), dst.size());
if (dst.span() < static_cast<size_t>(std::numeric_limits<int>::max())) {
Kokkos::Impl::ViewFill<ViewTypeFlat, Kokkos::LayoutRight, ExecutionSpace,
- ViewTypeFlat::Rank, int>(dst_flat, value,
+ ViewTypeFlat::rank, int>(dst_flat, value,
exec_space);
} else
Kokkos::Impl::ViewFill<ViewTypeFlat, Kokkos::LayoutRight, ExecutionSpace,
- ViewTypeFlat::Rank, int64_t>(dst_flat, value,
+ ViewTypeFlat::rank, int64_t>(dst_flat, value,
exec_space);
}
-template <typename ExecutionSpace, class DT, class... DP>
+// Default implementation for execution spaces that don't provide their own
+// ZeroMemset specialization
+template <typename ExecutionSpace>
struct ZeroMemset {
- ZeroMemset(const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
- typename ViewTraits<DT, DP...>::const_value_type& value) {
- contiguous_fill(exec_space, dst, value);
- }
-
- ZeroMemset(const View<DT, DP...>& dst,
- typename ViewTraits<DT, DP...>::const_value_type& value) {
- contiguous_fill(ExecutionSpace(), dst, value);
+ ZeroMemset(const ExecutionSpace& exec_space, void* dst, size_t cnt) {
+ contiguous_fill(
+ exec_space,
+ Kokkos::View<std::byte*, ExecutionSpace, Kokkos::MemoryUnmanaged>(
+ static_cast<std::byte*>(dst), cnt),
+ std::byte{});
}
};
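// Illustrative sketch, not from the patch: a backend can short-circuit the
// byte-fill fallback above by specializing ZeroMemset for its execution
// space. For instance, a CUDA-style backend could dispatch to an
// asynchronous memset on the space's stream (assuming the CUDA runtime API):
//   template <>
//   struct ZeroMemset<Kokkos::Cuda> {
//     ZeroMemset(const Kokkos::Cuda& exec_space, void* dst, size_t cnt) {
//       cudaMemsetAsync(dst, 0, cnt, exec_space.cuda_stream());
//     }
//   };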
template <typename ExecutionSpace, class DT, class... DP>
inline std::enable_if_t<
- std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
- std::is_trivially_copy_assignable<
- typename ViewTraits<DT, DP...>::value_type>::value>
+ std::is_trivial_v<typename ViewTraits<DT, DP...>::value_type>>
contiguous_fill_or_memset(
const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value) {
-// On A64FX memset seems to do the wrong thing with regards to first touch
-// leading to the significant performance issues
-#ifndef KOKKOS_ARCH_A64FX
- if (Impl::is_zero_byte(value))
- ZeroMemset<ExecutionSpace, DT, DP...>(exec_space, dst, value);
- else
+ // With OpenMP, using memset has significant performance issues.
+ if (Impl::is_zero_byte(value)
+#ifdef KOKKOS_ENABLE_OPENMP
+ && !std::is_same_v<ExecutionSpace, Kokkos::OpenMP>
#endif
+ )
+ // FIXME intel/19 icpc fails to deduce template parameter here,
+ // resulting in compilation errors; explicitly passing the template
+ // parameter to ZeroMemset helps work around the issue.
+ // See https://github.com/kokkos/kokkos/issues/7273.
+ ZeroMemset<ExecutionSpace>(
+ exec_space, dst.data(),
+ dst.size() * sizeof(typename ViewTraits<DT, DP...>::value_type));
+ else
contiguous_fill(exec_space, dst, value);
}
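// Illustrative usage, not from the patch: how the branch above is reached
// through the public API (view, function name, and values hypothetical).
inline void example_fill_paths(const Kokkos::View<double*>& v) {
  Kokkos::deep_copy(v, 0.0);  // all-zero bytes: eligible for ZeroMemset
  Kokkos::deep_copy(v, 1.0);  // non-zero value: falls back to contiguous_fill
}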
template <typename ExecutionSpace, class DT, class... DP>
inline std::enable_if_t<
- !(std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
- std::is_trivially_copy_assignable<
- typename ViewTraits<DT, DP...>::value_type>::value)>
+ !std::is_trivial_v<typename ViewTraits<DT, DP...>::value_type>>
contiguous_fill_or_memset(
const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value) {
template <class DT, class... DP>
inline std::enable_if_t<
- std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
- std::is_trivially_copy_assignable<
- typename ViewTraits<DT, DP...>::value_type>::value>
+ std::is_trivial_v<typename ViewTraits<DT, DP...>::value_type>>
contiguous_fill_or_memset(
const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value) {
using ViewType = View<DT, DP...>;
using exec_space_type = typename ViewType::execution_space;
+ exec_space_type exec;
  // On A64FX memset seems to do the wrong thing with regard to first touch,
  // leading to significant performance issues
#ifndef KOKKOS_ARCH_A64FX
if (Impl::is_zero_byte(value))
- ZeroMemset<exec_space_type, DT, DP...>(dst, value);
+ // FIXME intel/19 icpc fails to deduce template parameter here,
+ // resulting in compilation errors; explicitly passing the template
+ // parameter to ZeroMemset helps work around the issue.
+ // See https://github.com/kokkos/kokkos/issues/7273.
+ ZeroMemset<exec_space_type>(
+ exec, dst.data(), dst.size() * sizeof(typename ViewType::value_type));
else
#endif
- contiguous_fill(exec_space_type(), dst, value);
+ contiguous_fill(exec, dst, value);
}
template <class DT, class... DP>
inline std::enable_if_t<
- !(std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
- std::is_trivially_copy_assignable<
- typename ViewTraits<DT, DP...>::value_type>::value)>
+ !std::is_trivial_v<typename ViewTraits<DT, DP...>::value_type>>
contiguous_fill_or_memset(
const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value) {
inline void deep_copy(
const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
using ViewType = View<DT, DP...>;
using exec_space_type = typename ViewType::execution_space;
}
Kokkos::fence("Kokkos::deep_copy: scalar copy, pre copy fence");
- static_assert(std::is_same<typename ViewType::non_const_value_type,
- typename ViewType::value_type>::value,
+ static_assert(std::is_same_v<typename ViewType::non_const_value_type,
+ typename ViewType::value_type>,
"deep_copy requires non-const type");
// If contiguous we can simply do a 1D flat loop or use memset
}
// Figure out iteration order to do the ViewFill
- int64_t strides[ViewType::Rank + 1];
+ int64_t strides[ViewType::rank + 1];
dst.stride(strides);
Kokkos::Iterate iterate;
- if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutRight>::value) {
+ if (std::is_same_v<typename ViewType::array_layout, Kokkos::LayoutRight>) {
iterate = Kokkos::Iterate::Right;
- } else if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutLeft>::value) {
+ } else if (std::is_same_v<typename ViewType::array_layout,
+ Kokkos::LayoutLeft>) {
iterate = Kokkos::Iterate::Left;
- } else if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutStride>::value) {
- if (strides[0] > strides[ViewType::Rank > 0 ? ViewType::Rank - 1 : 0])
+ } else if (std::is_same_v<typename ViewType::array_layout,
+ Kokkos::LayoutStride>) {
+ if (strides[0] > strides[ViewType::rank > 0 ? ViewType::rank - 1 : 0])
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
} else {
- if (std::is_same<typename ViewType::execution_space::array_layout,
- Kokkos::LayoutRight>::value)
+ if (std::is_same_v<typename ViewType::execution_space::array_layout,
+ Kokkos::LayoutRight>)
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
// Lets call the right ViewFill functor based on integer space needed and
// iteration type
using ViewTypeUniform =
- std::conditional_t<ViewType::Rank == 0,
+ std::conditional_t<ViewType::rank == 0,
typename ViewType::uniform_runtime_type,
typename ViewType::uniform_runtime_nomemspace_type>;
if (dst.span() > static_cast<size_t>(std::numeric_limits<int>::max())) {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight,
- exec_space_type, ViewType::Rank, int64_t>(
+ exec_space_type, ViewType::rank, int64_t>(
dst, value, exec_space_type());
else
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft,
- exec_space_type, ViewType::Rank, int64_t>(
+ exec_space_type, ViewType::rank, int64_t>(
dst, value, exec_space_type());
} else {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight,
- exec_space_type, ViewType::Rank, int>(
+ exec_space_type, ViewType::rank, int>(
dst, value, exec_space_type());
else
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft,
- exec_space_type, ViewType::Rank, int>(
+ exec_space_type, ViewType::rank, int>(
dst, value, exec_space_type());
}
Kokkos::fence("Kokkos::deep_copy: scalar copy, post copy fence");
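// Editorial note, not from the patch: the pre/post fences above make this
// scalar deep_copy blocking. The execution-space-instance overload further
// below instead runs the fill asynchronously on the given instance, e.g.:
//   Kokkos::DefaultExecutionSpace exec;
//   Kokkos::deep_copy(exec, v, 0.0);  // no global fence;
//   exec.fence();                     // synchronize the instance when needed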
inline void deep_copy(
typename ViewTraits<ST, SP...>::non_const_value_type& dst,
const View<ST, SP...>& src,
- std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
- void>::value>* = nullptr) {
+ std::enable_if_t<std::is_same_v<typename ViewTraits<ST, SP...>::specialize,
+ void>>* = nullptr) {
using src_traits = ViewTraits<ST, SP...>;
using src_memory_space = typename src_traits::memory_space;
inline void deep_copy(
const View<DT, DP...>& dst, const View<ST, SP...>& src,
std::enable_if_t<
- (std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
- std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+ (std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
+ std::is_void_v<typename ViewTraits<ST, SP...>::specialize> &&
(unsigned(ViewTraits<DT, DP...>::rank) == unsigned(0) &&
unsigned(ViewTraits<ST, SP...>::rank) == unsigned(0)))>* = nullptr) {
using dst_type = View<DT, DP...>;
using dst_memory_space = typename dst_type::memory_space;
using src_memory_space = typename src_type::memory_space;
- static_assert(std::is_same<typename dst_type::value_type,
- typename src_type::non_const_value_type>::value,
+ static_assert(std::is_same_v<typename dst_type::value_type,
+ typename src_type::non_const_value_type>,
"deep_copy requires matching non-const destination type");
if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
inline void deep_copy(
const View<DT, DP...>& dst, const View<ST, SP...>& src,
std::enable_if_t<
- (std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
- std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+ (std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
+ std::is_void_v<typename ViewTraits<ST, SP...>::specialize> &&
(unsigned(ViewTraits<DT, DP...>::rank) != 0 ||
unsigned(ViewTraits<ST, SP...>::rank) != 0))>* = nullptr) {
using dst_type = View<DT, DP...>;
using dst_value_type = typename dst_type::value_type;
using src_value_type = typename src_type::value_type;
- static_assert(std::is_same<typename dst_type::value_type,
- typename dst_type::non_const_value_type>::value,
+ static_assert(std::is_same_v<typename dst_type::value_type,
+ typename dst_type::non_const_value_type>,
"deep_copy requires non-const destination type");
static_assert((unsigned(dst_type::rank) == unsigned(src_type::rank)),
"deep_copy requires Views of equal rank");
message += dst.label();
message += "(";
- for (int r = 0; r < dst_type::Rank - 1; r++) {
- message += std::to_string(dst.extent(r));
+ message += std::to_string(dst.extent(0));
+ for (size_t r = 1; r < dst_type::rank; r++) {
message += ",";
+ message += std::to_string(dst.extent(r));
}
- message += std::to_string(dst.extent(dst_type::Rank - 1));
message += ") ";
message += src.label();
message += "(";
- for (int r = 0; r < src_type::Rank - 1; r++) {
- message += std::to_string(src.extent(r));
+ message += std::to_string(src.extent(0));
+ for (size_t r = 1; r < src_type::rank; r++) {
message += ",";
+ message += std::to_string(src.extent(r));
}
- message += std::to_string(src.extent(src_type::Rank - 1));
message += ") ";
Kokkos::Impl::throw_runtime_exception(message);
"Deprecation Error: Kokkos::deep_copy extents of views don't match: ");
message += dst.label();
message += "(";
- for (int r = 0; r < dst_type::Rank - 1; r++) {
- message += std::to_string(dst.extent(r));
+ message += std::to_string(dst.extent(0));
+ for (size_t r = 1; r < dst_type::rank; r++) {
message += ",";
+ message += std::to_string(dst.extent(r));
}
- message += std::to_string(dst.extent(dst_type::Rank - 1));
message += ") ";
message += src.label();
message += "(";
- for (int r = 0; r < src_type::Rank - 1; r++) {
- message += std::to_string(src.extent(r));
+ message += std::to_string(src.extent(0));
+ for (size_t r = 1; r < src_type::rank; r++) {
message += ",";
+ message += std::to_string(src.extent(r));
}
- message += std::to_string(src.extent(src_type::Rank - 1));
message += ") ";
Kokkos::Impl::throw_runtime_exception(message);
// If same type, equal layout, equal dimensions, equal span, and contiguous
// memory then can byte-wise copy
- if (std::is_same<typename dst_type::value_type,
- typename src_type::non_const_value_type>::value &&
- (std::is_same<typename dst_type::array_layout,
- typename src_type::array_layout>::value ||
+ if (std::is_same_v<typename dst_type::value_type,
+ typename src_type::non_const_value_type> &&
+ (std::is_same_v<typename dst_type::array_layout,
+ typename src_type::array_layout> ||
(dst_type::rank == 1 && src_type::rank == 1)) &&
dst.span_is_contiguous() && src.span_is_contiguous() &&
((dst_type::rank < 1) || (dst.stride_0() == src.stride_0())) &&
Kokkos::fence(
"Kokkos::deep_copy: copy between contiguous views, pre view equality "
"check");
- if ((void*)dst.data() != (void*)src.data()) {
+ if ((void*)dst.data() != (void*)src.data() && 0 < nbytes) {
Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space>(
dst.data(), src.data(), nbytes);
Kokkos::fence(
void KOKKOS_INLINE_FUNCTION local_deep_copy_contiguous(
const TeamType& team, const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
Kokkos::parallel_for(Kokkos::TeamVectorRange(team, dst.span()),
[&](const int& i) { dst.data()[i] = value; });
}
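// Illustrative sketch, not from the patch: these team-level helpers back
// Kokkos::Experimental::local_deep_copy, which is callable from inside a
// parallel region, e.g. filling a view per team (league_size and dst are
// hypothetical):
//   Kokkos::parallel_for(
//       Kokkos::TeamPolicy<>(league_size, Kokkos::AUTO),
//       KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
//         Kokkos::Experimental::local_deep_copy(team, dst, 0.0);
//       });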
void KOKKOS_INLINE_FUNCTION local_deep_copy_contiguous(
const View<DT, DP...>& dst,
typename ViewTraits<DT, DP...>::const_value_type& value,
- std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
- void>::value>* = nullptr) {
+ std::enable_if_t<std::is_same_v<typename ViewTraits<DT, DP...>::specialize,
+ void>>* = nullptr) {
for (size_t i = 0; i < dst.span(); ++i) {
dst.data()[i] = value;
}
typename ViewTraits<DT, DP...>::const_value_type& value,
std::enable_if_t<
Kokkos::is_execution_space<ExecSpace>::value &&
- std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+ std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
Kokkos::SpaceAccessibility<ExecSpace, typename ViewTraits<DT, DP...>::
memory_space>::accessible>* =
nullptr) {
using dst_traits = ViewTraits<DT, DP...>;
- static_assert(std::is_same<typename dst_traits::non_const_value_type,
- typename dst_traits::value_type>::value,
+ static_assert(std::is_same_v<typename dst_traits::non_const_value_type,
+ typename dst_traits::value_type>,
"deep_copy requires non-const type");
using dst_memory_space = typename dst_traits::memory_space;
if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
} else {
using ViewType = View<DT, DP...>;
// Figure out iteration order to do the ViewFill
- int64_t strides[ViewType::Rank + 1];
+ int64_t strides[ViewType::rank + 1];
dst.stride(strides);
Kokkos::Iterate iterate;
- if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutRight>::value) {
+ if (std::is_same_v<typename ViewType::array_layout, Kokkos::LayoutRight>) {
iterate = Kokkos::Iterate::Right;
- } else if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutLeft>::value) {
+ } else if (std::is_same_v<typename ViewType::array_layout,
+ Kokkos::LayoutLeft>) {
iterate = Kokkos::Iterate::Left;
- } else if (std::is_same<typename ViewType::array_layout,
- Kokkos::LayoutStride>::value) {
- if (strides[0] > strides[ViewType::Rank > 0 ? ViewType::Rank - 1 : 0])
+ } else if (std::is_same_v<typename ViewType::array_layout,
+ Kokkos::LayoutStride>) {
+ if (strides[0] > strides[ViewType::rank > 0 ? ViewType::rank - 1 : 0])
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
} else {
- if (std::is_same<typename ViewType::execution_space::array_layout,
- Kokkos::LayoutRight>::value)
+ if (std::is_same_v<typename ViewType::execution_space::array_layout,
+ Kokkos::LayoutRight>)
iterate = Kokkos::Iterate::Right;
else
iterate = Kokkos::Iterate::Left;
// Lets call the right ViewFill functor based on integer space needed and
// iteration type
using ViewTypeUniform =
- std::conditional_t<ViewType::Rank == 0,
+ std::conditional_t<ViewType::rank == 0,
typename ViewType::uniform_runtime_type,
typename ViewType::uniform_runtime_nomemspace_type>;
if (dst.span() > static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight, ExecSpace,
- ViewType::Rank, int64_t>(dst, value, space);
+ ViewType::rank, int64_t>(dst, value, space);
else
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft, ExecSpace,
- ViewType::Rank, int64_t>(dst, value, space);
+ ViewType::rank, int64_t>(dst, value, space);
} else {
if (iterate == Kokkos::Iterate::Right)
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight, ExecSpace,
- ViewType::Rank, int32_t>(dst, value, space);
+ ViewType::rank, int32_t>(dst, value, space);
else
Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft, ExecSpace,
- ViewType::Rank, int32_t>(dst, value, space);
+ ViewType::rank, int32_t>(dst, value, space);
}
}
if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
typename ViewTraits<DT, DP...>::const_value_type& value,
std::enable_if_t<
Kokkos::is_execution_space<ExecSpace>::value &&
- std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+ std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
!Kokkos::SpaceAccessibility<ExecSpace, typename ViewTraits<DT, DP...>::
memory_space>::accessible>* =
nullptr) {
using dst_traits = ViewTraits<DT, DP...>;
- static_assert(std::is_same<typename dst_traits::non_const_value_type,
- typename dst_traits::value_type>::value,
+ static_assert(std::is_same_v<typename dst_traits::non_const_value_type,
+ typename dst_traits::value_type>,
"deep_copy requires non-const type");
using dst_memory_space = typename dst_traits::memory_space;
if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
Impl::contiguous_fill_or_memset(fill_exec_space(), dst, value);
} else {
using ViewTypeUniform = std::conditional_t<
- View<DT, DP...>::Rank == 0,
+ View<DT, DP...>::rank == 0,
typename View<DT, DP...>::uniform_runtime_type,
typename View<DT, DP...>::uniform_runtime_nomemspace_type>;
Kokkos::Impl::ViewFill<ViewTypeUniform, typename dst_traits::array_layout,
typename ViewTraits<ST, SP...>::non_const_value_type& dst,
const View<ST, SP...>& src,
std::enable_if_t<Kokkos::is_execution_space<ExecSpace>::value &&
- std::is_same<typename ViewTraits<ST, SP...>::specialize,
- void>::value>* = nullptr) {
+ std::is_same_v<typename ViewTraits<ST, SP...>::specialize,
+ void>>* = nullptr) {
using src_traits = ViewTraits<ST, SP...>;
using src_memory_space = typename src_traits::memory_space;
static_assert(src_traits::rank == 0,
const View<ST, SP...>& src,
std::enable_if_t<
(Kokkos::is_execution_space<ExecSpace>::value &&
- std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
- std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+ std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
+ std::is_void_v<typename ViewTraits<ST, SP...>::specialize> &&
(unsigned(ViewTraits<DT, DP...>::rank) == unsigned(0) &&
unsigned(ViewTraits<ST, SP...>::rank) == unsigned(0)))>* = nullptr) {
using src_traits = ViewTraits<ST, SP...>;
using src_memory_space = typename src_traits::memory_space;
using dst_memory_space = typename dst_traits::memory_space;
- static_assert(std::is_same<typename dst_traits::value_type,
- typename src_traits::non_const_value_type>::value,
+ static_assert(std::is_same_v<typename dst_traits::value_type,
+ typename src_traits::non_const_value_type>,
"deep_copy requires matching non-const destination type");
if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
const View<ST, SP...>& src,
std::enable_if_t<
(Kokkos::is_execution_space<ExecSpace>::value &&
- std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
- std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+ std::is_void_v<typename ViewTraits<DT, DP...>::specialize> &&
+ std::is_void_v<typename ViewTraits<ST, SP...>::specialize> &&
(unsigned(ViewTraits<DT, DP...>::rank) != 0 ||
unsigned(ViewTraits<ST, SP...>::rank) != 0))>* = nullptr) {
using dst_type = View<DT, DP...>;
using src_type = View<ST, SP...>;
- static_assert(std::is_same<typename dst_type::value_type,
- typename dst_type::non_const_value_type>::value,
+ static_assert(std::is_same_v<typename dst_type::value_type,
+ typename dst_type::non_const_value_type>,
"deep_copy requires non-const destination type");
static_assert((unsigned(dst_type::rank) == unsigned(src_type::rank)),
"deep_copy requires Views of equal rank");
message += dst.label();
message += "(";
- for (int r = 0; r < dst_type::Rank - 1; r++) {
- message += std::to_string(dst.extent(r));
+ message += std::to_string(dst.extent(0));
+ for (size_t r = 1; r < dst_type::rank; r++) {
message += ",";
+ message += std::to_string(dst.extent(r));
}
- message += std::to_string(dst.extent(dst_type::Rank - 1));
message += ") ";
message += src.label();
message += "(";
- for (int r = 0; r < src_type::Rank - 1; r++) {
- message += std::to_string(src.extent(r));
+ message += std::to_string(src.extent(0));
+ for (size_t r = 1; r < src_type::rank; r++) {
message += ",";
+ message += std::to_string(src.extent(r));
}
- message += std::to_string(src.extent(src_type::Rank - 1));
message += ") ";
Kokkos::Impl::throw_runtime_exception(message);
"Deprecation Error: Kokkos::deep_copy extents of views don't match: ");
message += dst.label();
message += "(";
- for (int r = 0; r < dst_type::Rank - 1; r++) {
- message += std::to_string(dst.extent(r));
+ message += std::to_string(dst.extent(0));
+ for (size_t r = 1; r < dst_type::rank; r++) {
message += ",";
+ message += std::to_string(dst.extent(r));
}
- message += std::to_string(dst.extent(dst_type::Rank - 1));
message += ") ";
message += src.label();
message += "(";
- for (int r = 0; r < src_type::Rank - 1; r++) {
- message += std::to_string(src.extent(r));
+ message += std::to_string(src.extent(0));
+ for (size_t r = 1; r < src_type::rank; r++) {
message += ",";
+ message += std::to_string(src.extent(r));
}
- message += std::to_string(src.extent(src_type::Rank - 1));
message += ") ";
Kokkos::Impl::throw_runtime_exception(message);
// If same type, equal layout, equal dimensions, equal span, and contiguous
// memory then can byte-wise copy
- if (std::is_same<typename dst_type::value_type,
- typename src_type::non_const_value_type>::value &&
- (std::is_same<typename dst_type::array_layout,
- typename src_type::array_layout>::value ||
+ if (std::is_same_v<typename dst_type::value_type,
+ typename src_type::non_const_value_type> &&
+ (std::is_same_v<typename dst_type::array_layout,
+ typename src_type::array_layout> ||
(dst_type::rank == 1 && src_type::rank == 1)) &&
dst.span_is_contiguous() && src.span_is_contiguous() &&
((dst_type::rank < 1) || (dst.stride_0() == src.stride_0())) &&
((dst_type::rank < 7) || (dst.stride_6() == src.stride_6())) &&
((dst_type::rank < 8) || (dst.stride_7() == src.stride_7()))) {
const size_t nbytes = sizeof(typename dst_type::value_type) * dst.span();
- if ((void*)dst.data() != (void*)src.data()) {
+ if ((void*)dst.data() != (void*)src.data() && 0 < nbytes) {
Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space, ExecSpace>(
exec_space, dst.data(), src.data(), nbytes);
}
/** \brief Resize a view with copying old data to new data at the corresponding
* indices. */
template <class T, class... P, class... ViewCtorArgs>
-inline typename std::enable_if<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>::type
+inline std::enable_if_t<
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
Kokkos::View<T, P...>& v, const size_t n0, const size_t n1,
const size_t n2, const size_t n3, const size_t n4, const size_t n5,
const bool sizeMismatch = Impl::size_mismatch(v, v.rank_dynamic, new_extents);
if (sizeMismatch) {
- // Add execution space here to avoid the need for if constexpr below
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs..., std::string,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 10>,
- typename view_type::execution_space>>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- v.label();
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, typename view_type::execution_space{}, v.label());
view_type v_resized(prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
- if (alloc_prop_input::has_execution_space)
+ if constexpr (alloc_prop_input::has_execution_space)
Kokkos::Impl::ViewRemap<view_type, view_type>(
- v_resized, v,
- static_cast<const Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space>&>(prop_copy)
- .value);
+ v_resized, v, Impl::get_property<Impl::ExecutionSpaceTag>(prop_copy));
else {
Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
Kokkos::fence("Kokkos::resize(View)");
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
Kokkos::View<T, P...>& v, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
template <class T, class... P>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
resize(Kokkos::View<T, P...>& v, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
inline std::enable_if_t<
(Impl::is_view_ctor_property<I>::value ||
Kokkos::is_execution_space<I>::value) &&
- (std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value)>
+ (std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>)>
resize(const I& arg_prop, Kokkos::View<T, P...>& v,
const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutStride>::value ||
- is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutStride>>
impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
Kokkos::View<T, P...>& v,
const typename Kokkos::View<T, P...>::array_layout& layout) {
"not include a memory space instance!");
if (v.layout() != layout) {
- // Add execution space here to avoid the need for if constexpr below
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs..., std::string,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 10>,
- typename view_type::execution_space>>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- v.label();
+ auto prop_copy = Impl::with_properties_if_unset(arg_prop, v.label());
view_type v_resized(prop_copy, layout);
- if (alloc_prop::has_execution_space)
+ if constexpr (alloc_prop_input::has_execution_space)
Kokkos::Impl::ViewRemap<view_type, view_type>(
- v_resized, v,
- static_cast<const Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space>&>(prop_copy)
- .value);
+ v_resized, v, Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop));
else {
Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
Kokkos::fence("Kokkos::resize(View)");
// the same as the existing one.
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- !(std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutStride>::value ||
- is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value)>
+ !(std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutStride>)>
impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
Kokkos::View<T, P...>& v,
const typename Kokkos::View<T, P...>::array_layout& layout) {
"The view constructor arguments passed to Kokkos::resize must "
"not include a memory space instance!");
- // Add execution space here to avoid the need for if constexpr below
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs..., std::string,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 10>,
- typename view_type::execution_space>>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- v.label();
+ auto prop_copy = Impl::with_properties_if_unset(arg_prop, v.label());
view_type v_resized(prop_copy, layout);
- if (alloc_prop::has_execution_space)
+ if constexpr (alloc_prop_input::has_execution_space)
Kokkos::Impl::ViewRemap<view_type, view_type>(
- v_resized, v,
- static_cast<const Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space>&>(prop_copy)
- .value);
+ v_resized, v, Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop));
else {
Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
Kokkos::fence("Kokkos::resize(View)");
/** \brief Resize a view with discarding old data. */
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
impl_realloc(Kokkos::View<T, P...>& v, const size_t n0, const size_t n1,
const size_t n2, const size_t n3, const size_t n4, const size_t n5,
const size_t n6, const size_t n7,
const bool sizeMismatch = Impl::size_mismatch(v, v.rank_dynamic, new_extents);
if (sizeMismatch) {
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop arg_prop_copy(arg_prop);
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
- .value = v.label();
+ auto arg_prop_copy = Impl::with_properties_if_unset(arg_prop, v.label());
v = view_type(); // Best effort to deallocate in case no other view refers
// to the shared allocation
v = view_type(arg_prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
- } else if (alloc_prop_input::initialize) {
- if (alloc_prop_input::has_execution_space) {
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 2>,
- typename view_type::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
- auto const& exec_space = static_cast<Kokkos::Impl::ViewCtorProp<
- void, typename alloc_prop::execution_space> const&>(arg_prop_copy)
- .value;
+ return;
+ }
+
+ if constexpr (alloc_prop_input::initialize) {
+ if constexpr (alloc_prop_input::has_execution_space) {
+ const auto& exec_space =
+ Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
Kokkos::deep_copy(exec_space, v, typename view_type::value_type{});
} else
Kokkos::deep_copy(v, typename view_type::value_type{});
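+// Usage sketch (illustrative sizes): unlike resize, realloc discards the old
+// contents of the view.
+//
+//   Kokkos::View<int*> a("a", 100);
+//   Kokkos::realloc(a, 200);                               // default-initialized
+//   Kokkos::realloc(Kokkos::WithoutInitializing, a, 300);  // uninitialized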
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
Kokkos::View<T, P...>& v,
const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
template <class T, class... P>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>>
realloc(Kokkos::View<T, P...>& v,
const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
template <class I, class T, class... P>
inline std::enable_if_t<
Impl::is_view_ctor_property<I>::value &&
- (std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value)>
+ (std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight>)>
realloc(const I& arg_prop, Kokkos::View<T, P...>& v,
const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutStride>::value ||
- is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value>
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutStride>>
impl_realloc(Kokkos::View<T, P...>& v,
const typename Kokkos::View<T, P...>::array_layout& layout,
const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
if (v.layout() != layout) {
v = view_type(); // Deallocate first, if the only view to allocation
v = view_type(arg_prop, layout);
- } else if (alloc_prop_input::initialize) {
- if (alloc_prop_input::has_execution_space) {
- // Add execution_space if not provided to avoid need for if constexpr
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 2>,
- typename view_type::execution_space>,
- std::string>;
- alloc_prop arg_prop_copy(arg_prop);
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
- .value = v.label();
- using execution_space_type = typename alloc_prop::execution_space;
- const execution_space_type& exec_space =
- static_cast<
- Kokkos::Impl::ViewCtorProp<void, execution_space_type> const&>(
- arg_prop_copy)
- .value;
+ return;
+ }
+
+ if constexpr (alloc_prop_input::initialize) {
+ if constexpr (alloc_prop_input::has_execution_space) {
+ const auto& exec_space =
+ Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
Kokkos::deep_copy(exec_space, v, typename view_type::value_type{});
} else
Kokkos::deep_copy(v, typename view_type::value_type{});
// the same as the existing one.
template <class T, class... P, class... ViewCtorArgs>
inline std::enable_if_t<
- !(std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename Kokkos::View<T, P...>::array_layout,
- Kokkos::LayoutStride>::value ||
- is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value)>
+ !(std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename Kokkos::View<T, P...>::array_layout,
+ Kokkos::LayoutStride>)>
impl_realloc(Kokkos::View<T, P...>& v,
const typename Kokkos::View<T, P...>::array_layout& layout,
const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
"The view constructor arguments passed to Kokkos::realloc must "
"not include a memory space instance!");
- v = view_type(); // Deallocate first, if the only view to allocation
+ auto arg_prop_copy = Impl::with_properties_if_unset(arg_prop, v.label());
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop arg_prop_copy(arg_prop);
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
- .value = v.label();
- v = view_type(arg_prop_copy, layout);
+ v = view_type(); // Deallocate first, if the only view to allocation
+ v = view_type(arg_prop_copy, layout);
}
template <class T, class... P, class... ViewCtorArgs>
// Check whether it is the same memory space
enum {
is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
+ std::is_same_v<memory_space, typename src_view_type::memory_space>
};
// The array_layout
using array_layout = typename src_view_type::array_layout;
std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
};
-template <class Space, class T, class... P>
-struct MirrorType {
- // The incoming view_type
- using src_view_type = typename Kokkos::View<T, P...>;
- // The memory space for the mirror view
- using memory_space = typename Space::memory_space;
- // Check whether it is the same memory space
- enum {
- is_same_memspace =
- std::is_same<memory_space, typename src_view_type::memory_space>::value
- };
- // The array_layout
- using array_layout = typename src_view_type::array_layout;
- // The data type (we probably want it non-const since otherwise we can't even
- // deep_copy to it.
- using data_type = typename src_view_type::non_const_data_type;
- // The destination view type if it is not the same memory space
- using view_type = Kokkos::View<data_type, array_layout, Space>;
-};
-
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- !std::is_same<typename Kokkos::ViewTraits<T, P...>::array_layout,
- Kokkos::LayoutStride>::value &&
- !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror(const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using src_type = View<T, P...>;
- using dst_type = typename src_type::HostMirror;
+// collection of static asserts for create_mirror and create_mirror_view
+template <class... ViewCtorArgs>
+void check_view_ctor_args_create_mirror() {
using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
static_assert(
!alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
+ "The view constructor arguments passed to Kokkos::create_mirror[_view] "
"must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return dst_type(
- prop_copy,
- src.rank_dynamic > 0 ? src.extent(0) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 1 ? src.extent(1) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 2 ? src.extent(2) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 3 ? src.extent(3) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 4 ? src.extent(4) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 5 ? src.extent(5) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 6 ? src.extent(6) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
- src.rank_dynamic > 7 ? src.extent(7) : KOKKOS_IMPL_CTOR_DEFAULT_ARG);
+ static_assert(!alloc_prop_input::has_pointer,
+ "The view constructor arguments passed to "
+ "Kokkos::create_mirror[_view] must "
+ "not include a pointer!");
+ static_assert(!alloc_prop_input::allow_padding,
+ "The view constructor arguments passed to "
+ "Kokkos::create_mirror[_view] must "
+ "not explicitly allow padding!");
}
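+// For example (sketch): passing a label trips the first assertion, because a
+// mirror always derives its label from the source view:
+//
+//   // static_assert fires: labels are not allowed here
+//   auto m = Kokkos::create_mirror(Kokkos::view_alloc("my_label"), v);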
+// create a mirror
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- std::is_same<typename Kokkos::ViewTraits<T, P...>::array_layout,
- Kokkos::LayoutStride>::value &&
- !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror(const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using src_type = View<T, P...>;
- using dst_type = typename src_type::HostMirror;
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- Kokkos::LayoutStride layout;
-
- layout.dimension[0] = src.extent(0);
- layout.dimension[1] = src.extent(1);
- layout.dimension[2] = src.extent(2);
- layout.dimension[3] = src.extent(3);
- layout.dimension[4] = src.extent(4);
- layout.dimension[5] = src.extent(5);
- layout.dimension[6] = src.extent(6);
- layout.dimension[7] = src.extent(7);
-
- layout.stride[0] = src.stride_0();
- layout.stride[1] = src.stride_1();
- layout.stride[2] = src.stride_2();
- layout.stride[3] = src.stride_3();
- layout.stride[4] = src.stride_4();
- layout.stride[5] = src.stride_5();
- layout.stride[6] = src.stride_6();
- layout.stride[7] = src.stride_7();
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return dst_type(prop_copy, layout);
-}
-
-// Create a mirror in a new space (specialization for different space)
-template <class T, class... P, class... ViewCtorArgs,
- class Enable = std::enable_if_t<
- Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>>
-auto create_mirror(const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
-
- static_assert(
- !alloc_prop_input::has_label,
- "The view constructor arguments passed to Kokkos::create_mirror "
- "must not include a label!");
- static_assert(
- !alloc_prop_input::has_pointer,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not include a pointer!");
- static_assert(
- !alloc_prop_input::allow_padding,
- "The view constructor arguments passed to Kokkos::create_mirror must "
- "not explicitly allow padding!");
-
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
- alloc_prop prop_copy(arg_prop);
- static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
- std::string(src.label()).append("_mirror");
-
- return typename Impl::MirrorType<typename alloc_prop::memory_space, T,
- P...>::view_type(prop_copy, src.layout());
+inline auto create_mirror(const Kokkos::View<T, P...>& src,
+ const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
+
+ auto prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string(src.label()).append("_mirror"));
+
+ if constexpr (Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ using memory_space = typename decltype(prop_copy)::memory_space;
+ using dst_type =
+ typename Impl::MirrorViewType<memory_space, T, P...>::dest_view_type;
+ return dst_type(prop_copy, src.layout());
+ } else {
+ using dst_type = typename View<T, P...>::HostMirror;
+ return dst_type(prop_copy, src.layout());
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
}
} // namespace Impl
-template <class T, class... P>
-std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror(Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, Impl::ViewCtorProp<>{});
+// public interface
+template <class T, class... P,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+auto create_mirror(Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
}
-template <class T, class... P>
-std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror(Kokkos::Impl::WithoutInitializing_t wi,
- Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, view_alloc(wi));
+// public interface that accepts a without initializing flag
+template <class T, class... P,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+auto create_mirror(Kokkos::Impl::WithoutInitializing_t wi,
+ Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror(src, view_alloc(wi));
}
+// public interface that accepts a space
template <class Space, class T, class... P,
- typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
- typename Impl::MirrorType<Space, T, P...>::view_type>
-create_mirror(Space const&, Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, view_alloc(typename Space::memory_space{}));
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+auto create_mirror(Space const&, Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror(src, view_alloc(typename Space::memory_space{}));
}
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs,
- typename Enable = std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>>
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
auto create_mirror(Impl::ViewCtorProp<ViewCtorArgs...> const& arg_prop,
- Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, arg_prop);
-}
-
-template <class T, class... P, class... ViewCtorArgs>
-std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror(Impl::ViewCtorProp<ViewCtorArgs...> const& arg_prop,
- Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, arg_prop);
+ Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror(src, arg_prop);
}
+// public interface that accepts a space and a without initializing flag
template <class Space, class T, class... P,
- typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
- typename Impl::MirrorType<Space, T, P...>::view_type>
-create_mirror(Kokkos::Impl::WithoutInitializing_t wi, Space const&,
- Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror(v, view_alloc(typename Space::memory_space{}, wi));
+ typename Enable = std::enable_if_t<
+ Kokkos::is_space<Space>::value &&
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
+auto create_mirror(Kokkos::Impl::WithoutInitializing_t wi, Space const&,
+ Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror(src,
+ view_alloc(typename Space::memory_space{}, wi));
}
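+// Typical uses of the public overloads above (sketch; dev_view stands in for
+// any Kokkos::View):
+//
+//   auto h  = Kokkos::create_mirror(dev_view);  // host mirror, initialized
+//   auto h2 = Kokkos::create_mirror(Kokkos::WithoutInitializing, dev_view);
+//   auto d  = Kokkos::create_mirror(Kokkos::DefaultExecutionSpace{}, dev_view);
+//   auto h3 = Kokkos::create_mirror(
+//       Kokkos::view_alloc(Kokkos::WithoutInitializing, Kokkos::HostSpace{}),
+//       dev_view);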
namespace Impl {
-template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- (std::is_same<
- typename Kokkos::View<T, P...>::memory_space,
- typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::View<T, P...>::data_type,
- typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
+// choose the `Kokkos::create_mirror` overload adapted to the provided view
+// and arguments
+template <class View, class... ViewCtorArgs>
+inline auto choose_create_mirror(
+ const View& src, const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  // Users may overload `Kokkos::create_mirror` without implementing every one
+  // of its possible variations, so this function selects the correct private
+  // or public overload to call.
+  // Every overload of `Kokkos::Impl::create_mirror_view` should go through
+  // this helper.
+
+ if constexpr (std::is_void_v<typename View::traits::specialize>) {
+ // if the view is not specialized, just call the Impl function
+
+ // using ADL to find the later defined overload of the function
+ using namespace Kokkos::Impl;
+
+ return create_mirror(src, arg_prop);
+ } else {
+ // otherwise, recreate the public call
+ using ViewProp = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+ // using ADL to find the later defined overload of the function
+ using namespace Kokkos;
+
+ if constexpr (sizeof...(ViewCtorArgs) == 0) {
+ // if there are no view constructor args, call the specific public
+ // function
+ return create_mirror(src);
+ } else if constexpr (sizeof...(ViewCtorArgs) == 1 &&
+ ViewProp::has_memory_space) {
+ // if there is one view constructor arg and it has a memory space, call
+ // the specific public function
+ return create_mirror(typename ViewProp::memory_space{}, src);
+ } else if constexpr (sizeof...(ViewCtorArgs) == 1 &&
+ !ViewProp::initialize) {
+      // if there is one view constructor arg and it carries a
+      // WithoutInitializing mark, call the specific public function
+ return create_mirror(typename Kokkos::Impl::WithoutInitializing_t{}, src);
+ } else if constexpr (sizeof...(ViewCtorArgs) == 2 &&
+ ViewProp::has_memory_space && !ViewProp::initialize) {
+      // if there are two view constructor args carrying a memory space and a
+      // WithoutInitializing mark, call the specific public function
+ return create_mirror(typename Kokkos::Impl::WithoutInitializing_t{},
+ typename ViewProp::memory_space{}, src);
+ } else {
+ // if there are other constructor args, call the generic public function
+
+      // Beware: some libraries using Kokkos do not implement this overload
+      // (which is why this function exists in the first place).
+ return create_mirror(arg_prop, src);
+ }
+ }
+
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
}
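+// Dispatch sketch: for a view type with a custom specialize trait whose
+// library only provides the basic overload, a call with no constructor args
+// resolves (via ADL) to
+//
+//   create_mirror(user_view);
+//
+// rather than to create_mirror(arg_prop, user_view), which that library may
+// not implement.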
+// create a mirror view
+// private interface that accepts arbitrary view constructor args passed by a
+// view_alloc
template <class T, class... P, class... ViewCtorArgs>
-inline std::enable_if_t<
- !(std::is_same<
- typename Kokkos::View<T, P...>::memory_space,
- typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::View<T, P...>::data_type,
- typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- return Kokkos::Impl::create_mirror(src, arg_prop);
-}
-
-// Create a mirror view in a new space (specialization for same space)
-template <class Space, class T, class... P, class... ViewCtorArgs>
-std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace,
- typename Impl::MirrorViewType<Space, T, P...>::view_type>
-create_mirror_view(const Space&, const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>&) {
- return src;
-}
-
-// Create a mirror view in a new space (specialization for different space)
-template <class Space, class T, class... P, class... ViewCtorArgs>
-std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace,
- typename Impl::MirrorViewType<Space, T, P...>::view_type>
-create_mirror_view(const Space&, const Kokkos::View<T, P...>& src,
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
- using MemorySpace = typename Space::memory_space;
- using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., MemorySpace>;
- alloc_prop prop_copy(arg_prop);
-
- return Kokkos::Impl::create_mirror(src, prop_copy);
+inline auto create_mirror_view(
+ const Kokkos::View<T, P...>& src,
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+ if constexpr (!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space) {
+ if constexpr (std::is_same_v<typename Kokkos::View<T, P...>::memory_space,
+ typename Kokkos::View<
+ T, P...>::HostMirror::memory_space> &&
+ std::is_same_v<
+ typename Kokkos::View<T, P...>::data_type,
+ typename Kokkos::View<T, P...>::HostMirror::data_type>) {
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
+ return typename Kokkos::View<T, P...>::HostMirror(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ } else {
+ if constexpr (Impl::MirrorViewType<typename Impl::ViewCtorProp<
+ ViewCtorArgs...>::memory_space,
+ T, P...>::is_same_memspace) {
+ check_view_ctor_args_create_mirror<ViewCtorArgs...>();
+ return typename Impl::MirrorViewType<
+ typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+ P...>::view_type(src);
+ } else {
+ return Kokkos::Impl::choose_create_mirror(src, arg_prop);
+ }
+ }
+#if defined(KOKKOS_COMPILER_INTEL) || \
+ (defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC))
+ __builtin_unreachable();
+#endif
}
} // namespace Impl
+// public interface
template <class T, class... P>
-std::enable_if_t<
- std::is_same<
- typename Kokkos::View<T, P...>::memory_space,
- typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::View<T, P...>::data_type,
- typename Kokkos::View<T, P...>::HostMirror::data_type>::value,
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::View<T, P...>& src) {
- return src;
-}
-
-template <class T, class... P>
-std::enable_if_t<
- !(std::is_same<
- typename Kokkos::View<T, P...>::memory_space,
- typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
- std::is_same<
- typename Kokkos::View<T, P...>::data_type,
- typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
- typename Kokkos::View<T, P...>::HostMirror>
-create_mirror_view(const Kokkos::View<T, P...>& src) {
- return Kokkos::create_mirror(src);
+auto create_mirror_view(const Kokkos::View<T, P...>& src) {
+ return Impl::create_mirror_view(src, view_alloc());
}
+// public interface that accepts a without initializing flag
template <class T, class... P>
-typename Kokkos::View<T, P...>::HostMirror create_mirror_view(
- Kokkos::Impl::WithoutInitializing_t wi, Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror_view(v, view_alloc(wi));
+auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
+ Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror_view(src, view_alloc(wi));
}
-// FIXME_C++17 Improve SFINAE here.
+// public interface that accepts a space
template <class Space, class T, class... P,
class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
- const Space&, const Kokkos::View<T, P...>& src,
- std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
- nullptr) {
- return src;
-}
-
-// FIXME_C++17 Improve SFINAE here.
-template <class Space, class T, class... P,
- class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
- const Space& space, const Kokkos::View<T, P...>& src,
- std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
- nullptr) {
- return Kokkos::create_mirror(space, src);
+auto create_mirror_view(const Space&, const Kokkos::View<T, P...>& src) {
+ return Impl::create_mirror_view(src,
+ view_alloc(typename Space::memory_space()));
}
+// public interface that accepts a space and a without initializing flag
template <class Space, class T, class... P,
typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
-typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
- Kokkos::Impl::WithoutInitializing_t wi, Space const& space,
- Kokkos::View<T, P...> const& v) {
- return Impl::create_mirror_view(space, v, view_alloc(wi));
+auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi, Space const&,
+ Kokkos::View<T, P...> const& src) {
+ return Impl::create_mirror_view(
+ src, view_alloc(typename Space::memory_space{}, wi));
}
-template <class T, class... P, class... ViewCtorArgs>
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class T, class... P, class... ViewCtorArgs,
+ typename = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
auto create_mirror_view(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::View<T, P...>& v) {
- return Impl::create_mirror_view(v, arg_prop);
+ const Kokkos::View<T, P...>& src) {
+ return Impl::create_mirror_view(src, arg_prop);
}
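+// Key difference from create_mirror (sketch): create_mirror_view returns the
+// source view itself when it is already accessible with the requested (or
+// host) memory space and data type, so no allocation or copy occurs:
+//
+//   Kokkos::View<double*, Kokkos::HostSpace> h("h", 100);
+//   auto m = Kokkos::create_mirror_view(h);  // m aliases h, no allocation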
-template <class... ViewCtorArgs, class T, class... P>
-auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>&,
- const Kokkos::View<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- Impl::MirrorViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
+namespace Impl {
+
+// collection of static asserts for create_mirror_view_and_copy
+template <class... ViewCtorArgs>
+void check_view_ctor_args_create_mirror_view_and_copy() {
using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
  static_assert(
      alloc_prop_input::has_memory_space,
      "The view constructor arguments passed to "
      "Kokkos::create_mirror_view_and_copy must include a memory space!");
  static_assert(!alloc_prop_input::has_pointer,
                "The view constructor arguments passed to "
                "Kokkos::create_mirror_view_and_copy must "
                "not include a pointer!");
  static_assert(!alloc_prop_input::allow_padding,
                "The view constructor arguments passed to "
                "Kokkos::create_mirror_view_and_copy must "
                "not explicitly allow padding!");
-
- // same behavior as deep_copy(src, src)
- if (!alloc_prop_input::has_execution_space)
- fence(
- "Kokkos::create_mirror_view_and_copy: fence before returning src view");
- return src;
}
-template <class... ViewCtorArgs, class T, class... P>
+} // namespace Impl
+
+// create a mirror view and deep copy it
+// public interface that accepts arbitrary view constructor args passed by a
+// view_alloc
+template <class... ViewCtorArgs, class T, class... P,
+ class Enable = std::enable_if_t<
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>>
auto create_mirror_view_and_copy(
- const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
- const Kokkos::View<T, P...>& src,
- std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
- !Impl::MirrorViewType<
- typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
- P...>::is_same_memspace>* = nullptr) {
+ [[maybe_unused]] const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+ const Kokkos::View<T, P...>& src) {
using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
- static_assert(
- alloc_prop_input::has_memory_space,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must include a memory space!");
- static_assert(!alloc_prop_input::has_pointer,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not include a pointer!");
- static_assert(!alloc_prop_input::allow_padding,
- "The view constructor arguments passed to "
- "Kokkos::create_mirror_view_and_copy must "
- "not explicitly allow padding!");
- using Space = typename alloc_prop_input::memory_space;
- using Mirror = typename Impl::MirrorViewType<Space, T, P...>::view_type;
-
- // Add some properties if not provided to avoid need for if constexpr
- using alloc_prop = Impl::ViewCtorProp<
- ViewCtorArgs...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned int, 12>, std::string>,
- std::conditional_t<!alloc_prop_input::initialize,
- std::integral_constant<unsigned int, 13>,
- Impl::WithoutInitializing_t>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 14>,
- typename Space::execution_space>>;
- alloc_prop arg_prop_copy(arg_prop);
-
- std::string& label =
- static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
- if (label.empty()) label = src.label();
- auto mirror = typename Mirror::non_const_type{arg_prop_copy, src.layout()};
- if (alloc_prop_input::has_execution_space) {
- using ExecutionSpace = typename alloc_prop::execution_space;
- deep_copy(
- static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
- .value,
- mirror, src);
- } else
- deep_copy(mirror, src);
- return mirror;
+
+ Impl::check_view_ctor_args_create_mirror_view_and_copy<ViewCtorArgs...>();
+
+ if constexpr (Impl::MirrorViewType<typename alloc_prop_input::memory_space, T,
+ P...>::is_same_memspace) {
+ // same behavior as deep_copy(src, src)
+ if constexpr (!alloc_prop_input::has_execution_space)
+ fence(
+ "Kokkos::create_mirror_view_and_copy: fence before returning src "
+ "view");
+ return src;
+ } else {
+ using Space = typename alloc_prop_input::memory_space;
+ using Mirror = typename Impl::MirrorViewType<Space, T, P...>::view_type;
+
+ auto arg_prop_copy = Impl::with_properties_if_unset(
+ arg_prop, std::string{}, WithoutInitializing,
+ typename Space::execution_space{});
+
+ std::string& label = Impl::get_property<Impl::LabelTag>(arg_prop_copy);
+ if (label.empty()) label = src.label();
+ auto mirror = typename Mirror::non_const_type{arg_prop_copy, src.layout()};
+ if constexpr (alloc_prop_input::has_execution_space) {
+ deep_copy(Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop_copy),
+ mirror, src);
+ } else
+ deep_copy(mirror, src);
+ return mirror;
+ }
+#if defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC >= 1130 && \
+ !defined(KOKKOS_COMPILER_MSVC)
+ __builtin_unreachable();
+#endif
}
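+// Usage sketch: allocate a mirror in the given memory space only if needed,
+// then deep copy the source into it in one call:
+//
+//   auto d = Kokkos::create_mirror_view_and_copy(
+//       Kokkos::view_alloc(Kokkos::DefaultExecutionSpace::memory_space{}),
+//       host_view);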
// Previously when using auto here, the intel compiler 19.3 would
const Space&, const Kokkos::View<T, P...>& src,
std::string const& name = "",
std::enable_if_t<
- std::is_void<typename ViewTraits<T, P...>::specialize>::value>* =
- nullptr) {
+ std::is_void_v<typename ViewTraits<T, P...>::specialize>>* = nullptr) {
return create_mirror_view_and_copy(
Kokkos::view_alloc(typename Space::memory_space{}, name), src);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-// Create a mirror view in a new space without initializing (specialization for
-// same space)
-template <class Space, class T, class... P>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the version taking WithoutInitializing as first argument")
-typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
- const Space&, const Kokkos::View<T, P...>& src,
- Kokkos::Impl::WithoutInitializing_t,
- std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
- nullptr) {
- return src;
-}
-
-// Create a mirror view in a new space without initializing (specialization for
-// different space)
-template <class Space, class T, class... P>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the version taking WithoutInitializing as first argument")
-typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
- const Space&, const Kokkos::View<T, P...>& src,
- Kokkos::Impl::WithoutInitializing_t,
- std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
- nullptr) {
- using Mirror = typename Impl::MirrorViewType<Space, T, P...>::view_type;
- return Mirror(view_alloc(WithoutInitializing, src.label()), src.layout());
-}
-#endif
-
} /* namespace Kokkos */
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CORE_HPP
#define KOKKOS_CORE_HPP
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE
#endif
+//----------------------------------------------------------------------------
+// If windows.h is included before Kokkos_Core.hpp, its "min" and "max"
+// macros (defined by default) collide with names used inside Kokkos. The
+// macros are pushed and undefined here and restored at the end of
+// Kokkos_Core.hpp.
+#if defined(min)
+#pragma push_macro("min")
+#undef min
+#define KOKKOS_IMPL_PUSH_MACRO_MIN
+#endif
+#if defined(max)
+#pragma push_macro("max")
+#undef max
+#define KOKKOS_IMPL_PUSH_MACRO_MAX
+#endif
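+// For illustration (hypothetical user code): without this guard,
+//
+//   #include <windows.h>      // defines min and max as macros by default
+//   #include <Kokkos_Core.hpp>
+//
+// would let the preprocessor mangle calls such as Kokkos::max(a, b); the
+// push/pop pair compiles Kokkos cleanly and restores the user's macros.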
+
//----------------------------------------------------------------------------
// Include the execution space header files for the enabled execution spaces.
#include <Kokkos_Half.hpp>
#include <Kokkos_AnonymousSpace.hpp>
-#include <Kokkos_LogicalSpaces.hpp>
#include <Kokkos_Pair.hpp>
-#include <Kokkos_MinMaxClamp.hpp>
+#include <Kokkos_Clamp.hpp>
+#include <Kokkos_MinMax.hpp>
#include <Kokkos_MathematicalConstants.hpp>
#include <Kokkos_MathematicalFunctions.hpp>
#include <Kokkos_MathematicalSpecialFunctions.hpp>
+#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_BitManipulation.hpp>
+#include <Kokkos_Swap.hpp>
#include <Kokkos_MemoryPool.hpp>
#include <Kokkos_Array.hpp>
#include <Kokkos_View.hpp>
#include <Kokkos_hwloc.hpp>
#include <Kokkos_Timer.hpp>
#include <Kokkos_Tuners.hpp>
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
#include <Kokkos_TaskScheduler.hpp>
+#endif
#include <Kokkos_Complex.hpp>
#include <Kokkos_CopyViews.hpp>
+#include <impl/Kokkos_TeamMDPolicy.hpp>
#include <impl/Kokkos_InitializationSettings.hpp>
#include <functional>
#include <iosfwd>
void post_initialize(const InitializationSettings& settings);
+void pre_finalize();
+
+void post_finalize();
+
void declare_configuration_metadata(const std::string& category,
const std::string& key,
const std::string& value);
} // namespace Impl
-KOKKOS_ATTRIBUTE_NODISCARD bool is_initialized() noexcept;
-KOKKOS_ATTRIBUTE_NODISCARD bool is_finalized() noexcept;
+[[nodiscard]] bool is_initialized() noexcept;
+[[nodiscard]] bool is_finalized() noexcept;
+
+[[nodiscard]] int device_id() noexcept;
+[[nodiscard]] int num_devices() noexcept;
+[[nodiscard]] int num_threads() noexcept;
bool show_warnings() noexcept;
bool tune_internals() noexcept;
*/
void push_finalize_hook(std::function<void()> f);
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-/** \brief Finalize all known execution spaces */
-KOKKOS_DEPRECATED void finalize_all();
-#endif
+void fence(const std::string& name /*= "Kokkos::fence: Unnamed Global Fence"*/);
/** \brief Print "Bill of Materials" */
void print_configuration(std::ostream& os, bool verbose = false);
} // namespace Impl
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-class KOKKOS_ATTRIBUTE_NODISCARD ScopeGuard {
- public:
-#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
- KOKKOS_ATTRIBUTE_NODISCARD
-#endif
- ScopeGuard(int& argc, char* argv[]) {
- sg_init = false;
-#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
- if (is_initialized()) {
- std::cerr << Impl::scopeguard_create_while_initialized_warning()
- << std::endl;
- }
- if (is_finalized()) {
- std::cerr << Impl::scopeguard_create_after_finalize_warning()
- << std::endl;
- }
-#endif
- if (!is_initialized()) {
- initialize(argc, argv);
- sg_init = true;
- }
- }
-
-#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
- KOKKOS_ATTRIBUTE_NODISCARD
-#endif
- explicit ScopeGuard(
- const InitializationSettings& settings = InitializationSettings()) {
- sg_init = false;
-#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
- if (is_initialized()) {
- std::cerr << Impl::scopeguard_create_while_initialized_warning()
- << std::endl;
- }
- if (is_finalized()) {
- std::cerr << Impl::scopeguard_create_after_finalize_warning()
- << std::endl;
- }
-#endif
- if (!is_initialized()) {
- initialize(settings);
- sg_init = true;
- }
- }
-
- ~ScopeGuard() {
-#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
- if (is_finalized()) {
- std::cerr << Impl::scopeguard_destruct_after_finalize_warning()
- << std::endl;
- }
-#endif
- if (is_initialized() && sg_init) {
- finalize();
- }
- }
-
- private:
- bool sg_init;
-
- public:
- ScopeGuard& operator=(const ScopeGuard&) = delete;
- ScopeGuard& operator=(ScopeGuard&&) = delete;
- ScopeGuard(const ScopeGuard&) = delete;
- ScopeGuard(ScopeGuard&&) = delete;
-};
-
-#else // ifndef KOKKOS_ENABLE_DEPRECATED_CODE3
-
class KOKKOS_ATTRIBUTE_NODISCARD ScopeGuard {
public:
+ template <class... Args>
#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
- KOKKOS_ATTRIBUTE_NODISCARD
+ [[nodiscard]]
#endif
- ScopeGuard(int& argc, char* argv[]) {
+ ScopeGuard(Args&&... args) {
if (is_initialized()) {
Kokkos::abort(
Impl::scopeguard_create_while_initialized_warning().c_str());
if (is_finalized()) {
Kokkos::abort(Impl::scopeguard_create_after_finalize_warning().c_str());
}
- initialize(argc, argv);
- }
-
-#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
- KOKKOS_ATTRIBUTE_NODISCARD
-#endif
- ScopeGuard(
- const InitializationSettings& settings = InitializationSettings()) {
- if (is_initialized()) {
- Kokkos::abort(
- Impl::scopeguard_create_while_initialized_warning().c_str());
- }
- if (is_finalized()) {
- Kokkos::abort(Impl::scopeguard_create_after_finalize_warning().c_str());
- }
- initialize(settings);
+ initialize(static_cast<Args&&>(args)...);
}
~ScopeGuard() {
}
ScopeGuard& operator=(const ScopeGuard&) = delete;
- ScopeGuard& operator=(ScopeGuard&&) = delete;
- ScopeGuard(const ScopeGuard&) = delete;
- ScopeGuard(ScopeGuard&&) = delete;
+ ScopeGuard& operator=(ScopeGuard&&) = delete;
+ ScopeGuard(const ScopeGuard&) = delete;
+ ScopeGuard(ScopeGuard&&) = delete;
};
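+// Usage sketch: the variadic constructor forwards to Kokkos::initialize, so
+// both classic forms keep working:
+//
+//   int main(int argc, char* argv[]) {
+//     Kokkos::ScopeGuard guard(argc, argv);
+//     // ... Kokkos calls; finalize() runs when guard goes out of scope
+//   }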
-#endif
} // namespace Kokkos
// Customization point for backends
// Default behavior is to return the passed in instance
template <class ExecSpace, class... Args>
-std::vector<ExecSpace> partition_space(ExecSpace space, Args...) {
+std::vector<ExecSpace> partition_space(ExecSpace const& space, Args...) {
static_assert(is_execution_space<ExecSpace>::value,
"Kokkos Error: partition_space expects an Execution Space as "
"first argument");
-#ifdef __cpp_fold_expressions
static_assert(
(... && std::is_arithmetic_v<Args>),
"Kokkos Error: partitioning arguments must be integers or floats");
-#endif
std::vector<ExecSpace> instances(sizeof...(Args));
for (int s = 0; s < int(sizeof...(Args)); s++) instances[s] = space;
return instances;
}
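+// Usage sketch (assuming the Kokkos::Experimental namespace, as upstream):
+// the default implementation replicates the parent instance; backends with
+// real sub-instance support overload this customization point.
+//
+//   auto instances = Kokkos::Experimental::partition_space(
+//       Kokkos::DefaultExecutionSpace{}, 1, 1);  // vector of two instances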
template <class ExecSpace, class T>
-std::vector<ExecSpace> partition_space(ExecSpace space,
- std::vector<T>& weights) {
+std::vector<ExecSpace> partition_space(ExecSpace const& space,
+ std::vector<T> const& weights) {
static_assert(is_execution_space<ExecSpace>::value,
"Kokkos Error: partition_space expects an Execution Space as "
"first argument");
static_assert(
- std::is_arithmetic<T>::value,
+ std::is_arithmetic_v<T>,
"Kokkos Error: partitioning arguments must be integers or floats");
std::vector<ExecSpace> instances(weights.size());
// implementation of the RAII wrapper is using Kokkos::single.
#include <Kokkos_AcquireUniqueTokenImpl.hpp>
-// Specializations required after core definitions
-#include <KokkosCore_Config_PostInclude.hpp>
+//----------------------------------------------------------------------------
+// Restore the min and max macros if we pushed them at the top of
+// Kokkos_Core.hpp
+#if defined(KOKKOS_IMPL_PUSH_MACRO_MIN)
+#pragma pop_macro("min")
+#undef KOKKOS_IMPL_PUSH_MACRO_MIN
+#endif
+#if defined(KOKKOS_IMPL_PUSH_MACRO_MAX)
+#pragma pop_macro("max")
+#undef KOKKOS_IMPL_PUSH_MACRO_MAX
+#endif
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CORE_FWD_HPP
#define KOKKOS_CORE_FWD_HPP
// and compiler environment then sets a collection of #define macros.
#include <Kokkos_Macros.hpp>
+#include <Kokkos_Printf.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_Utilities.hpp>
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-#include <Kokkos_MasterLock.hpp>
-#endif
-
//----------------------------------------------------------------------------
-// Have assumed a 64bit build (8byte pointers) throughout the code base.
-
+// Have assumed a 64-bit build (8-byte pointers) throughout the code base.
+// A 32-bit build is allowed but unsupported.
+#ifdef KOKKOS_IMPL_32BIT
+static_assert(sizeof(void *) == 4,
+              "Kokkos assumes 32-bit build; i.e., 4-byte pointers");
+#else
static_assert(sizeof(void *) == 8,
"Kokkos assumes 64-bit build; i.e., 8-byte pointers");
-
+#endif
//----------------------------------------------------------------------------
namespace Kokkos {
struct Device;
// forward declare here so that backend initializer calls can use it.
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-struct InitArguments;
-#endif
class InitializationSettings;
} // namespace Kokkos
using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
Experimental::OpenMPTarget;
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP)
-using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
- Experimental::HIP;
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = HIP;
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL)
-using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
- Experimental::SYCL;
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = SYCL;
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENACC)
using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
Experimental::OpenACC;
using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = Serial;
#else
#error \
- "At least one of the following execution spaces must be defined in order to use Kokkos: Kokkos::Cuda, Kokkos::Experimental::HIP, Kokkos::Experimental::SYCL, Kokkos::Experimental::OpenMPTarget, Kokkos::Experimental::OpenACC, Kokkos::OpenMP, Kokkos::Threads, Kokkos::Experimental::HPX, or Kokkos::Serial."
+ "At least one of the following execution spaces must be defined in order to use Kokkos: Kokkos::Cuda, Kokkos::HIP, Kokkos::SYCL, Kokkos::Experimental::OpenMPTarget, Kokkos::Experimental::OpenACC, Kokkos::OpenMP, Kokkos::Threads, Kokkos::Experimental::HPX, or Kokkos::Serial."
#endif
#if defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP)
"At least one of the following execution spaces must be defined in order to use Kokkos: Kokkos::OpenMP, Kokkos::Threads, Kokkos::Experimental::HPX, or Kokkos::Serial."
#endif
+// check for device backends that support SharedSpace
+#if defined(KOKKOS_ENABLE_CUDA)
+using SharedSpace = CudaUVMSpace;
+#define KOKKOS_HAS_SHARED_SPACE
+#elif defined(KOKKOS_ENABLE_HIP)
+using SharedSpace = HIPManagedSpace;
+#define KOKKOS_HAS_SHARED_SPACE
+#elif defined(KOKKOS_ENABLE_SYCL)
+using SharedSpace = SYCLSharedUSMSpace;
+#define KOKKOS_HAS_SHARED_SPACE
+// in host-only builds, fall back to HostSpace
+#elif !defined(KOKKOS_ENABLE_OPENACC) && !defined(KOKKOS_ENABLE_OPENMPTARGET)
+using SharedSpace = HostSpace;
+#define KOKKOS_HAS_SHARED_SPACE
+#endif
+
+inline constexpr bool has_shared_space =
+#if defined KOKKOS_HAS_SHARED_SPACE
+ true;
+#else
+ false;
+#endif
+
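+// Usage sketch: SharedSpace designates memory accessible from both host and
+// device on backends that provide it:
+//
+// #ifdef KOKKOS_HAS_SHARED_SPACE
+//   Kokkos::View<double*, Kokkos::SharedSpace> v("v", 100);
+// #endif
+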
+#if defined(KOKKOS_ENABLE_CUDA)
+using SharedHostPinnedSpace = CudaHostPinnedSpace;
+#define KOKKOS_HAS_SHARED_HOST_PINNED_SPACE
+#elif defined(KOKKOS_ENABLE_HIP)
+using SharedHostPinnedSpace = HIPHostPinnedSpace;
+#define KOKKOS_HAS_SHARED_HOST_PINNED_SPACE
+#elif defined(KOKKOS_ENABLE_SYCL)
+using SharedHostPinnedSpace = SYCLHostUSMSpace;
+#define KOKKOS_HAS_SHARED_HOST_PINNED_SPACE
+#elif !defined(KOKKOS_ENABLE_OPENACC) && !defined(KOKKOS_ENABLE_OPENMPTARGET)
+using SharedHostPinnedSpace = HostSpace;
+#define KOKKOS_HAS_SHARED_HOST_PINNED_SPACE
+#endif
+
+inline constexpr bool has_shared_host_pinned_space =
+#if defined KOKKOS_HAS_SHARED_HOST_PINNED_SPACE
+ true;
+#else
+ false;
+#endif
+
} // namespace Kokkos
//----------------------------------------------------------------------------
msg);))
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-
-#if defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA) && \
- defined(KOKKOS_ENABLE_CUDA)
-using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = Kokkos::CudaSpace;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
-using ActiveExecutionMemorySpace KOKKOS_DEPRECATED =
- Kokkos::Experimental::SYCLDeviceUSMSpace;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HIP_GPU)
-using ActiveExecutionMemorySpace KOKKOS_DEPRECATED =
- Kokkos::Experimental::HIPSpace;
-#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST)
-using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = Kokkos::HostSpace;
-#else
-using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = void;
-#endif
-
-template <typename DstMemorySpace, typename SrcMemorySpace>
-struct MemorySpaceAccess;
-
-template <typename DstMemorySpace, typename SrcMemorySpace,
- bool = Kokkos::Impl::MemorySpaceAccess<DstMemorySpace,
- SrcMemorySpace>::accessible>
-struct verify_space {
- KOKKOS_DEPRECATED KOKKOS_FUNCTION static void check() {}
-};
-
-template <typename DstMemorySpace, typename SrcMemorySpace>
-struct verify_space<DstMemorySpace, SrcMemorySpace, false> {
- KOKKOS_DEPRECATED KOKKOS_FUNCTION static void check() {
- Kokkos::abort(
- "Kokkos::View ERROR: attempt to access inaccessible memory space");
- };
-};
-#endif
-
} // namespace Impl
-
-namespace Experimental {
-template <class, class, class, class>
-class LogicalMemorySpace;
-}
-
} // namespace Kokkos
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-#define KOKKOS_RESTRICT_EXECUTION_TO_DATA(DATA_SPACE, DATA_PTR) \
- Kokkos::Impl::verify_space<Kokkos::Impl::ActiveExecutionMemorySpace, \
- DATA_SPACE>::check();
-
-#define KOKKOS_RESTRICT_EXECUTION_TO_(DATA_SPACE) \
- Kokkos::Impl::verify_space<Kokkos::Impl::ActiveExecutionMemorySpace, \
- DATA_SPACE>::check();
-#endif
-
//----------------------------------------------------------------------------
namespace Kokkos {
class Enable = void>
struct DeepCopy;
-template <typename ExecutionSpace, class DT, class... DP>
-struct ZeroMemset;
-
template <class ViewType, class Layout = typename ViewType::array_layout,
class ExecSpace = typename ViewType::execution_space,
- int Rank = ViewType::Rank, typename iType = int64_t>
+ int Rank = ViewType::rank, typename iType = int64_t>
struct ViewFill;
template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
///
/// This is an implementation detail of parallel_reduce. Users should
/// skip this and go directly to the nonmember function parallel_reduce.
-template <class FunctorType, class ExecPolicy, class ReducerType = InvalidType,
- class ExecutionSpace = typename Impl::FunctorPolicyExecutionSpace<
- FunctorType, ExecPolicy>::execution_space>
+template <typename CombinedFunctorReducerType, typename PolicyType,
+ typename ExecutionSpaceType>
class ParallelReduce;
+template <typename FunctorType, typename FunctorAnalysisReducerType,
+ typename Enable = void>
+class CombinedFunctorReducer;
+
/// \class ParallelScan
/// \brief Implementation detail of parallel_scan.
///
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_CRS_HPP
#define KOKKOS_CRS_HPP
/*
* Default Constructors, operators and destructor
*/
- KOKKOS_DEFAULTED_FUNCTION Crs() = default;
- KOKKOS_DEFAULTED_FUNCTION Crs(Crs const&) = default;
- KOKKOS_DEFAULTED_FUNCTION Crs(Crs&&) = default;
+ KOKKOS_DEFAULTED_FUNCTION Crs() = default;
+ KOKKOS_DEFAULTED_FUNCTION Crs(Crs const&) = default;
+ KOKKOS_DEFAULTED_FUNCTION Crs(Crs&&) = default;
KOKKOS_DEFAULTED_FUNCTION Crs& operator=(Crs const&) = default;
- KOKKOS_DEFAULTED_FUNCTION Crs& operator=(Crs&&) = default;
- KOKKOS_DEFAULTED_FUNCTION ~Crs() = default;
+ KOKKOS_DEFAULTED_FUNCTION Crs& operator=(Crs&&) = default;
+ KOKKOS_DEFAULTED_FUNCTION ~Crs() = default;
/** \brief Assign to a view of the rhs array.
* If the old view is the last view
public:
KOKKOS_INLINE_FUNCTION
- void operator()(index_type i) const { atomic_increment(&out[in.entries(i)]); }
+ void operator()(index_type i) const { atomic_inc(&out[in.entries(i)]); }
GetCrsTransposeCounts(InCrs const& arg_in, OutCounts const& arg_out)
: in(arg_in), out(arg_out) {
using policy_type = RangePolicy<index_type, execution_space>;
Functor m_functor;
counts_type m_counts;
struct Count {};
- inline void operator()(Count, size_type i) const {
+ KOKKOS_FUNCTION void operator()(Count, size_type i) const {
m_counts(i) = m_functor(i, nullptr);
}
struct Fill {};
- inline void operator()(Fill, size_type i) const {
+ KOKKOS_FUNCTION void operator()(Fill, size_type i) const {
auto j = m_crs.row_map(i);
    /* we don't want to access entries(entries.size()), even if it's just to get
       its address and never use it. this can happen when row (i) is empty and
CountAndFillBase(CrsType& crs, Functor const& f) : m_crs(crs), m_functor(f) {}
};
-#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
-#if defined(KOKKOS_ENABLE_CUDA)
-#define EXEC_SPACE Kokkos::Cuda
-#elif defined(KOKKOS_ENABLE_HIP)
-#define EXEC_SPACE Kokkos::Experimental::HIP
-#endif
-template <class CrsType, class Functor>
-struct CountAndFillBase<CrsType, Functor, EXEC_SPACE> {
- using data_type = typename CrsType::data_type;
- using size_type = typename CrsType::size_type;
- using row_map_type = typename CrsType::row_map_type;
- using counts_type = row_map_type;
- CrsType m_crs;
- Functor m_functor;
- counts_type m_counts;
- struct Count {};
- __device__ inline void operator()(Count, size_type i) const {
- m_counts(i) = m_functor(i, nullptr);
- }
- struct Fill {};
- __device__ inline void operator()(Fill, size_type i) const {
- auto j = m_crs.row_map(i);
- /* we don't want to access entries(entries.size()), even if its just to get
- its address and never use it. this can happen when row (i) is empty and
- all rows after it are also empty. we could compare to row_map(i + 1), but
- that is a read from global memory, whereas dimension_0() should be part
- of the View in registers (or constant memory) */
- data_type* fill = (j == static_cast<decltype(j)>(m_crs.entries.extent(0)))
- ? nullptr
- : (&(m_crs.entries(j)));
- m_functor(i, fill);
- }
- CountAndFillBase(CrsType& crs, Functor const& f) : m_crs(crs), m_functor(f) {}
-};
-#endif
-
template <class CrsType, class Functor>
struct CountAndFill : public CountAndFillBase<CrsType, Functor> {
using base_type = CountAndFillBase<CrsType, Functor>;
closure.execute();
}
auto nentries = Kokkos::get_crs_row_map_from_counts(this->m_crs.row_map,
- this->m_counts);
+ this->m_counts);
this->m_counts = counts_type();
this->m_crs.entries = entries_type("entries", nentries);
{
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_DETECTION_IDIOM_HPP
#define KOKKOS_DETECTION_IDIOM_HPP
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DETECTIONIDIOM
#endif
-#include <impl/Kokkos_Utilities.hpp> // void_t
+#include <Kokkos_Macros.hpp>  // FIXME only needed to satisfy the header
+                              // self-containment test
+
#include <type_traits>
// NOTE This header implements the detection idiom from Version 2 of the C++
// specialization recognizes and handles only types supporting Op
template <class Default, template <class...> class Op, class... Args>
-struct detector<Default, void_t<Op<Args...>>, Op, Args...> {
+struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
using value_t = std::true_type;
using type = Op<Args...>;
};
} // namespace Impl
struct nonesuch : private Impl::nonesuch_base {
- ~nonesuch() = delete;
- nonesuch(nonesuch const&) = delete;
+ ~nonesuch() = delete;
+ nonesuch(nonesuch const&) = delete;
void operator=(nonesuch const&) = delete;
};
using is_detected_convertible =
std::is_convertible<detected_t<Op, Args...>, To>;
-#ifdef KOKKOS_ENABLE_CXX17
template <template <class...> class Op, class... Args>
inline constexpr bool is_detected_v = is_detected<Op, Args...>::value;
template <class Expected, template <class...> class Op, class... Args>
inline constexpr bool is_detected_exact_v =
- is_detected_exact<Expected, Op, Args...>::value;
+ is_detected_exact<Expected, Op, Args...>::value; // NOLINT
template <class Expected, template <class...> class Op, class... Args>
inline constexpr bool is_detected_convertible_v =
is_detected_convertible<Expected, Op, Args...>::value;
-#endif
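+// A detection sketch; the archetype `size_op` and the probed types are
+// illustrative only (assuming the usual standard headers):
+//
+//   template <class T>
+//   using size_op = decltype(std::declval<T const&>().size());
+//
+//   static_assert(Kokkos::is_detected_v<size_op, std::vector<int>>);
+//   static_assert(!Kokkos::is_detected_v<size_op, int>);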
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_EXECPOLICY_HPP
#define KOKKOS_EXECPOLICY_HPP
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_AnalyzePolicy.hpp>
#include <Kokkos_Concepts.hpp>
+#include <Kokkos_TypeInfo.hpp>
+#ifndef KOKKOS_ENABLE_IMPL_TYPEINFO
#include <typeinfo>
+#endif
+#include <limits>
//----------------------------------------------------------------------------
struct ChunkSize {
int value;
+ explicit ChunkSize(int value_) : value(value_) {}
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT("ChunkSize should be constructed explicitly.")
ChunkSize(int value_) : value(value_) {}
+#endif
};
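+// Construction sketch: with the explicit constructor the chunk size is
+// spelled out at the call site (`n` is an assumed extent):
+//
+//   Kokkos::RangePolicy<> policy(0, n, Kokkos::ChunkSize(32));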
/** \brief Execution policy for work over a range of an integral type.
m_granularity_mask(0) {}
/** \brief Total range */
+ template <typename IndexType1, typename IndexType2,
+ std::enable_if_t<(std::is_convertible_v<IndexType1, member_type> &&
+ std::is_convertible_v<IndexType2, member_type>),
+ bool> = false>
+ inline RangePolicy(const IndexType1 work_begin, const IndexType2 work_end)
+ : RangePolicy(typename traits::execution_space(), work_begin, work_end) {}
+
+ /** \brief Total range */
+ template <typename IndexType1, typename IndexType2,
+ std::enable_if_t<(std::is_convertible_v<IndexType1, member_type> &&
+ std::is_convertible_v<IndexType2, member_type>),
+ bool> = false>
inline RangePolicy(const typename traits::execution_space& work_space,
- const member_type work_begin, const member_type work_end)
+ const IndexType1 work_begin, const IndexType2 work_end)
: m_space(work_space),
- m_begin(work_begin < work_end ? work_begin : 0),
- m_end(work_begin < work_end ? work_end : 0),
+ m_begin(work_begin),
+ m_end(work_end),
m_granularity(0),
m_granularity_mask(0) {
+ check_conversion_safety(work_begin);
+ check_conversion_safety(work_end);
+ check_bounds_validity();
set_auto_chunk_size();
}
- /** \brief Total range */
- inline RangePolicy(const member_type work_begin, const member_type work_end)
- : RangePolicy(typename traits::execution_space(), work_begin, work_end) {
- set_auto_chunk_size();
- }
-
- /** \brief Total range */
- template <class... Args>
- inline RangePolicy(const typename traits::execution_space& work_space,
- const member_type work_begin, const member_type work_end,
- Args... args)
+ template <typename IndexType1, typename IndexType2,
+ std::enable_if_t<(std::is_convertible_v<IndexType1, member_type> &&
+ std::is_convertible_v<IndexType2, member_type>),
+ bool> = false>
+ RangePolicy(const typename traits::execution_space& work_space,
+ const IndexType1 work_begin, const IndexType2 work_end,
+ const ChunkSize chunk_size)
: m_space(work_space),
- m_begin(work_begin < work_end ? work_begin : 0),
- m_end(work_begin < work_end ? work_end : 0),
+ m_begin(work_begin),
+ m_end(work_end),
m_granularity(0),
m_granularity_mask(0) {
- set_auto_chunk_size();
- set(args...);
+ check_conversion_safety(work_begin);
+ check_conversion_safety(work_end);
+ check_bounds_validity();
+ set_chunk_size(chunk_size.value);
}
/** \brief Total range */
- template <class... Args>
- inline RangePolicy(const member_type work_begin, const member_type work_end,
- Args... args)
- : RangePolicy(typename traits::execution_space(), work_begin, work_end) {
- set_auto_chunk_size();
- set(args...);
- }
-
- private:
- inline void set() {}
+ template <typename IndexType1, typename IndexType2, typename... Args,
+ std::enable_if_t<(std::is_convertible_v<IndexType1, member_type> &&
+ std::is_convertible_v<IndexType2, member_type>),
+ bool> = false>
+ RangePolicy(const IndexType1 work_begin, const IndexType2 work_end,
+ const ChunkSize chunk_size)
+ : RangePolicy(typename traits::execution_space(), work_begin, work_end,
+ chunk_size) {}
public:
- template <class... Args>
- inline void set(Args...) {
- static_assert(
- 0 == sizeof...(Args),
- "Kokkos::RangePolicy: unhandled constructor arguments encountered.");
- }
-
- template <class... Args>
- inline void set(const ChunkSize& chunksize, Args... args) {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED_WITH_COMMENT("Use set_chunk_size instead")
+ inline void set(ChunkSize chunksize) {
m_granularity = chunksize.value;
m_granularity_mask = m_granularity - 1;
- set(args...);
}
+#endif
public:
/** \brief return chunk_size */
private:
/** \brief finalize chunk_size if it was set to AUTO*/
inline void set_auto_chunk_size() {
- int64_t concurrency =
- static_cast<int64_t>(traits::execution_space::concurrency());
+#ifdef KOKKOS_ENABLE_SYCL
+ if (std::is_same_v<typename traits::execution_space, Kokkos::SYCL>) {
+    // chunk_size <= 1 lets the compiler choose the workgroup size when
+    // launching kernels
+ m_granularity = 1;
+ m_granularity_mask = 0;
+ return;
+ }
+#endif
+ auto concurrency = static_cast<int64_t>(m_space.concurrency());
if (concurrency == 0) concurrency = 1;
if (m_granularity > 0) {
m_granularity_mask = m_granularity - 1;
}
+ void check_bounds_validity() {
+ if (m_end < m_begin) {
+ std::string msg = "Kokkos::RangePolicy bounds error: The lower bound (" +
+ std::to_string(m_begin) +
+ ") is greater than the upper bound (" +
+ std::to_string(m_end) + ").\n";
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ Kokkos::abort(msg.c_str());
+#endif
+ m_begin = 0;
+ m_end = 0;
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+ Kokkos::Impl::log_warning(msg);
+#endif
+ }
+ }
+
+ // To be replaced with std::in_range (c++20)
+ template <typename IndexType>
+ static void check_conversion_safety([[maybe_unused]] const IndexType bound) {
+ // Checking that the round-trip conversion preserves input index value
+ if constexpr (std::is_convertible_v<member_type, IndexType>) {
+#if !defined(KOKKOS_ENABLE_DEPRECATED_CODE_4) || \
+ defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS)
+
+ std::string msg =
+ "Kokkos::RangePolicy bound type error: an unsafe implicit conversion "
+ "is performed on a bound (" +
+ std::to_string(bound) +
+ "), which may "
+ "not preserve its original value.\n";
+ bool warn = false;
+
+ if constexpr (std::is_arithmetic_v<member_type> &&
+ (std::is_signed_v<IndexType> !=
+ std::is_signed_v<member_type>)) {
+ // check signed to unsigned
+ if constexpr (std::is_signed_v<IndexType>)
+ warn |= (bound < static_cast<IndexType>(
+ std::numeric_limits<member_type>::min()));
+
+ // check unsigned to signed
+ if constexpr (std::is_signed_v<member_type>)
+ warn |= (bound > static_cast<IndexType>(
+ std::numeric_limits<member_type>::max()));
+ }
+
+ // check narrowing
+ warn |=
+ (static_cast<IndexType>(static_cast<member_type>(bound)) != bound);
+
+ if (warn) {
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ Kokkos::abort(msg.c_str());
+#endif
+
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+ Kokkos::Impl::log_warning(msg);
+#endif
+ }
+#endif
+ }
+ }
+
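+  // Check sketch: assuming the default 64-bit signed member_type, an unsigned
+  // bound above its maximum fails the round-trip test above and is flagged:
+  //
+  //   Kokkos::RangePolicy<>(0, std::numeric_limits<uint64_t>::max());
+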
public:
/** \brief Subrange for a partition's rank and size.
*
};
};
+RangePolicy() -> RangePolicy<>;
+
+RangePolicy(int64_t, int64_t) -> RangePolicy<>;
+RangePolicy(int64_t, int64_t, ChunkSize const&) -> RangePolicy<>;
+
+RangePolicy(DefaultExecutionSpace const&, int64_t, int64_t) -> RangePolicy<>;
+RangePolicy(DefaultExecutionSpace const&, int64_t, int64_t, ChunkSize const&)
+ -> RangePolicy<>;
+
+template <typename ES, typename = std::enable_if_t<is_execution_space_v<ES>>>
+RangePolicy(ES const&, int64_t, int64_t) -> RangePolicy<ES>;
+
+template <typename ES, typename = std::enable_if_t<is_execution_space_v<ES>>>
+RangePolicy(ES const&, int64_t, int64_t, ChunkSize const&) -> RangePolicy<ES>;
+
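+// Deduction sketch: with the guides above, common policies need no template
+// arguments (`n` and `exec` are assumptions for illustration):
+//
+//   Kokkos::RangePolicy(0, n);        // -> RangePolicy<>
+//   Kokkos::RangePolicy(exec, 0, n);  // -> RangePolicy<decltype(exec)>,
+//                                     //    or <> for the default space
+//   Kokkos::RangePolicy(0, n, Kokkos::ChunkSize(8));  // -> RangePolicy<>
+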
} // namespace Kokkos
//----------------------------------------------------------------------------
template <class iType, class... Args>
struct ExtractVectorLength {
static inline iType value(
- std::enable_if_t<std::is_integral<iType>::value, iType> val, Args...) {
+ std::enable_if_t<std::is_integral_v<iType>, iType> val, Args...) {
return val;
}
- static inline std::enable_if_t<!std::is_integral<iType>::value, int> value(
- std::enable_if_t<!std::is_integral<iType>::value, iType>, Args...) {
+ static inline std::enable_if_t<!std::is_integral_v<iType>, int> value(
+ std::enable_if_t<!std::is_integral_v<iType>, iType>, Args...) {
return 1;
}
};
template <class iType, class... Args>
-inline std::enable_if_t<std::is_integral<iType>::value, iType>
-extract_vector_length(iType val, Args...) {
+inline std::enable_if_t<std::is_integral_v<iType>, iType> extract_vector_length(
+ iType val, Args...) {
return val;
}
template <class iType, class... Args>
-inline std::enable_if_t<!std::is_integral<iType>::value, int>
-extract_vector_length(iType, Args...) {
+inline std::enable_if_t<!std::is_integral_v<iType>, int> extract_vector_length(
+ iType, Args...) {
return 1;
}
}
};
-// Throws a runtime exception if level is not `0` or `1`
+// Causes abnormal program termination if level is not `0` or `1`
void team_policy_check_valid_storage_level_argument(int level);
/** \brief Execution policy for parallel work over a league of teams of
}
};
+// When no execution space is provided, deduce TeamPolicy<>
+
+TeamPolicy() -> TeamPolicy<>;
+
+TeamPolicy(int, int) -> TeamPolicy<>;
+TeamPolicy(int, int, int) -> TeamPolicy<>;
+TeamPolicy(int, Kokkos::AUTO_t const&) -> TeamPolicy<>;
+TeamPolicy(int, Kokkos::AUTO_t const&, int) -> TeamPolicy<>;
+TeamPolicy(int, Kokkos::AUTO_t const&, Kokkos::AUTO_t const&) -> TeamPolicy<>;
+TeamPolicy(int, int, Kokkos::AUTO_t const&) -> TeamPolicy<>;
+
+// DefaultExecutionSpace deduces to TeamPolicy<>
+
+TeamPolicy(DefaultExecutionSpace const&, int, int) -> TeamPolicy<>;
+TeamPolicy(DefaultExecutionSpace const&, int, int, int) -> TeamPolicy<>;
+TeamPolicy(DefaultExecutionSpace const&, int, Kokkos::AUTO_t const&)
+ -> TeamPolicy<>;
+TeamPolicy(DefaultExecutionSpace const&, int, Kokkos::AUTO_t const&, int)
+ -> TeamPolicy<>;
+TeamPolicy(DefaultExecutionSpace const&, int, Kokkos::AUTO_t const&,
+ Kokkos::AUTO_t const&) -> TeamPolicy<>;
+TeamPolicy(DefaultExecutionSpace const&, int, int, Kokkos::AUTO_t const&)
+ -> TeamPolicy<>;
+
+// ES != DefaultExecutionSpace deduces to TeamPolicy<ES>
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, int) -> TeamPolicy<ES>;
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, int, int) -> TeamPolicy<ES>;
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, Kokkos::AUTO_t const&) -> TeamPolicy<ES>;
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, Kokkos::AUTO_t const&, int) -> TeamPolicy<ES>;
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, Kokkos::AUTO_t const&, Kokkos::AUTO_t const&)
+ -> TeamPolicy<ES>;
+
+template <typename ES,
+ typename = std::enable_if_t<Kokkos::is_execution_space_v<ES>>>
+TeamPolicy(ES const&, int, int, Kokkos::AUTO_t const&) -> TeamPolicy<ES>;
+
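+// Deduction sketch (`exec` and `league_size` are assumptions for
+// illustration):
+//
+//   Kokkos::TeamPolicy(league_size, Kokkos::AUTO);        // -> TeamPolicy<>
+//   Kokkos::TeamPolicy(exec, league_size, Kokkos::AUTO);  // -> TeamPolicy<ES>,
+//                                                         //    or <> for the
+//                                                         //    default space
+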
namespace Impl {
template <typename iType, class TeamMemberType>
const index_type& count) noexcept
: start(static_cast<index_type>(0)), end(count) {}
- KOKKOS_INLINE_FUNCTION
- constexpr ThreadVectorRangeBoundariesStruct(const index_type& count) noexcept
- : start(static_cast<index_type>(0)), end(count) {}
-
KOKKOS_INLINE_FUNCTION
constexpr ThreadVectorRangeBoundariesStruct(
const TeamMemberType, const index_type& arg_begin,
const index_type& arg_end) noexcept
: start(static_cast<index_type>(arg_begin)), end(arg_end) {}
-
- KOKKOS_INLINE_FUNCTION
- constexpr ThreadVectorRangeBoundariesStruct(
- const index_type& arg_begin, const index_type& arg_end) noexcept
- : start(static_cast<index_type>(arg_begin)), end(arg_end) {}
};
template <class TeamMemberType>
namespace Impl {
+enum class TeamMDRangeLastNestLevel : bool { NotLastNestLevel, LastNestLevel };
+enum class TeamMDRangeParThread : bool { NotParThread, ParThread };
+enum class TeamMDRangeParVector : bool { NotParVector, ParVector };
+enum class TeamMDRangeThreadAndVector : bool { NotBoth, Both };
+
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct HostBasedNestLevel;
+
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct AcceleratorBasedNestLevel;
+
+// ThreadAndVectorNestLevel determines on which nested level parallelization
+// happens.
+// - Rank is Kokkos::Rank<TotalNestLevel, Iter>
+// - TotalNestLevel is the total number of loop nests
+// - Iter is whether to go forward or backward through ranks (i.e. the
+// iteration order for MDRangePolicy)
+// - ThreadAndVector determines whether vector and thread parallelism are
+//   used together
+template <typename Rank, typename ExecSpace,
+ TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel;
+
+struct NoReductionTag {};
+
+template <typename Rank, typename TeamMDPolicy, typename Lambda,
+ typename ReductionValueType>
+KOKKOS_INLINE_FUNCTION void md_parallel_impl(TeamMDPolicy const& policy,
+ Lambda const& lambda,
+ ReductionValueType&& val);
+} // namespace Impl
+
+template <typename Rank, typename TeamHandle>
+struct TeamThreadMDRange;
+
+template <unsigned N, Iterate OuterDir, Iterate InnerDir, typename TeamHandle>
+struct TeamThreadMDRange<Rank<N, OuterDir, InnerDir>, TeamHandle> {
+ using NestLevelType = int;
+ using BoundaryType = int;
+ using TeamHandleType = TeamHandle;
+ using ExecutionSpace = typename TeamHandleType::execution_space;
+ using ArrayLayout = typename ExecutionSpace::array_layout;
+
+ static constexpr NestLevelType total_nest_level =
+ Rank<N, OuterDir, InnerDir>::rank;
+ static constexpr Iterate iter = OuterDir;
+ static constexpr auto par_thread = Impl::TeamMDRangeParThread::ParThread;
+ static constexpr auto par_vector = Impl::TeamMDRangeParVector::NotParVector;
+
+ static constexpr Iterate direction =
+ OuterDir == Iterate::Default ? Impl::layout_iterate_type_selector<
+ ArrayLayout>::outer_iteration_pattern
+ : iter;
+
+ template <class... Args>
+ KOKKOS_FUNCTION TeamThreadMDRange(TeamHandleType const& team_, Args&&... args)
+ : team(team_), boundaries{static_cast<BoundaryType>(args)...} {
+ static_assert(sizeof...(Args) == total_nest_level);
+ }
+
+ TeamHandleType const& team;
+ BoundaryType boundaries[total_nest_level];
+};
+
+template <typename TeamHandle, typename... Args>
+KOKKOS_DEDUCTION_GUIDE TeamThreadMDRange(TeamHandle const&, Args&&...)
+ -> TeamThreadMDRange<Rank<sizeof...(Args), Iterate::Default>, TeamHandle>;
+
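+// Usage sketch inside a TeamPolicy functor; `team` (the member handle), `A`
+// (a rank-2 View), and the extents are assumptions for illustration:
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadMDRange(team, n0, n1),
+//                        [=](int i, int j) { A(i, j) = 0.0; });
+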
+template <typename Rank, typename TeamHandle>
+struct ThreadVectorMDRange;
+
+template <unsigned N, Iterate OuterDir, Iterate InnerDir, typename TeamHandle>
+struct ThreadVectorMDRange<Rank<N, OuterDir, InnerDir>, TeamHandle> {
+ using NestLevelType = int;
+ using BoundaryType = int;
+ using TeamHandleType = TeamHandle;
+ using ExecutionSpace = typename TeamHandleType::execution_space;
+ using ArrayLayout = typename ExecutionSpace::array_layout;
+
+ static constexpr NestLevelType total_nest_level =
+ Rank<N, OuterDir, InnerDir>::rank;
+ static constexpr Iterate iter = OuterDir;
+ static constexpr auto par_thread = Impl::TeamMDRangeParThread::NotParThread;
+ static constexpr auto par_vector = Impl::TeamMDRangeParVector::ParVector;
+
+ static constexpr Iterate direction =
+ OuterDir == Iterate::Default ? Impl::layout_iterate_type_selector<
+ ArrayLayout>::outer_iteration_pattern
+ : iter;
+
+ template <class... Args>
+ KOKKOS_INLINE_FUNCTION ThreadVectorMDRange(TeamHandleType const& team_,
+ Args&&... args)
+ : team(team_), boundaries{static_cast<BoundaryType>(args)...} {
+ static_assert(sizeof...(Args) == total_nest_level);
+ }
+
+ TeamHandleType const& team;
+ BoundaryType boundaries[total_nest_level];
+};
+
+template <typename TeamHandle, typename... Args>
+KOKKOS_DEDUCTION_GUIDE ThreadVectorMDRange(TeamHandle const&, Args&&...)
+ -> ThreadVectorMDRange<Rank<sizeof...(Args), Iterate::Default>, TeamHandle>;
+
+template <typename Rank, typename TeamHandle>
+struct TeamVectorMDRange;
+
+template <unsigned N, Iterate OuterDir, Iterate InnerDir, typename TeamHandle>
+struct TeamVectorMDRange<Rank<N, OuterDir, InnerDir>, TeamHandle> {
+ using NestLevelType = int;
+ using BoundaryType = int;
+ using TeamHandleType = TeamHandle;
+ using ExecutionSpace = typename TeamHandleType::execution_space;
+ using ArrayLayout = typename ExecutionSpace::array_layout;
+
+ static constexpr NestLevelType total_nest_level =
+ Rank<N, OuterDir, InnerDir>::rank;
+ static constexpr Iterate iter = OuterDir;
+ static constexpr auto par_thread = Impl::TeamMDRangeParThread::ParThread;
+ static constexpr auto par_vector = Impl::TeamMDRangeParVector::ParVector;
+
+ static constexpr Iterate direction =
+ iter == Iterate::Default ? Impl::layout_iterate_type_selector<
+ ArrayLayout>::outer_iteration_pattern
+ : iter;
+
+ template <class... Args>
+ KOKKOS_INLINE_FUNCTION TeamVectorMDRange(TeamHandleType const& team_,
+ Args&&... args)
+ : team(team_), boundaries{static_cast<BoundaryType>(args)...} {
+ static_assert(sizeof...(Args) == total_nest_level);
+ }
+
+ TeamHandleType const& team;
+ BoundaryType boundaries[total_nest_level];
+};
+
+template <typename TeamHandle, typename... Args>
+KOKKOS_DEDUCTION_GUIDE TeamVectorMDRange(TeamHandle const&, Args&&...)
+ -> TeamVectorMDRange<Rank<sizeof...(Args), Iterate::Default>, TeamHandle>;
+
+template <typename Rank, typename TeamHandle, typename Lambda,
+ typename ReducerValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ TeamThreadMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda,
+ ReducerValueType& val) {
+ static_assert(/*!Kokkos::is_view_v<ReducerValueType> &&*/
+ !std::is_array_v<ReducerValueType> &&
+ !std::is_pointer_v<ReducerValueType> &&
+ !Kokkos::is_reducer_v<ReducerValueType>,
+ "Only scalar return types are allowed!");
+
+ val = ReducerValueType{};
+ Impl::md_parallel_impl<Rank>(policy, lambda, val);
+ policy.team.team_reduce(
+ Kokkos::Sum<ReducerValueType, typename TeamHandle::execution_space>{val});
+}
+
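+// Reduction sketch over the same kind of nested range (assumptions as in the
+// sketch above); the scalar result is combined across the team:
+//
+//   double team_sum;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamThreadMDRange(team, n0, n1),
+//       [=](int i, int j, double& lsum) { lsum += A(i, j); }, team_sum);
+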
+template <typename Rank, typename TeamHandle, typename Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ TeamThreadMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda) {
+ Impl::md_parallel_impl<Rank>(policy, lambda, Impl::NoReductionTag());
+}
+
+template <typename Rank, typename TeamHandle, typename Lambda,
+ typename ReducerValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ ThreadVectorMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda,
+ ReducerValueType& val) {
+ static_assert(/*!Kokkos::is_view_v<ReducerValueType> &&*/
+ !std::is_array_v<ReducerValueType> &&
+ !std::is_pointer_v<ReducerValueType> &&
+ !Kokkos::is_reducer_v<ReducerValueType>,
+ "Only a scalar return types are allowed!");
+
+ val = ReducerValueType{};
+ Impl::md_parallel_impl<Rank>(policy, lambda, val);
+ if constexpr (false
+#ifdef KOKKOS_ENABLE_CUDA
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::Cuda>
+#elif defined(KOKKOS_ENABLE_HIP)
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::HIP>
+#elif defined(KOKKOS_ENABLE_SYCL)
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::SYCL>
+#endif
+ )
+ policy.team.vector_reduce(
+ Kokkos::Sum<ReducerValueType, typename TeamHandle::execution_space>{
+ val});
+}
+
+template <typename Rank, typename TeamHandle, typename Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ ThreadVectorMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda) {
+ Impl::md_parallel_impl<Rank>(policy, lambda, Impl::NoReductionTag());
+}
+
+template <typename Rank, typename TeamHandle, typename Lambda,
+ typename ReducerValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ TeamVectorMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda,
+ ReducerValueType& val) {
+ static_assert(/*!Kokkos::is_view_v<ReducerValueType> &&*/
+ !std::is_array_v<ReducerValueType> &&
+ !std::is_pointer_v<ReducerValueType> &&
+ !Kokkos::is_reducer_v<ReducerValueType>,
+ "Only a scalar return types are allowed!");
+
+ val = ReducerValueType{};
+ Impl::md_parallel_impl<Rank>(policy, lambda, val);
+ if constexpr (false
+#ifdef KOKKOS_ENABLE_CUDA
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::Cuda>
+#elif defined(KOKKOS_ENABLE_HIP)
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::HIP>
+#elif defined(KOKKOS_ENABLE_SYCL)
+ || std::is_same_v<typename TeamHandle::execution_space,
+ Kokkos::SYCL>
+#endif
+ )
+ policy.team.vector_reduce(
+ Kokkos::Sum<ReducerValueType, typename TeamHandle::execution_space>{
+ val});
+ policy.team.team_reduce(
+ Kokkos::Sum<ReducerValueType, typename TeamHandle::execution_space>{val});
+}
+
+template <typename Rank, typename TeamHandle, typename Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ TeamVectorMDRange<Rank, TeamHandle> const& policy, Lambda const& lambda) {
+ Impl::md_parallel_impl<Rank>(policy, lambda, Impl::NoReductionTag());
+}
+
+namespace Impl {
+
template <typename FunctorType, typename TagType,
- bool HasTag = !std::is_void<TagType>::value>
+ bool HasTag = !std::is_void_v<TagType>>
struct ParallelConstructName;
template <typename FunctorType, typename TagType>
struct ParallelConstructName<FunctorType, TagType, true> {
ParallelConstructName(std::string const& label) : label_ref(label) {
if (label.empty()) {
+#ifdef KOKKOS_ENABLE_IMPL_TYPEINFO
+ default_name =
+ std::string(TypeInfo<std::remove_const_t<FunctorType>>::name()) +
+ "/" + std::string(TypeInfo<TagType>::name());
+#else
default_name = std::string(typeid(FunctorType).name()) + "/" +
typeid(TagType).name();
+#endif
}
}
std::string const& get() {
struct ParallelConstructName<FunctorType, TagType, false> {
ParallelConstructName(std::string const& label) : label_ref(label) {
if (label.empty()) {
- default_name = std::string(typeid(FunctorType).name());
+#ifdef KOKKOS_ENABLE_IMPL_TYPEINFO
+ default_name = TypeInfo<std::remove_const_t<FunctorType>>::name();
+#else
+ default_name = typeid(FunctorType).name();
+#endif
}
}
std::string const& get() {
template <class... Args>
struct PatternImplSpecializationFromTag<Kokkos::ParallelForTag, Args...>
- : identity<ParallelFor<Args...>> {};
+ : type_identity<ParallelFor<Args...>> {};
template <class... Args>
struct PatternImplSpecializationFromTag<Kokkos::ParallelReduceTag, Args...>
- : identity<ParallelReduce<Args...>> {};
+ : type_identity<ParallelReduce<Args...>> {};
template <class... Args>
struct PatternImplSpecializationFromTag<Kokkos::ParallelScanTag, Args...>
- : identity<ParallelScan<Args...>> {};
+ : type_identity<ParallelScan<Args...>> {};
template <class PatternImpl>
struct PatternTagFromImplSpecialization;
template <class... Args>
struct PatternTagFromImplSpecialization<ParallelFor<Args...>>
- : identity<ParallelForTag> {};
+ : type_identity<ParallelForTag> {};
template <class... Args>
struct PatternTagFromImplSpecialization<ParallelReduce<Args...>>
- : identity<ParallelReduceTag> {};
+ : type_identity<ParallelReduceTag> {};
template <class... Args>
struct PatternTagFromImplSpecialization<ParallelScan<Args...>>
- : identity<ParallelScanTag> {};
+ : type_identity<ParallelScanTag> {};
} // end namespace Impl
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_KOKKOS_EXTENTS_HPP
+#define KOKKOS_KOKKOS_EXTENTS_HPP
+
+#include <cstddef>
+#include <type_traits>
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+#include <mdspan/mdspan.hpp>
+#else
+#include <limits>
+#endif
+
+namespace Kokkos {
+
+#ifndef KOKKOS_ENABLE_IMPL_MDSPAN
+constexpr size_t dynamic_extent = std::numeric_limits<size_t>::max();
+#endif
+
+namespace Experimental {
+
+template <size_t... ExtentSpecs>
+struct Extents {
+ /* TODO @enhancement flesh this out more */
+};
+
+template <class Exts, size_t NewExtent>
+struct PrependExtent;
+
+template <size_t... Exts, size_t NewExtent>
+struct PrependExtent<Extents<Exts...>, NewExtent> {
+ using type = Extents<NewExtent, Exts...>;
+};
+
+template <class Exts, size_t NewExtent>
+struct AppendExtent;
+
+template <size_t... Exts, size_t NewExtent>
+struct AppendExtent<Extents<Exts...>, NewExtent> {
+ using type = Extents<Exts..., NewExtent>;
+};
+} // end namespace Experimental
+
+namespace Impl {
+
+namespace _parse_view_extents_impl {
+
+template <class T>
+struct _all_remaining_extents_dynamic : std::true_type {};
+
+template <class T>
+struct _all_remaining_extents_dynamic<T*> : _all_remaining_extents_dynamic<T> {
+};
+
+template <class T, unsigned N>
+struct _all_remaining_extents_dynamic<T[N]> : std::false_type {};
+
+template <class T, class Result, class = void>
+struct _parse_impl {
+ using type = Result;
+};
+
+// We have to treat the case of int**[x] specially, since it *doesn't* go
+// backwards
+template <class T, size_t... ExtentSpec>
+struct _parse_impl<T*, Kokkos::Experimental::Extents<ExtentSpec...>,
+ std::enable_if_t<_all_remaining_extents_dynamic<T>::value>>
+ : _parse_impl<T, Kokkos::Experimental::Extents<Kokkos::dynamic_extent,
+ ExtentSpec...>> {};
+
+// int*(*[x])[y] should also still work (meaning int[][x][][y])
+template <class T, size_t... ExtentSpec>
+struct _parse_impl<
+ T*, Kokkos::Experimental::Extents<ExtentSpec...>,
+ std::enable_if_t<!_all_remaining_extents_dynamic<T>::value>> {
+ using _next = Kokkos::Experimental::AppendExtent<
+ typename _parse_impl<T, Kokkos::Experimental::Extents<ExtentSpec...>,
+ void>::type,
+ Kokkos::dynamic_extent>;
+ using type = typename _next::type;
+};
+
+template <class T, size_t... ExtentSpec, unsigned N>
+struct _parse_impl<T[N], Kokkos::Experimental::Extents<ExtentSpec...>, void>
+ : _parse_impl<T,
+ Kokkos::Experimental::Extents<ExtentSpec...,
+ size_t(N)> // TODO @pedantic
+ // this could be a
+ // narrowing cast
+ > {};
+
+} // end namespace _parse_view_extents_impl
+
+template <class DataType>
+struct ParseViewExtents {
+ using type = typename _parse_view_extents_impl ::_parse_impl<
+ DataType, Kokkos::Experimental::Extents<>>::type;
+};
+
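+// Parsing sketch: a View data type maps to compile-time extents with the
+// runtime dimensions leading, e.g.
+//
+//   static_assert(std::is_same_v<
+//       typename ParseViewExtents<int*[3]>::type,
+//       Kokkos::Experimental::Extents<Kokkos::dynamic_extent, 3>>);
+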
+template <class ValueType, size_t Ext>
+struct ApplyExtent {
+ using type = ValueType[Ext];
+};
+
+template <class ValueType>
+struct ApplyExtent<ValueType, Kokkos::dynamic_extent> {
+ using type = ValueType*;
+};
+
+template <class ValueType, unsigned N, size_t Ext>
+struct ApplyExtent<ValueType[N], Ext> {
+ using type = typename ApplyExtent<ValueType, Ext>::type[N];
+};
+
+template <class ValueType, size_t Ext>
+struct ApplyExtent<ValueType*, Ext> {
+ using type = ValueType* [Ext];
+};
+
+template <class ValueType>
+struct ApplyExtent<ValueType*, dynamic_extent> {
+ using type = typename ApplyExtent<ValueType, dynamic_extent>::type*;
+};
+
+template <class ValueType, unsigned N>
+struct ApplyExtent<ValueType[N], dynamic_extent> {
+ using type = typename ApplyExtent<ValueType, dynamic_extent>::type[N];
+};
+
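+// ApplyExtent sketch:
+//
+//   ApplyExtent<double, 4>::type               // double[4]
+//   ApplyExtent<double, dynamic_extent>::type  // double*
+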
+} // end namespace Impl
+
+} // end namespace Kokkos
+
+#endif // KOKKOS_KOKKOS_EXTENTS_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
#endif
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
#endif
+
#ifndef KOKKOS_FUTURE_HPP
#define KOKKOS_FUTURE_HPP
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
// For now, hack this in as a partial specialization
// TODO @tasking @cleanup Make this the "normal" class template and make the old
// code the specialization
template <typename ValueType, typename ExecutionSpace, typename QueueType>
-class BasicFuture<ValueType, SimpleTaskScheduler<ExecutionSpace, QueueType>> {
+class KOKKOS_DEPRECATED
+ BasicFuture<ValueType, SimpleTaskScheduler<ExecutionSpace, QueueType>> {
public:
using value_type = ValueType;
using execution_space = ExecutionSpace;
////////////////////////////////////////////////////////////////////////////////
template <typename ValueType, typename Scheduler>
-class BasicFuture {
+class KOKKOS_DEPRECATED BasicFuture {
private:
template <typename, typename>
friend class BasicTaskScheduler;
// Is a Future with the given execution space
template <typename, typename ExecSpace = void>
-struct is_future : public std::false_type {};
+struct KOKKOS_DEPRECATED is_future : public std::false_type {};
template <typename ValueType, typename Scheduler, typename ExecSpace>
-struct is_future<BasicFuture<ValueType, Scheduler>, ExecSpace>
- : std::integral_constant<
- bool,
- std::is_same<ExecSpace, typename Scheduler::execution_space>::value ||
- std::is_void<ExecSpace>::value> {};
+struct KOKKOS_DEPRECATED is_future<BasicFuture<ValueType, Scheduler>, ExecSpace>
+ : std::bool_constant<
+ std::is_same_v<ExecSpace, typename Scheduler::execution_space> ||
+ std::is_void_v<ExecSpace>> {};
////////////////////////////////////////////////////////////////////////////////
// END OLD CODE
private:
enum { Arg1_is_space = Kokkos::is_space<Arg1>::value };
enum { Arg2_is_space = Kokkos::is_space<Arg2>::value };
- enum { Arg1_is_value = !Arg1_is_space && !std::is_void<Arg1>::value };
- enum { Arg2_is_value = !Arg2_is_space && !std::is_void<Arg2>::value };
+ enum { Arg1_is_value = !Arg1_is_space && !std::is_void_v<Arg1> };
+ enum { Arg2_is_value = !Arg2_is_space && !std::is_void_v<Arg2> };
static_assert(!(Arg1_is_space && Arg2_is_space),
"Future cannot be given two spaces");
*
*/
template <class Arg1 = void, class Arg2 = void>
-using Future = typename Impl::ResolveFutureArgOrder<Arg1, Arg2>::type;
+using Future KOKKOS_DEPRECATED =
+ typename Impl::ResolveFutureArgOrder<Arg1, Arg2>::type;
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_GRAPH_HPP
#define KOKKOS_GRAPH_HPP
// GraphAccess needs to be defined, not just declared
#include <impl/Kokkos_GraphImpl.hpp>
-#include <impl/Kokkos_Utilities.hpp> // fold emulation
-
#include <functional>
#include <memory>
// <editor-fold desc="Graph"> {{{1
template <class ExecutionSpace>
-struct KOKKOS_ATTRIBUTE_NODISCARD Graph {
+struct [[nodiscard]] Graph {
public:
//----------------------------------------------------------------------------
// <editor-fold desc="public member types"> {{{2
return m_impl_ptr->get_execution_space();
}
- void submit() const {
+ void instantiate() {
+ KOKKOS_EXPECTS(bool(m_impl_ptr))
+ (*m_impl_ptr).instantiate();
+ }
+
+ void submit(const execution_space& exec) const {
KOKKOS_EXPECTS(bool(m_impl_ptr))
- (*m_impl_ptr).submit();
+ (*m_impl_ptr).submit(exec);
}
+
+ void submit() const { submit(get_execution_space()); }
+
+ decltype(auto) native_graph();
+
+ decltype(auto) native_graph_exec();
};
// </editor-fold> end Graph }}}1
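+// Lifecycle sketch (`n` and `a` are assumptions for illustration):
+//
+//   auto graph = Kokkos::Experimental::create_graph([&](auto root) {
+//     root.then_parallel_for(n, KOKKOS_LAMBDA(int i) { a(i) += 1; });
+//   });
+//   graph.instantiate();  // optional explicit instantiation
+//   graph.submit();       // or graph.submit(exec) on a specific instance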
.lock();
auto node_ptr_impl = graph_ptr_impl->create_aggregate_ptr(arg_pred_refs...);
graph_ptr_impl->add_node(node_ptr_impl);
- KOKKOS_IMPL_FOLD_COMMA_OPERATOR(
- graph_ptr_impl->add_predecessor(node_ptr_impl, arg_pred_refs) /* ... */);
+ (graph_ptr_impl->add_predecessor(node_ptr_impl, arg_pred_refs), ...);
return Kokkos::Impl::GraphAccess::make_graph_node_ref(
std::move(graph_ptr_impl), std::move(node_ptr_impl));
}
// function template injection works.
auto rv = Kokkos::Impl::GraphAccess::construct_graph(std::move(ex));
// Invoke the user's graph construction closure
- ((Closure &&) arg_closure)(Kokkos::Impl::GraphAccess::create_root_ref(rv));
+ ((Closure&&)arg_closure)(Kokkos::Impl::GraphAccess::create_root_ref(rv));
// and given them back the graph
// KOKKOS_ENSURES(rv.m_impl_ptr.use_count() == 1)
return rv;
}
+template <class ExecutionSpace = DefaultExecutionSpace>
+std::enable_if_t<Kokkos::is_execution_space_v<ExecutionSpace>,
+ Graph<ExecutionSpace>>
+create_graph(ExecutionSpace exec = ExecutionSpace{}) {
+ return Kokkos::Impl::GraphAccess::construct_graph(std::move(exec));
+}
+
template <
class ExecutionSpace = DefaultExecutionSpace,
class Closure = Kokkos::Impl::DoNotExplicitlySpecifyThisTemplateParameter>
-Graph<ExecutionSpace> create_graph(Closure&& arg_closure) {
- return create_graph(ExecutionSpace{}, (Closure &&) arg_closure);
+std::enable_if_t<
+ !Kokkos::is_execution_space_v<Kokkos::Impl::remove_cvref_t<Closure>>,
+ Graph<ExecutionSpace>>
+create_graph(Closure&& arg_closure) {
+ return create_graph(ExecutionSpace{}, (Closure&&)arg_closure);
}
// </editor-fold> end create_graph }}}1
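+// Overload-selection sketch: an execution-space argument picks the
+// closure-free overload; anything else is treated as a construction closure:
+//
+//   auto g1 = Kokkos::Experimental::create_graph(Kokkos::DefaultExecutionSpace{});
+//   auto g2 = Kokkos::Experimental::create_graph([](auto root) { /* add nodes */ });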
//==============================================================================
+template <class ExecutionSpace>
+decltype(auto) Graph<ExecutionSpace>::native_graph() {
+ KOKKOS_EXPECTS(bool(m_impl_ptr));
+#if defined(KOKKOS_ENABLE_CUDA)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::Cuda>) {
+ return m_impl_ptr->cuda_graph();
+ }
+#elif defined(KOKKOS_ENABLE_HIP) && defined(KOKKOS_IMPL_HIP_NATIVE_GRAPH)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::HIP>) {
+ return m_impl_ptr->hip_graph();
+ }
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(SYCL_EXT_ONEAPI_GRAPH)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::SYCL>) {
+ return m_impl_ptr->sycl_graph();
+ }
+#endif
+}
+
+template <class ExecutionSpace>
+decltype(auto) Graph<ExecutionSpace>::native_graph_exec() {
+ KOKKOS_EXPECTS(bool(m_impl_ptr));
+#if defined(KOKKOS_ENABLE_CUDA)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::Cuda>) {
+ return m_impl_ptr->cuda_graph_exec();
+ }
+#elif defined(KOKKOS_ENABLE_HIP) && defined(KOKKOS_IMPL_HIP_NATIVE_GRAPH)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::HIP>) {
+ return m_impl_ptr->hip_graph_exec();
+ }
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(SYCL_EXT_ONEAPI_GRAPH)
+ if constexpr (std::is_same_v<ExecutionSpace, Kokkos::SYCL>) {
+ return m_impl_ptr->sycl_graph_exec();
+ }
+#endif
+}
+
} // end namespace Experimental
} // namespace Kokkos
#include <impl/Kokkos_GraphNodeImpl.hpp>
#include <impl/Kokkos_Default_Graph_Impl.hpp>
#include <Cuda/Kokkos_Cuda_Graph_Impl.hpp>
+#if defined(KOKKOS_ENABLE_HIP)
+// The implementation of hipGraph in ROCm 5.2 is buggy, so we cannot use it.
+#if defined(KOKKOS_IMPL_HIP_NATIVE_GRAPH)
+#include <HIP/Kokkos_HIP_Graph_Impl.hpp>
+#endif
+#endif
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+#include <SYCL/Kokkos_SYCL_Graph_Impl.hpp>
+#endif
#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH
#undef KOKKOS_IMPL_PUBLIC_INCLUDE
#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_KOKKOS_GRAPHNODE_HPP
#define KOKKOS_KOKKOS_GRAPHNODE_HPP
// Note: because of these assertions, instantiating this class template is not
// intended to be SFINAE-safe, so do validation before you instantiate.
-// WORKAROUND Could not get it to compile with IBM XL V16.1.1
-#ifndef KOKKOS_COMPILER_IBM
static_assert(
- std::is_same<Predecessor, TypeErasedTag>::value ||
+ std::is_same_v<Predecessor, TypeErasedTag> ||
Kokkos::Impl::is_specialization_of<Predecessor, GraphNodeRef>::value,
"Invalid predecessor template parameter given to GraphNodeRef");
-#endif
static_assert(
Kokkos::is_execution_space<ExecutionSpace>::value,
"Invalid execution space template parameter given to GraphNodeRef");
- static_assert(std::is_same<Predecessor, TypeErasedTag>::value ||
+ static_assert(std::is_same_v<Predecessor, TypeErasedTag> ||
Kokkos::Impl::is_graph_kernel<Kernel>::value,
"Invalid kernel template parameter given to GraphNodeRef");
typename return_t::node_impl_t>(
m_node_impl->execution_space_instance(),
Kokkos::Impl::_graph_node_kernel_ctor_tag{},
- (NextKernelDeduced &&) arg_kernel,
+ (NextKernelDeduced&&)arg_kernel,
// *this is the predecessor
Kokkos::Impl::_graph_node_predecessor_ctor_tag{}, *this));
// <editor-fold desc="rule of 6 ctors"> {{{3
// Copyable and movable (basically just shared_ptr semantics
- GraphNodeRef() noexcept = default;
- GraphNodeRef(GraphNodeRef const&) = default;
- GraphNodeRef(GraphNodeRef&&) noexcept = default;
- GraphNodeRef& operator=(GraphNodeRef const&) = default;
+ GraphNodeRef() noexcept = default;
+ GraphNodeRef(GraphNodeRef const&) = default;
+ GraphNodeRef(GraphNodeRef&&) noexcept = default;
+ GraphNodeRef& operator=(GraphNodeRef const&) = default;
GraphNodeRef& operator=(GraphNodeRef&&) noexcept = default;
~GraphNodeRef() = default;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// <editor-fold desc="Type-erasing converting ctor and assignment"> {{{3
- template <
- class OtherKernel, class OtherPredecessor,
- std::enable_if_t<
- // Not a copy/move constructor
- !std::is_same<GraphNodeRef, GraphNodeRef<execution_space, OtherKernel,
- OtherPredecessor>>::value &&
- // must be an allowed type erasure of the kernel
- Kokkos::Impl::is_compatible_type_erasure<OtherKernel,
- graph_kernel>::value &&
- // must be an allowed type erasure of the predecessor
- Kokkos::Impl::is_compatible_type_erasure<
- OtherPredecessor, graph_predecessor>::value,
- int> = 0>
+ template <class OtherKernel, class OtherPredecessor,
+ std::enable_if_t<
+ // Not a copy/move constructor
+ !std::is_same_v<GraphNodeRef,
+ GraphNodeRef<execution_space, OtherKernel,
+ OtherPredecessor>> &&
+ // must be an allowed type erasure of the kernel
+ Kokkos::Impl::is_compatible_type_erasure<
+ OtherKernel, graph_kernel>::value &&
+ // must be an allowed type erasure of the predecessor
+ Kokkos::Impl::is_compatible_type_erasure<
+ OtherPredecessor, graph_predecessor>::value,
+ int> = 0>
/* implicit */
GraphNodeRef(
GraphNodeRef<execution_space, OtherKernel, OtherPredecessor> const& other)
//|| policy_t::execution_space_is_defaulted,
"Execution Space mismatch between execution policy and graph");
- auto policy = Experimental::require((Policy &&) arg_policy,
+ auto policy = Experimental::require((Policy&&)arg_policy,
Kokkos::Impl::KernelInGraphProperty{});
using next_policy_t = decltype(policy);
std::decay_t<Functor>,
Kokkos::ParallelForTag>;
return this->_then_kernel(next_kernel_t{std::move(arg_name), policy.space(),
- (Functor &&) functor,
- (Policy &&) policy});
+ (Functor&&)functor,
+ (Policy&&)policy});
}
template <
int> = 0>
auto then_parallel_for(Policy&& policy, Functor&& functor) const {
// needs to static assert constraint: DataParallelFunctor<Functor>
- return this->then_parallel_for("", (Policy &&) policy,
- (Functor &&) functor);
+ return this->then_parallel_for("", (Policy&&)policy, (Functor&&)functor);
}
template <class Functor>
// needs to static assert constraint: DataParallelFunctor<Functor>
return this->then_parallel_for(std::move(name),
Kokkos::RangePolicy<execution_space>(0, n),
- (Functor &&) functor);
+ (Functor&&)functor);
}
template <class Functor>
auto then_parallel_for(std::size_t n, Functor&& functor) const {
// needs to static assert constraint: DataParallelFunctor<Functor>
- return this->then_parallel_for("", n, (Functor &&) functor);
+ return this->then_parallel_for("", n, (Functor&&)functor);
}
// </editor-fold> end then_parallel_for }}}2
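  // A minimal usage sketch of the overloads above (illustrative only, not
  // part of the upstream header; assumes a device-accessible View `v` and an
  // extent `n`):
  //
  // \code
  //   auto graph = Kokkos::Experimental::create_graph([&](auto root) {
  //     auto node = root.then_parallel_for(
  //         "fill", n, KOKKOS_LAMBDA(int i) { v(i) = i; });
  //   });
  //   graph.submit();
  // \endcode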
Kokkos::is_reducer<return_type_remove_cvref>::value,
"Output argument to parallel reduce in a graph must be a "
"View or a Reducer");
+
+ if constexpr (Kokkos::is_reducer_v<return_type_remove_cvref>) {
+ static_assert(
+ Kokkos::SpaceAccessibility<
+ ExecutionSpace, typename return_type_remove_cvref::
+ result_view_type::memory_space>::accessible,
+ "The reduction target must be accessible by the graph execution "
+ "space.");
+ } else {
+ static_assert(
+ Kokkos::SpaceAccessibility<
+ ExecutionSpace,
+ typename return_type_remove_cvref::memory_space>::accessible,
+ "The reduction target must be accessible by the graph execution "
+ "space.");
+ }
+
using return_type =
// Yes, you do really have to do this...
std::conditional_t<Kokkos::is_reducer<return_type_remove_cvref>::value,
// End of Kokkos reducer disaster
//----------------------------------------
- auto policy = Experimental::require((Policy &&) arg_policy,
+ auto policy = Experimental::require((Policy&&)arg_policy,
Kokkos::Impl::KernelInGraphProperty{});
+ using passed_reducer_type = typename return_value_adapter::reducer_type;
+
+ using reducer_selector = Kokkos::Impl::if_c<
+ std::is_same<InvalidType, passed_reducer_type>::value, functor_type,
+ passed_reducer_type>;
+ using analysis = Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::REDUCE, Policy,
+ typename reducer_selector::type,
+ typename return_value_adapter::value_type>;
+ typename analysis::Reducer final_reducer(
+ reducer_selector::select(functor, return_value));
+ Kokkos::Impl::CombinedFunctorReducer<functor_type,
+ typename analysis::Reducer>
+ functor_reducer(functor, final_reducer);
+
using next_policy_t = decltype(policy);
- using next_kernel_t = Kokkos::Impl::GraphNodeKernelImpl<
- ExecutionSpace, next_policy_t, functor_type, Kokkos::ParallelReduceTag,
- typename return_value_adapter::reducer_type>;
+ using next_kernel_t =
+ Kokkos::Impl::GraphNodeKernelImpl<ExecutionSpace, next_policy_t,
+ decltype(functor_reducer),
+ Kokkos::ParallelReduceTag>;
return this->_then_kernel(next_kernel_t{
std::move(arg_name), graph_impl_ptr->get_execution_space(),
- (Functor &&) functor, (Policy &&) policy,
+ functor_reducer, (Policy&&)policy,
return_value_adapter::return_value(return_value, functor)});
}
int> = 0>
auto then_parallel_reduce(Policy&& arg_policy, Functor&& functor,
ReturnType&& return_value) const {
- return this->then_parallel_reduce("", (Policy &&) arg_policy,
- (Functor &&) functor,
- (ReturnType &&) return_value);
+ return this->then_parallel_reduce("", (Policy&&)arg_policy,
+ (Functor&&)functor,
+ (ReturnType&&)return_value);
}
template <class Functor, class ReturnType>
ReturnType&& return_value) const {
return this->then_parallel_reduce(
std::move(label), Kokkos::RangePolicy<execution_space>{0, idx_end},
- (Functor &&) functor, (ReturnType &&) return_value);
+ (Functor&&)functor, (ReturnType&&)return_value);
}
template <class Functor, class ReturnType>
auto then_parallel_reduce(typename execution_space::size_type idx_end,
Functor&& functor,
ReturnType&& return_value) const {
- return this->then_parallel_reduce("", idx_end, (Functor &&) functor,
- (ReturnType &&) return_value);
+ return this->then_parallel_reduce("", idx_end, (Functor&&)functor,
+ (ReturnType&&)return_value);
}
// </editor-fold> end then_parallel_reduce }}}2
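  // Illustrative usage sketch (not part of the upstream header; `pred` is a
  // hypothetical predecessor node, `v` a device View, and `result` a scalar
  // View or Reducer satisfying the accessibility checks above):
  //
  // \code
  //   auto node = pred.then_parallel_reduce(
  //       "sum", n,
  //       KOKKOS_LAMBDA(int i, double& lsum) { lsum += v(i); }, result);
  // \endcode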
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_KOKKOS_GRAPH_FWD_HPP
+#define KOKKOS_KOKKOS_GRAPH_FWD_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+struct TypeErasedTag {};
+
+template <class ExecutionSpace>
+struct Graph;
+
+template <class ExecutionSpace, class Kernel = TypeErasedTag,
+ class Predecessor = TypeErasedTag>
+class GraphNodeRef;
+
+} // end namespace Experimental
+} // end namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#endif
+#endif // KOKKOS_KOKKOS_GRAPH_FWD_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HALF_HPP_
+#define KOKKOS_HALF_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#endif
+
+#include <impl/Kokkos_Half_FloatingPointWrapper.hpp>
+#include <impl/Kokkos_Half_NumericTraits.hpp>
+#include <impl/Kokkos_Half_MathematicalFunctions.hpp>
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#endif
+#endif // KOKKOS_HALF_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_HOSTSPACE_HPP
+#define KOKKOS_HOSTSPACE_HPP
+
+#include <cstring>
+#include <string>
+#include <iosfwd>
+#include <typeinfo>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+#include "impl/Kokkos_HostSpace_deepcopy.hpp"
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+/// \class HostSpace
+/// \brief Memory management for host memory.
+///
+/// HostSpace is a memory space that governs host memory. "Host"
+/// memory means the usual CPU-accessible memory.
+class HostSpace {
+ public:
+ //! Tag this class as a kokkos memory space
+ using memory_space = HostSpace;
+ using size_type = size_t;
+
+ /// \typedef execution_space
+ /// \brief Default execution space for this memory space.
+ ///
+ /// Every memory space has a default execution space. This is
+ /// useful for things like initializing a View (which happens in
+ /// parallel using the View's default execution space).
+ using execution_space = DefaultHostExecutionSpace;
+
+  //! This memory space's preferred device_type
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ HostSpace() = default;
+ HostSpace(HostSpace&& rhs) = default;
+ HostSpace(const HostSpace& rhs) = default;
+ HostSpace& operator=(HostSpace&&) = default;
+ HostSpace& operator=(const HostSpace&) = default;
+ ~HostSpace() = default;
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+  /**\brief Non-default memory space instance to choose allocation mechanism,
+   * if available */
+
+#if defined(KOKKOS_COMPILER_GNU) && KOKKOS_COMPILER_GNU < 1100
+  // With gcc before release 11 we see deprecation warnings even when the
+  // deprecated HostSpace constructor below is not used.
+ enum
+#else
+ enum KOKKOS_DEPRECATED
+#endif
+ AllocationMechanism {
+ STD_MALLOC,
+ POSIX_MEMALIGN,
+ POSIX_MMAP,
+ INTEL_MM_ALLOC
+ };
+
+ KOKKOS_DEPRECATED
+ explicit HostSpace(const AllocationMechanism&);
+#endif
+
+ /**\brief Allocate untracked memory in the space */
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the space */
+ void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+
+ /**\brief Return Name of the MemorySpace */
+ static constexpr const char* name() { return m_name; }
+
+ private:
+ static constexpr const char* m_name = "Host";
+};
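+
+// Usage sketch (illustrative, not part of the upstream header): untracked
+// allocations must be released with deallocate() using the same size; the
+// label "my_buffer" is only an example.
+//
+// \code
+//   Kokkos::HostSpace space;
+//   void* ptr = space.allocate("my_buffer", 1024);
+//   space.deallocate("my_buffer", ptr, 1024);
+// \endcode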
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+ Kokkos::HostSpace>::assignable);
+
+template <typename S>
+struct HostMirror {
+ private:
+ // If input execution space can access HostSpace then keep it.
+ // Example: Kokkos::OpenMP can access, Kokkos::Cuda cannot
+ enum {
+ keep_exe = Kokkos::Impl::MemorySpaceAccess<
+ typename S::execution_space::memory_space,
+ Kokkos::HostSpace>::accessible
+ };
+
+ // If HostSpace can access memory space then keep it.
+ // Example: Cannot access Kokkos::CudaSpace, can access Kokkos::CudaUVMSpace
+ enum {
+ keep_mem =
+ Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+ typename S::memory_space>::accessible
+ };
+
+ public:
+ using Space = std::conditional_t<
+ keep_exe && keep_mem, S,
+ std::conditional_t<keep_mem,
+ Kokkos::Device<Kokkos::HostSpace::execution_space,
+ typename S::memory_space>,
+ Kokkos::HostSpace>>;
+};
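+
+// For instance (illustrative): a host-accessible space is kept as-is, so
+// mirroring it requires no copy, while e.g. a CUDA device space would map to
+// Kokkos::HostSpace.
+//
+// \code
+//   static_assert(std::is_same_v<HostMirror<Kokkos::HostSpace>::Space,
+//                                Kokkos::HostSpace>);
+// \endcode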
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::HostSpace);
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class ExecutionSpace>
+struct DeepCopy<HostSpace, HostSpace, ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ hostspace_parallel_deepcopy(dst, src, n);
+ }
+
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ if constexpr (!Kokkos::SpaceAccessibility<ExecutionSpace,
+ Kokkos::HostSpace>::accessible) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<HostSpace, HostSpace, "
+ "ExecutionSpace>::DeepCopy: fence before copy");
+ hostspace_parallel_deepcopy_async(dst, src, n);
+ } else {
+ hostspace_parallel_deepcopy_async(exec, dst, src, n);
+ }
+ }
+};
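+
+// This specialization backs Kokkos::deep_copy() for host-to-host transfers;
+// an illustrative sketch of a typical call path (assumes an extent `n`):
+//
+// \code
+//   Kokkos::View<double*, Kokkos::HostSpace> a("a", n), b("b", n);
+//   Kokkos::deep_copy(a, b);  // ends up in DeepCopy<HostSpace, HostSpace, ...>
+// \endcode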
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+#endif  // KOKKOS_HOSTSPACE_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_Layout.hpp
/// \brief Declaration of various \c MemoryLayout options.
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_LAYOUT_HPP
#define KOKKOS_LAYOUT_HPP
using array_layout = LayoutLeft;
size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+  // we don't have a constructor to set the stride directly,
+  // but we will deprecate the class anyway (or at least the use of instances
+  // of this class) when switching the internal implementation to use mdspan
+ size_t stride;
enum : bool { is_extent_constructible = true };
- LayoutLeft(LayoutLeft const&) = default;
- LayoutLeft(LayoutLeft&&) = default;
+ LayoutLeft(LayoutLeft const&) = default;
+ LayoutLeft(LayoutLeft&&) = default;
LayoutLeft& operator=(LayoutLeft const&) = default;
- LayoutLeft& operator=(LayoutLeft&&) = default;
+ LayoutLeft& operator=(LayoutLeft&&) = default;
KOKKOS_INLINE_FUNCTION
explicit constexpr LayoutLeft(size_t N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
- : dimension{N0, N1, N2, N3, N4, N5, N6, N7} {}
+ : dimension{N0, N1, N2, N3, N4, N5, N6, N7},
+ stride(KOKKOS_IMPL_CTOR_DEFAULT_ARG) {}
friend bool operator==(const LayoutLeft& left, const LayoutLeft& right) {
for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
using array_layout = LayoutRight;
size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+  // we don't have a constructor to set the stride directly,
+  // but we will deprecate the class anyway (or at least the use of instances
+  // of this class) when switching the internal implementation to use mdspan
+ size_t stride;
enum : bool { is_extent_constructible = true };
- LayoutRight(LayoutRight const&) = default;
- LayoutRight(LayoutRight&&) = default;
+ LayoutRight(LayoutRight const&) = default;
+ LayoutRight(LayoutRight&&) = default;
LayoutRight& operator=(LayoutRight const&) = default;
- LayoutRight& operator=(LayoutRight&&) = default;
+ LayoutRight& operator=(LayoutRight&&) = default;
KOKKOS_INLINE_FUNCTION
explicit constexpr LayoutRight(size_t N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
- : dimension{N0, N1, N2, N3, N4, N5, N6, N7} {}
+ : dimension{N0, N1, N2, N3, N4, N5, N6, N7},
+ stride{KOKKOS_IMPL_CTOR_DEFAULT_ARG} {}
friend bool operator==(const LayoutRight& left, const LayoutRight& right) {
for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
enum : bool { is_extent_constructible = false };
- LayoutStride(LayoutStride const&) = default;
- LayoutStride(LayoutStride&&) = default;
+ LayoutStride(LayoutStride const&) = default;
+ LayoutStride(LayoutStride&&) = default;
LayoutStride& operator=(LayoutStride const&) = default;
- LayoutStride& operator=(LayoutStride&&) = default;
+ LayoutStride& operator=(LayoutStride&&) = default;
/** \brief Compute strides from ordered dimensions.
*
size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S5 = 0,
size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S6 = 0,
size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S7 = 0)
- : dimension{N0, N1, N2, N3, N4, N5, N6, N7}, stride{S0, S1, S2, S3,
- S4, S5, S6, S7} {}
+ : dimension{N0, N1, N2, N3, N4, N5, N6, N7},
+ stride{S0, S1, S2, S3, S4, S5, S6, S7} {}
friend bool operator==(const LayoutStride& left, const LayoutStride& right) {
for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
Right // Right indices stride fastest
};
-// To check for LayoutTiled
-// This is to hide extra compile-time 'identifier' info within the LayoutTiled
-// class by not relying on template specialization to include the ArgN*'s
-template <typename LayoutTiledCheck, class Enable = void>
-struct is_layouttiled : std::false_type {};
-
-template <typename LayoutTiledCheck>
-struct is_layouttiled<LayoutTiledCheck,
- std::enable_if_t<LayoutTiledCheck::is_array_layout_tiled>>
- : std::true_type {};
-
-namespace Experimental {
-
-/// LayoutTiled
-// Must have Rank >= 2
-template <
- Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
- unsigned ArgN1, unsigned ArgN2 = 0, unsigned ArgN3 = 0, unsigned ArgN4 = 0,
- unsigned ArgN5 = 0, unsigned ArgN6 = 0, unsigned ArgN7 = 0,
- bool IsPowerOfTwo =
- (Kokkos::Impl::is_integral_power_of_two(ArgN0) &&
- Kokkos::Impl::is_integral_power_of_two(ArgN1) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN2) || (ArgN2 == 0)) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN3) || (ArgN3 == 0)) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN4) || (ArgN4 == 0)) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN5) || (ArgN5 == 0)) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN6) || (ArgN6 == 0)) &&
- (Kokkos::Impl::is_integral_power_of_two(ArgN7) || (ArgN7 == 0)))>
-struct LayoutTiled {
- static_assert(IsPowerOfTwo,
- "LayoutTiled must be given power-of-two tile dimensions");
-
- using array_layout = LayoutTiled<OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3,
- ArgN4, ArgN5, ArgN6, ArgN7, IsPowerOfTwo>;
- static constexpr Iterate outer_pattern = OuterP;
- static constexpr Iterate inner_pattern = InnerP;
-
- enum { N0 = ArgN0 };
- enum { N1 = ArgN1 };
- enum { N2 = ArgN2 };
- enum { N3 = ArgN3 };
- enum { N4 = ArgN4 };
- enum { N5 = ArgN5 };
- enum { N6 = ArgN6 };
- enum { N7 = ArgN7 };
-
- size_t dimension[ARRAY_LAYOUT_MAX_RANK];
-
- enum : bool { is_extent_constructible = true };
-
- LayoutTiled(LayoutTiled const&) = default;
- LayoutTiled(LayoutTiled&&) = default;
- LayoutTiled& operator=(LayoutTiled const&) = default;
- LayoutTiled& operator=(LayoutTiled&&) = default;
-
- KOKKOS_INLINE_FUNCTION
- explicit constexpr LayoutTiled(size_t argN0 = 0, size_t argN1 = 0,
- size_t argN2 = 0, size_t argN3 = 0,
- size_t argN4 = 0, size_t argN5 = 0,
- size_t argN6 = 0, size_t argN7 = 0)
- : dimension{argN0, argN1, argN2, argN3, argN4, argN5, argN6, argN7} {}
-
- friend bool operator==(const LayoutTiled& left, const LayoutTiled& right) {
- for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
- if (left.dimension[rank] != right.dimension[rank]) return false;
- return true;
- }
-
- friend bool operator!=(const LayoutTiled& left, const LayoutTiled& right) {
- return !(left == right);
- }
-};
-
-} // namespace Experimental
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template <typename Layout, class Enable = void>
+struct KOKKOS_DEPRECATED is_layouttiled : std::false_type {};
+#endif
+namespace Impl {
// For use with view_copy
template <typename... Layout>
struct layout_iterate_type_selector {
static const Kokkos::Iterate inner_iteration_pattern =
Kokkos::Iterate::Default;
};
+} // namespace Impl
-template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
- unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
-struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
- Kokkos::Iterate::Left, Kokkos::Iterate::Left, ArgN0, ArgN1, ArgN2, ArgN3,
- ArgN4, ArgN5, ArgN6, ArgN7, true>> {
- static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Left;
- static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Left;
-};
-
-template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
- unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
-struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
- Kokkos::Iterate::Right, Kokkos::Iterate::Left, ArgN0, ArgN1, ArgN2, ArgN3,
- ArgN4, ArgN5, ArgN6, ArgN7, true>> {
- static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Right;
- static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Left;
-};
-
-template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
- unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
-struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
- Kokkos::Iterate::Left, Kokkos::Iterate::Right, ArgN0, ArgN1, ArgN2, ArgN3,
- ArgN4, ArgN5, ArgN6, ArgN7, true>> {
- static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Left;
- static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Right;
-};
-
-template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
- unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
-struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
- Kokkos::Iterate::Right, Kokkos::Iterate::Right, ArgN0, ArgN1, ArgN2, ArgN3,
- ArgN4, ArgN5, ArgN6, ArgN7, true>> {
- static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Right;
- static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Right;
-};
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+template <typename... Layout>
+using layout_iterate_type_selector KOKKOS_DEPRECATED =
+ Impl::layout_iterate_type_selector<Layout...>;
+#endif
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_MACROS_HPP
#define KOKKOS_MACROS_HPP
* KOKKOS_ENABLE_OPENMP Kokkos::OpenMP execution space
* KOKKOS_ENABLE_OPENMPTARGET Kokkos::Experimental::OpenMPTarget
* execution space
- * KOKKOS_ENABLE_HIP Kokkos::Experimental::HIP execution space
- * KOKKOS_ENABLE_SYCL Kokkos::Experimental::SYCL execution space
+ * KOKKOS_ENABLE_HIP Kokkos::HIP execution space
+ * KOKKOS_ENABLE_SYCL Kokkos::SYCL execution space
* KOKKOS_ENABLE_HWLOC HWLOC library is available.
 * KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK Insert array bounds checks; this is expensive!
* KOKKOS_ENABLE_CUDA_UVM Use CUDA UVM for Cuda memory space.
*/
+#define KOKKOS_VERSION_LESS(MAJOR, MINOR, PATCH) \
+ (KOKKOS_VERSION < ((MAJOR)*10000 + (MINOR)*100 + (PATCH)))
+
+#define KOKKOS_VERSION_LESS_EQUAL(MAJOR, MINOR, PATCH) \
+ (KOKKOS_VERSION <= ((MAJOR)*10000 + (MINOR)*100 + (PATCH)))
+
+#define KOKKOS_VERSION_GREATER(MAJOR, MINOR, PATCH) \
+ (KOKKOS_VERSION > ((MAJOR)*10000 + (MINOR)*100 + (PATCH)))
+
+#define KOKKOS_VERSION_GREATER_EQUAL(MAJOR, MINOR, PATCH) \
+ (KOKKOS_VERSION >= ((MAJOR)*10000 + (MINOR)*100 + (PATCH)))
+
+#define KOKKOS_VERSION_EQUAL(MAJOR, MINOR, PATCH) \
+ (KOKKOS_VERSION == ((MAJOR)*10000 + (MINOR)*100 + (PATCH)))
+
+#if !KOKKOS_VERSION_EQUAL(KOKKOS_VERSION_MAJOR, KOKKOS_VERSION_MINOR, \
+ KOKKOS_VERSION_PATCH)
+#error implementation bug
+#endif
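+
+// Usage sketch (illustrative): downstream code can guard features on the
+// Kokkos version, e.g.
+//
+// \code
+// #if KOKKOS_VERSION_GREATER_EQUAL(4, 0, 0)
+//   // use an interface introduced in Kokkos 4.0
+// #endif
+// \endcode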
+
#ifndef KOKKOS_DONT_INCLUDE_CORE_CONFIG_H
#include <KokkosCore_config.h>
+#include <impl/Kokkos_DesulAtomicsConfig.hpp>
+#include <impl/Kokkos_NvidiaGpuArchitectures.hpp>
+#endif
+
+#if !defined(KOKKOS_ENABLE_CXX17)
+#if __has_include(<version>)
+#include <version>
+#else
+#include <ciso646>
+#endif
+#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE < 10
+#error \
+ "Compiling with support for C++20 or later requires a libstdc++ version later than 9"
+#endif
#endif
//----------------------------------------------------------------------------
* KOKKOS_COMPILER_NVCC
* KOKKOS_COMPILER_GNU
* KOKKOS_COMPILER_INTEL
- * KOKKOS_COMPILER_IBM
+ * KOKKOS_COMPILER_INTEL_LLVM
* KOKKOS_COMPILER_CRAYC
* KOKKOS_COMPILER_APPLECC
* KOKKOS_COMPILER_CLANG
- * KOKKOS_COMPILER_PGI
+ * KOKKOS_COMPILER_NVHPC
* KOKKOS_COMPILER_MSVC
*
- * Macros for which compiler extension to use for atomics on intrinsic types
- *
- * KOKKOS_ENABLE_CUDA_ATOMICS
- * KOKKOS_ENABLE_GNU_ATOMICS
- * KOKKOS_ENABLE_INTEL_ATOMICS
- * KOKKOS_ENABLE_OPENMP_ATOMICS
- *
 * A suite of 'KOKKOS_ENABLE_PRAGMA_...' macros is defined for internal use.
*
* Macros for marking functions to run in an execution space:
//----------------------------------------------------------------------------
-#if !defined(KOKKOS_ENABLE_THREADS) && !defined(KOKKOS_ENABLE_CUDA) && \
- !defined(KOKKOS_ENABLE_OPENMP) && !defined(KOKKOS_ENABLE_HPX) && \
- !defined(KOKKOS_ENABLE_OPENMPTARGET) && !defined(KOKKOS_ENABLE_HIP) && \
- !defined(KOKKOS_ENABLE_SYCL)
-#define KOKKOS_INTERNAL_NOT_PARALLEL
+#if defined(KOKKOS_ENABLE_ATOMICS_BYPASS) && \
+ (defined(KOKKOS_ENABLE_THREADS) || defined(KOKKOS_ENABLE_CUDA) || \
+ defined(KOKKOS_ENABLE_OPENMP) || defined(KOKKOS_ENABLE_HPX) || \
+ defined(KOKKOS_ENABLE_OPENMPTARGET) || defined(KOKKOS_ENABLE_HIP) || \
+ defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_OPENACC))
+#error Atomics may only be disabled if neither a host parallel nor a device backend is enabled
#endif
#define KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
// Code is parsed and separated into host and device code.
// Host code is compiled again with another compiler.
// Device code is compiled to 'ptx'.
-#define KOKKOS_COMPILER_NVCC __NVCC__
+// NOTE: There is no official __CUDACC_VER_PATCH__; it's __CUDACC_VER_BUILD__,
+// which can have more than one digit (a potentially undefined number of
+// them). This macro definition is in line with our other compiler defs.
+#define KOKKOS_COMPILER_NVCC \
+ __CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__ * 10
#endif // #if defined( __NVCC__ )
#if !defined(KOKKOS_LAMBDA)
#define KOKKOS_LAMBDA [=]
#endif
-#if (defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)) && \
- !defined(KOKKOS_CLASS_LAMBDA)
+#if !defined(KOKKOS_CLASS_LAMBDA)
#define KOKKOS_CLASS_LAMBDA [ =, *this ]
#endif
-//#if !defined( __CUDA_ARCH__ ) // Not compiling Cuda code to 'ptx'.
+// #if !defined( __CUDA_ARCH__ ) // Not compiling Cuda code to 'ptx'.
// Intel compiler for host code.
#if defined(__INTEL_COMPILER)
#define KOKKOS_COMPILER_INTEL __INTEL_COMPILER
+
#elif defined(__INTEL_LLVM_COMPILER)
-#define KOKKOS_COMPILER_INTEL __INTEL_LLVM_COMPILER
-#elif defined(__ICC)
-// Old define
-#define KOKKOS_COMPILER_INTEL __ICC
-#elif defined(__ECC)
-// Very old define
-#define KOKKOS_COMPILER_INTEL __ECC
-#endif
+#define KOKKOS_COMPILER_INTEL_LLVM __INTEL_LLVM_COMPILER
+
+// Cray compiler for device offload code
+#elif defined(__cray__) && defined(__clang__)
+#define KOKKOS_COMPILER_CRAY_LLVM \
+ __cray_major__ * 100 + __cray_minor__ * 10 + __cray_patchlevel__
+#elif defined(_CRAYC)
// CRAY compiler for host code
-#if defined(_CRAYC)
#define KOKKOS_COMPILER_CRAYC _CRAYC
-#endif
-
-#if defined(__IBMCPP__)
-// IBM C++
-#define KOKKOS_COMPILER_IBM __IBMCPP__
-#elif defined(__IBMC__)
-#define KOKKOS_COMPILER_IBM __IBMC__
-#elif defined(__ibmxl_vrm__) // xlclang++
-#define KOKKOS_COMPILER_IBM __ibmxl_vrm__
-#endif
-#if defined(__APPLE_CC__)
+#elif defined(__APPLE_CC__)
#define KOKKOS_COMPILER_APPLECC __APPLE_CC__
-#endif
-#if defined(__clang__) && !defined(KOKKOS_COMPILER_INTEL) && \
- !defined(KOKKOS_COMPILER_IBM)
+#elif defined(__NVCOMPILER)
+#define KOKKOS_COMPILER_NVHPC \
+ __NVCOMPILER_MAJOR__ * 10000 + __NVCOMPILER_MINOR__ * 100 + \
+ __NVCOMPILER_PATCHLEVEL__
+
+#elif defined(__clang__)
+// Check this after the Clang-based proprietary compilers which will also define
+// __clang__
#define KOKKOS_COMPILER_CLANG \
__clang_major__ * 100 + __clang_minor__ * 10 + __clang_patchlevel__
-#endif
-#if !defined(__clang__) && !defined(KOKKOS_COMPILER_INTEL) && defined(__GNUC__)
+#elif defined(__GNUC__)
+// Check this here because many compilers (at least Clang variants and Intel
+// classic) define `__GNUC__` for compatibility
#define KOKKOS_COMPILER_GNU \
__GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__
-#if (530 > KOKKOS_COMPILER_GNU)
-#error "Compiling with GCC version earlier than 5.3.0 is not supported."
-#endif
-#endif
-
-#if defined(__PGIC__)
-#define KOKKOS_COMPILER_PGI \
- __PGIC__ * 100 + __PGIC_MINOR__ * 10 + __PGIC_PATCHLEVEL__
-
-#if (1740 > KOKKOS_COMPILER_PGI)
-#error "Compiling with PGI version earlier than 17.4 is not supported."
-#endif
-#endif
-
-#if defined(__NVCOMPILER)
-#define KOKKOS_COMPILER_NVHPC \
- __NVCOMPILER_MAJOR__ * 100 + __NVCOMPILER_MINOR__ * 10 + \
- __NVCOMPILER_PATCHLEVEL__
+#if (820 > KOKKOS_COMPILER_GNU)
+#error "Compiling with GCC version earlier than 8.2.0 is not supported."
#endif
-#if defined(_MSC_VER) && !defined(KOKKOS_COMPILER_INTEL)
+#elif defined(_MSC_VER)
+// Check this after Intel and Clang because those define _MSC_VER for
+// compatibility
#define KOKKOS_COMPILER_MSVC _MSC_VER
#endif
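
// Illustrative sketch: the macros above encode compiler versions as
// major*100 + minor*10 + patchlevel (NVHPC: major*10000 + minor*100 +
// patchlevel), so downstream version checks read e.g.
//
// \code
// #if defined(KOKKOS_COMPILER_GNU) && KOKKOS_COMPILER_GNU < 930
// #error "this translation unit assumes GCC 9.3 or newer"  // hypothetical
// #endif
// \endcode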
// of the supported OpenMP API version.
#endif // #if defined( _OPENMP )
-#if defined(KOKKOS_ENABLE_CXX17)
-#define KOKKOS_IMPL_FALLTHROUGH [[fallthrough]];
-#elif defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
-#define KOKKOS_IMPL_FALLTHROUGH [[gnu::fallthrough]];
-#elif defined(KOKKOS_COMPILER_CLANG)
-#define KOKKOS_IMPL_FALLTHROUGH [[clang::fallthrough]];
-#else
-#define KOKKOS_IMPL_FALLTHROUGH
-#endif
-
//----------------------------------------------------------------------------
// Intel compiler macros
-#if defined(KOKKOS_COMPILER_INTEL)
-// FIXME_SYCL
-#if !defined(KOKKOS_ENABLE_SYCL)
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
+#if defined(KOKKOS_COMPILER_INTEL_LLVM) && \
+ KOKKOS_COMPILER_INTEL_LLVM >= 20230100
#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
-#endif
-#if (1800 > KOKKOS_COMPILER_INTEL)
-#define KOKKOS_ENABLE_PRAGMA_SIMD 1
-#endif
-
-// FIXME Workaround for ICE with intel 17,18,19,20,21 in Trilinos
-#if (KOKKOS_COMPILER_INTEL <= 2100)
-#define KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
-#endif
-// FIXME_SYCL
-#if !defined(KOKKOS_ENABLE_SYCL)
#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
#endif
#endif
#endif
-#if (1700 > KOKKOS_COMPILER_INTEL)
-#error "Compiling with Intel version earlier than 17.0 is not supported."
+#if defined(KOKKOS_COMPILER_INTEL) && (1900 > KOKKOS_COMPILER_INTEL)
+#error "Compiling with Intel version earlier than 19.0.5 is not supported."
#endif
#if !defined(KOKKOS_ENABLE_ASM) && !defined(_WIN32)
#endif
#endif
-#if defined(KOKKOS_ARCH_AVX512MIC)
-#define KOKKOS_ENABLE_RFO_PREFETCH 1
-#if (KOKKOS_COMPILER_INTEL < 1800) && !defined(KOKKOS_KNL_USE_ASM_WORKAROUND)
-#define KOKKOS_KNL_USE_ASM_WORKAROUND 1
-#endif
-#endif
-
-#if (1800 > KOKKOS_COMPILER_INTEL)
-#define KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION
-#endif
-
#if defined(__MIC__)
// Compiling for Xeon Phi
#endif
#if defined(KOKKOS_COMPILER_CRAYC)
#endif
-//----------------------------------------------------------------------------
-// IBM Compiler macros
-
-#if defined(KOKKOS_COMPILER_IBM)
-#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
-//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
-//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
-//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
-//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
-
-#if !defined(KOKKOS_ENABLE_ASM)
-#define KOKKOS_ENABLE_ASM 1
-#endif
-#endif
-
//----------------------------------------------------------------------------
// CLANG compiler macros
#if defined(KOKKOS_COMPILER_CLANG)
-//#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
-//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
-//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
-//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
-//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+// #define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+// #define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+// #define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+// #define KOKKOS_ENABLE_PRAGMA_VECTOR 1
#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
// GNU Compiler macros
#if defined(KOKKOS_COMPILER_GNU)
-//#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
-//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
-//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
-//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
-//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
-
-#if defined(KOKKOS_ARCH_AVX512MIC)
-#define KOKKOS_ENABLE_RFO_PREFETCH 1
-#endif
+// #define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+// #define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+// #define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+// #define KOKKOS_ENABLE_PRAGMA_VECTOR 1
#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
//----------------------------------------------------------------------------
-#if defined(KOKKOS_COMPILER_PGI)
+#if defined(KOKKOS_COMPILER_NVHPC)
#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
-//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+// #define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
-//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
#endif
//----------------------------------------------------------------------------
#endif
#if !defined(KOKKOS_INLINE_FUNCTION_DELETED)
-#define KOKKOS_INLINE_FUNCTION_DELETED inline
+#define KOKKOS_INLINE_FUNCTION_DELETED
#endif
#if !defined(KOKKOS_DEFAULTED_FUNCTION)
-#define KOKKOS_DEFAULTED_FUNCTION inline
+#define KOKKOS_DEFAULTED_FUNCTION
+#endif
+
+#if !defined(KOKKOS_DEDUCTION_GUIDE)
+#define KOKKOS_DEDUCTION_GUIDE
#endif
#if !defined(KOKKOS_IMPL_HOST_FUNCTION)
#define KOKKOS_IMPL_DEVICE_FUNCTION
#endif
-// Temporary solution for SYCL not supporting printf in kernels.
-// Might disappear at any point once we have found another solution.
-#if !defined(KOKKOS_IMPL_DO_NOT_USE_PRINTF)
-#define KOKKOS_IMPL_DO_NOT_USE_PRINTF(...) printf(__VA_ARGS__)
+// FIXME_OPENACC FIXME_OPENMPTARGET
+// Move to setup files once there is more content
+// clang-format off
+#if defined(KOKKOS_ENABLE_OPENACC)
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION @"KOKKOS_RELOCATABLE_FUNCTION is not supported for the OpenACC backend"
+#endif
+#if defined(KOKKOS_ENABLE_OPENMPTARGET)
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION @"KOKKOS_RELOCATABLE_FUNCTION is not supported for the OpenMPTarget backend"
+#endif
+// clang-format on
+
+#if !defined(KOKKOS_IMPL_RELOCATABLE_FUNCTION)
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION
#endif
//----------------------------------------------------------------------------
#define KOKKOS_FORCEINLINE_FUNCTION \
KOKKOS_IMPL_FORCEINLINE_FUNCTION \
__attribute__((annotate("KOKKOS_FORCEINLINE_FUNCTION")))
+#define KOKKOS_RELOCATABLE_FUNCTION \
+ KOKKOS_IMPL_RELOCATABLE_FUNCTION \
+ __attribute__((annotate("KOKKOS_RELOCATABLE_FUNCTION")))
#else
#define KOKKOS_FUNCTION KOKKOS_IMPL_FUNCTION
#define KOKKOS_INLINE_FUNCTION KOKKOS_IMPL_INLINE_FUNCTION
#define KOKKOS_FORCEINLINE_FUNCTION KOKKOS_IMPL_FORCEINLINE_FUNCTION
+#define KOKKOS_RELOCATABLE_FUNCTION KOKKOS_IMPL_RELOCATABLE_FUNCTION
#endif
//----------------------------------------------------------------------------
#if 1 < ((defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA) ? 1 : 0) + \
(defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP) ? 1 : 0) + \
(defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL) ? 1 : 0) + \
+ (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENACC) ? 1 : 0) + \
(defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET) ? 1 : 0) + \
(defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP) ? 1 : 0) + \
(defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS) ? 1 : 0) + \
#endif
// If default is not specified then choose from enabled execution spaces.
-// Priority: CUDA, HIP, SYCL, OPENMPTARGET, OPENMP, THREADS, HPX, SERIAL
+// Priority: CUDA, HIP, SYCL, OPENACC, OPENMPTARGET, OPENMP, THREADS, HPX,
+// SERIAL
#if defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA)
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP)
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENACC)
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET)
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP)
#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS)
#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP
#elif defined(KOKKOS_ENABLE_SYCL)
#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL
+#elif defined(KOKKOS_ENABLE_OPENACC)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENACC
#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET
#elif defined(KOKKOS_ENABLE_OPENMP)
#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL
#endif
-//----------------------------------------------------------------------------
-// Determine for what space the code is being compiled:
-
-#if defined(__CUDACC__) && defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA)
-#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA
-#elif defined(__SYCL_DEVICE_ONLY__) && defined(KOKKOS_ENABLE_SYCL)
-#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL
-#elif defined(__HIPCC__) && defined(__HIP_DEVICE_COMPILE__) && \
- defined(KOKKOS_ENABLE_HIP)
-#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HIP_GPU
-#else
-#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
-#endif
-
//----------------------------------------------------------------------------
// Remove surrounding parentheses if present
#endif
#endif
+#ifdef KOKKOS_ENABLE_OPENACC
+#ifdef KOKKOS_COMPILER_NVHPC
+#define KOKKOS_IF_ON_DEVICE(CODE) \
+ if (__builtin_is_device_code()) { \
+ KOKKOS_IMPL_STRIP_PARENS(CODE) \
+ }
+#define KOKKOS_IF_ON_HOST(CODE) \
+ if (!__builtin_is_device_code()) { \
+ KOKKOS_IMPL_STRIP_PARENS(CODE) \
+ }
+#else
+#include <openacc.h>
+// FIXME_OPENACC acc_on_device is a non-constexpr function
+#define KOKKOS_IF_ON_DEVICE(CODE) \
+ if constexpr (acc_on_device(acc_device_not_host)) { \
+ KOKKOS_IMPL_STRIP_PARENS(CODE) \
+ }
+#define KOKKOS_IF_ON_HOST(CODE) \
+ if constexpr (acc_on_device(acc_device_host)) { \
+ KOKKOS_IMPL_STRIP_PARENS(CODE) \
+ }
+#endif
+#endif
+
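+// Usage sketch (illustrative): KOKKOS_IF_ON_HOST / KOKKOS_IF_ON_DEVICE select
+// one branch per compilation side inside functions compiled for both; note
+// the extra parentheses around the code argument.
+//
+// \code
+//   KOKKOS_FUNCTION void where_am_i() {
+//     KOKKOS_IF_ON_HOST((printf("host\n");))
+//     KOKKOS_IF_ON_DEVICE((printf("device\n");))
+//   }
+// \endcode
+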
#if !defined(KOKKOS_IF_ON_HOST) && !defined(KOKKOS_IF_ON_DEVICE)
#if (defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)) || \
(defined(KOKKOS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)) || \
// If compiling with CUDA, we must use relocatable device code to enable the
// task policy.
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
#if defined(KOKKOS_ENABLE_CUDA)
#if defined(KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE)
#define KOKKOS_ENABLE_TASKDAG
#endif
// FIXME_SYCL Tasks not implemented
-#elif !defined(KOKKOS_ENABLE_HIP) && !defined(KOKKOS_ENABLE_SYCL)
+#elif !defined(KOKKOS_ENABLE_HIP) && !defined(KOKKOS_ENABLE_SYCL) && \
+ !defined(KOKKOS_ENABLE_OPENMPTARGET)
#define KOKKOS_ENABLE_TASKDAG
#endif
+#endif
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_ENABLE_DEPRECATED_CODE_4)
+#define KOKKOS_ENABLE_CUDA_LDG_INTRINSIC
+#endif
#define KOKKOS_INVALID_INDEX (~std::size_t(0))
#define KOKKOS_IMPL_CTOR_DEFAULT_ARG KOKKOS_INVALID_INDEX
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-#define KOKKOS_CONSTEXPR_14 constexpr
-#define KOKKOS_DEPRECATED_TRAILING_ATTRIBUTE
-#endif
-
// Guard for intel compiler version 19 and older, which emits
// "intel error #2651: attribute does not apply to any entity" on
// "using <deprecated_type> KOKKOS_DEPRECATED = ..."
#define KOKKOS_IMPL_WARNING(desc) KOKKOS_IMPL_DO_PRAGMA(message(#desc))
#endif
-// DJS 05/28/2019: Bugfix: Issue 2155
-// Use KOKKOS_ENABLE_CUDA_LDG_INTRINSIC to avoid memory leak in RandomAccess
-// View
-#if defined(KOKKOS_ENABLE_CUDA) && !defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
-#define KOKKOS_ENABLE_CUDA_LDG_INTRINSIC
+// clang-format off
+#if defined(__NVCOMPILER)
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH() \
+ _Pragma("diag_suppress 1216") \
+ _Pragma("diag_suppress deprecated_entity_with_custom_message")
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP() \
+ _Pragma("diag_default 1216") \
+ _Pragma("diag_suppress deprecated_entity_with_custom_message")
+#elif defined(__EDG__)
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH() \
+ _Pragma("warning push") \
+ _Pragma("warning disable 1478")
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP() \
+ _Pragma("warning pop")
+#elif defined(__GNUC__) || defined(__clang__)
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH() \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP() \
+ _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH() \
+ _Pragma("warning(push)") \
+ _Pragma("warning(disable: 4996)")
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP() \
+ _Pragma("warning(pop)")
+#else
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+ #define KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
+#if defined(__NVCOMPILER)
+#define KOKKOS_IMPL_DISABLE_UNREACHABLE_WARNINGS_PUSH() \
+ _Pragma("diag_suppress code_is_unreachable") \
+ _Pragma("diag_suppress initialization_not_reachable")
+#define KOKKOS_IMPL_DISABLE_UNREACHABLE_WARNINGS_POP() \
+ _Pragma("diag_default code_is_unreachable") \
+ _Pragma("diag_default initialization_not_reachable")
+#else
+#define KOKKOS_IMPL_DISABLE_UNREACHABLE_WARNINGS_PUSH()
+#define KOKKOS_IMPL_DISABLE_UNREACHABLE_WARNINGS_POP()
#endif
+// clang-format on
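+
+// Usage sketch (illustrative; `legacy_function` is hypothetical): wrap a call
+// to a deprecated entity to suppress the deprecation diagnostic locally:
+//
+// \code
+//   KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+//   legacy_function();
+//   KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+// \endcode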
-#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
#define KOKKOS_ATTRIBUTE_NODISCARD [[nodiscard]]
+
+#ifndef KOKKOS_ENABLE_CXX17
+#define KOKKOS_IMPL_ATTRIBUTE_UNLIKELY [[unlikely]]
#else
-#define KOKKOS_ATTRIBUTE_NODISCARD
+#define KOKKOS_IMPL_ATTRIBUTE_UNLIKELY
#endif
-#if (defined(KOKKOS_COMPILER_GNU) || defined(KOKKOS_COMPILER_CLANG) || \
- defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_PGI)) && \
- !defined(_WIN32)
+#if (defined(KOKKOS_COMPILER_GNU) || defined(KOKKOS_COMPILER_CLANG) || \
+ defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM) || \
+ defined(KOKKOS_COMPILER_NVHPC)) && \
+ !defined(_WIN32) && !defined(__ANDROID__)
#if __has_include(<execinfo.h>)
#define KOKKOS_IMPL_ENABLE_STACKTRACE
#endif
#undef __CUDA_ARCH__
#endif
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-#define KOKKOS_THREAD_LOCAL \
- KOKKOS_DEPRECATED_WITH_COMMENT("Use thread_local instead!") thread_local
-#endif
-
#if (defined(KOKKOS_IMPL_WINDOWS_CUDA) || defined(KOKKOS_COMPILER_MSVC)) && \
!defined(KOKKOS_COMPILER_CLANG)
// MSVC (as of 16.5.5 at least) does not do empty base class optimization by
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#ifndef KOKKOS_MATHEMATICAL_CONSTANTS_HPP
+#define KOKKOS_MATHEMATICAL_CONSTANTS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <type_traits>
+
+namespace Kokkos::numbers {
+
+#define KOKKOS_IMPL_MATH_CONSTANT(TRAIT, VALUE) \
+ template <class T> \
+ inline constexpr auto TRAIT##_v = \
+ std::enable_if_t<std::is_floating_point_v<T>, T>(VALUE); \
+ inline constexpr auto TRAIT = TRAIT##_v<double>
+
+// clang-format off
+KOKKOS_IMPL_MATH_CONSTANT(e, 2.718281828459045235360287471352662498L);
+KOKKOS_IMPL_MATH_CONSTANT(log2e, 1.442695040888963407359924681001892137L);
+KOKKOS_IMPL_MATH_CONSTANT(log10e, 0.434294481903251827651128918916605082L);
+KOKKOS_IMPL_MATH_CONSTANT(pi, 3.141592653589793238462643383279502884L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_pi, 0.318309886183790671537767526745028724L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_sqrtpi, 0.564189583547756286948079451560772586L);
+KOKKOS_IMPL_MATH_CONSTANT(ln2, 0.693147180559945309417232121458176568L);
+KOKKOS_IMPL_MATH_CONSTANT(ln10, 2.302585092994045684017991454684364208L);
+KOKKOS_IMPL_MATH_CONSTANT(sqrt2, 1.414213562373095048801688724209698079L);
+KOKKOS_IMPL_MATH_CONSTANT(sqrt3, 1.732050807568877293527446341505872367L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_sqrt3, 0.577350269189625764509148780501957456L);
+KOKKOS_IMPL_MATH_CONSTANT(egamma, 0.577215664901532860606512090082402431L);
+KOKKOS_IMPL_MATH_CONSTANT(phi, 1.618033988749894848204586834365638118L);
+// clang-format on
+
+#undef KOKKOS_IMPL_MATH_CONSTANT
+
+} // namespace Kokkos::numbers
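+
+// Usage sketch (illustrative): the variable templates mirror std::numbers,
+// e.g.
+//
+// \code
+//   constexpr double p  = Kokkos::numbers::pi;
+//   constexpr float  pf = Kokkos::numbers::pi_v<float>;
+// \endcode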
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_MATHEMATICAL_FUNCTIONS_HPP
#define KOKKOS_MATHEMATICAL_FUNCTIONS_HPP
#include <type_traits>
#ifdef KOKKOS_ENABLE_SYCL
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
#include <CL/sycl.hpp>
#endif
+#endif
namespace Kokkos {
namespace Impl {
-template <class T, bool = std::is_integral<T>::value>
+template <class T, bool = std::is_integral_v<T>>
struct promote {
using type = double;
};
template <class T>
using promote_t = typename promote<T>::type;
template <class T, class U,
- bool = std::is_arithmetic<T>::value&& std::is_arithmetic<U>::value>
+ bool = std::is_arithmetic_v<T>&& std::is_arithmetic_v<U>>
struct promote_2 {
using type = decltype(promote_t<T>() + promote_t<U>());
};
struct promote_2<T, U, false> {};
template <class T, class U>
using promote_2_t = typename promote_2<T, U>::type;
+template <class T, class U, class V,
+ bool = std::is_arithmetic_v<T>&& std::is_arithmetic_v<U>&&
+ std::is_arithmetic_v<V>>
+struct promote_3 {
+ using type = decltype(promote_t<T>() + promote_t<U>() + promote_t<V>());
+};
+template <class T, class U, class V>
+struct promote_3<T, U, V, false> {};
+template <class T, class U, class V>
+using promote_3_t = typename promote_3<T, U, V>::type;
} // namespace Impl
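// For example (illustrative), mixed arguments promote as in the C++ standard
// library's <cmath> overload rules:
//
// \code
//   static_assert(std::is_same_v<Kokkos::Impl::promote_2_t<int, float>,
//                                double>);
//   static_assert(std::is_same_v<Kokkos::Impl::promote_2_t<float, double>,
//                                double>);
// \endcode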
// NOTE long double overloads are not available on the device
#endif
#endif
-#if defined(KOKKOS_ENABLE_DEPRECATED_CODE_3)
-#define KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE) \
- USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE
-#else
-#define KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE) \
- /* nothing */
-#endif
-
-#define KOKKOS_IMPL_MATH_UNARY_FUNCTION(FUNC) \
- KOKKOS_INLINE_FUNCTION float FUNC(float x) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x); \
- } \
- KOKKOS_INLINE_FUNCTION double FUNC(double x) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x); \
- } \
- inline long double FUNC(long double x) { \
- using std::FUNC; \
- return FUNC(x); \
- } \
- KOKKOS_INLINE_FUNCTION float FUNC##f(float x) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x); \
- } \
- inline long double FUNC##l(long double x) { \
- using std::FUNC; \
- return FUNC(x); \
- } \
- template <class T> \
- KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, double> \
- FUNC(T x) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(static_cast<double>(x)); \
- } \
- KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- namespace Experimental { \
- using ::Kokkos::FUNC; \
- using ::Kokkos::FUNC##f; \
- using ::Kokkos::FUNC##l; \
- })
+#define KOKKOS_IMPL_MATH_UNARY_FUNCTION(FUNC) \
+ KOKKOS_INLINE_FUNCTION float FUNC(float x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x); \
+ } \
+ KOKKOS_INLINE_FUNCTION double FUNC(double x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x); \
+ } \
+ inline long double FUNC(long double x) { \
+ using std::FUNC; \
+ return FUNC(x); \
+ } \
+ KOKKOS_INLINE_FUNCTION float FUNC##f(float x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x); \
+ } \
+ inline long double FUNC##l(long double x) { \
+ using std::FUNC; \
+ return FUNC(x); \
+ } \
+ template <class T> \
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, double> FUNC( \
+ T x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(static_cast<double>(x)); \
+ }
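+
+// For FUNC=sqrt, say, the macro above generates the following overload set
+// (illustrative sketch of the resulting call behavior):
+//
+// \code
+//   Kokkos::sqrt(2.0f);  // float overload, host and device
+//   Kokkos::sqrt(2);     // integral argument is promoted to double
+//   Kokkos::sqrtl(2.0L); // long double overload, host only
+// \endcode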
// isinf, isnan, and isfinite do not work on Windows with CUDA when using the
// std:: versions: we get warnings about calling a host function from a device
// function, and the runtime test fails
#if defined(_WIN32) && defined(KOKKOS_ENABLE_CUDA)
-#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC) \
- KOKKOS_INLINE_FUNCTION bool FUNC(float x) { return ::FUNC(x); } \
- KOKKOS_INLINE_FUNCTION bool FUNC(double x) { return ::FUNC(x); } \
- inline bool FUNC(long double x) { \
- using std::FUNC; \
- return FUNC(x); \
- } \
- template <class T> \
- KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, bool> \
- FUNC(T x) { \
- return ::FUNC(static_cast<double>(x)); \
- } \
- KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- namespace Experimental { using ::Kokkos::FUNC; })
+#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC) \
+ KOKKOS_INLINE_FUNCTION bool FUNC(float x) { return ::FUNC(x); } \
+ KOKKOS_INLINE_FUNCTION bool FUNC(double x) { return ::FUNC(x); } \
+ inline bool FUNC(long double x) { \
+ using std::FUNC; \
+ return FUNC(x); \
+ } \
+ template <class T> \
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, bool> FUNC( \
+ T x) { \
+ return ::FUNC(static_cast<double>(x)); \
+ }
#else
-#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC) \
- KOKKOS_INLINE_FUNCTION bool FUNC(float x) { \
+#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC) \
+ KOKKOS_INLINE_FUNCTION bool FUNC(float x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x); \
+ } \
+ KOKKOS_INLINE_FUNCTION bool FUNC(double x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x); \
+ } \
+ inline bool FUNC(long double x) { \
+ using std::FUNC; \
+ return FUNC(x); \
+ } \
+ template <class T> \
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, bool> FUNC( \
+ T x) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(static_cast<double>(x)); \
+ }
+#endif
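+// For exposition only: KOKKOS_IMPL_MATH_UNARY_PREDICATE(isnan) generates
+// bool-returning overloads for float, double, and long double, plus an
+// integral overload that casts to double first (and thus always returns
+// false for integral arguments).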
+
+#define KOKKOS_IMPL_MATH_BINARY_FUNCTION(FUNC) \
+ KOKKOS_INLINE_FUNCTION float FUNC(float x, float y) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x, y); \
+ } \
+ KOKKOS_INLINE_FUNCTION double FUNC(double x, double y) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x, y); \
+ } \
+ inline long double FUNC(long double x, long double y) { \
+ using std::FUNC; \
+ return FUNC(x, y); \
+ } \
+ KOKKOS_INLINE_FUNCTION float FUNC##f(float x, float y) { \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(x, y); \
+ } \
+ inline long double FUNC##l(long double x, long double y) { \
+ using std::FUNC; \
+ return FUNC(x, y); \
+ } \
+ template <class T1, class T2> \
+ KOKKOS_INLINE_FUNCTION \
+ std::enable_if_t<std::is_arithmetic_v<T1> && std::is_arithmetic_v<T2> && \
+ !std::is_same_v<T1, long double> && \
+ !std::is_same_v<T2, long double>, \
+ Kokkos::Impl::promote_2_t<T1, T2>> \
+ FUNC(T1 x, T2 y) { \
+ using Promoted = Kokkos::Impl::promote_2_t<T1, T2>; \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y)); \
+ } \
+ template <class T1, class T2> \
+ inline std::enable_if_t<std::is_arithmetic_v<T1> && \
+ std::is_arithmetic_v<T2> && \
+ (std::is_same_v<T1, long double> || \
+ std::is_same_v<T2, long double>), \
+ long double> \
+ FUNC(T1 x, T2 y) { \
+ using Promoted = Kokkos::Impl::promote_2_t<T1, T2>; \
+ static_assert(std::is_same_v<Promoted, long double>); \
+ using std::FUNC; \
+ return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y)); \
+ }
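+// For exposition only: a mixed-type call such as Kokkos::fmod(7, 2.5f)
+// converts both arguments to promote_2_t<int, float> (double, under the
+// promotion rules sketched above) and returns 2.0; calls involving a
+// long double argument dispatch to the host-only long double overload.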
+
+#define KOKKOS_IMPL_MATH_TERNARY_FUNCTION(FUNC) \
+ KOKKOS_INLINE_FUNCTION float FUNC(float x, float y, float z) { \
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x); \
+ return FUNC(x, y, z); \
} \
- KOKKOS_INLINE_FUNCTION bool FUNC(double x) { \
+ KOKKOS_INLINE_FUNCTION double FUNC(double x, double y, double z) { \
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x); \
+ return FUNC(x, y, z); \
} \
- inline bool FUNC(long double x) { \
+ inline long double FUNC(long double x, long double y, long double z) { \
using std::FUNC; \
- return FUNC(x); \
+ return FUNC(x, y, z); \
} \
- template <class T> \
- KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, bool> \
- FUNC(T x) { \
+ KOKKOS_INLINE_FUNCTION float FUNC##f(float x, float y, float z) { \
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(static_cast<double>(x)); \
+ return FUNC(x, y, z); \
} \
- KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- namespace Experimental { using ::Kokkos::FUNC; })
-#endif
+ inline long double FUNC##l(long double x, long double y, long double z) { \
+ using std::FUNC; \
+ return FUNC(x, y, z); \
+ } \
+ template <class T1, class T2, class T3> \
+ KOKKOS_INLINE_FUNCTION std::enable_if_t< \
+ std::is_arithmetic_v<T1> && std::is_arithmetic_v<T2> && \
+ std::is_arithmetic_v<T3> && !std::is_same_v<T1, long double> && \
+ !std::is_same_v<T2, long double> && \
+ !std::is_same_v<T3, long double>, \
+ Kokkos::Impl::promote_3_t<T1, T2, T3>> \
+ FUNC(T1 x, T2 y, T3 z) { \
+ using Promoted = Kokkos::Impl::promote_3_t<T1, T2, T3>; \
+ using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
+ return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y), \
+ static_cast<Promoted>(z)); \
+ } \
+ template <class T1, class T2, class T3> \
+ inline std::enable_if_t<std::is_arithmetic_v<T1> && \
+ std::is_arithmetic_v<T2> && \
+ std::is_arithmetic_v<T3> && \
+ (std::is_same_v<T1, long double> || \
+ std::is_same_v<T2, long double> || \
+ std::is_same_v<T3, long double>), \
+ long double> \
+ FUNC(T1 x, T2 y, T3 z) { \
+ using Promoted = Kokkos::Impl::promote_3_t<T1, T2, T3>; \
+ static_assert(std::is_same_v<Promoted, long double>); \
+ using std::FUNC; \
+ return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y), \
+ static_cast<Promoted>(z)); \
+ }
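+// For exposition only: KOKKOS_IMPL_MATH_TERNARY_FUNCTION(fma) provides
+// Kokkos::fma(x, y, z) == x * y + z, fused where the backend fma is fused,
+// with mixed arithmetic arguments promoted via promote_3_t exactly as in
+// the binary case.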
-#define KOKKOS_IMPL_MATH_BINARY_FUNCTION(FUNC) \
- KOKKOS_INLINE_FUNCTION float FUNC(float x, float y) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x, y); \
- } \
- KOKKOS_INLINE_FUNCTION double FUNC(double x, double y) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x, y); \
- } \
- inline long double FUNC(long double x, long double y) { \
- using std::FUNC; \
- return FUNC(x, y); \
- } \
- KOKKOS_INLINE_FUNCTION float FUNC##f(float x, float y) { \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(x, y); \
- } \
- inline long double FUNC##l(long double x, long double y) { \
- using std::FUNC; \
- return FUNC(x, y); \
- } \
- template <class T1, class T2> \
- KOKKOS_INLINE_FUNCTION std::enable_if_t< \
- std::is_arithmetic<T1>::value && std::is_arithmetic<T2>::value && \
- !std::is_same<T1, long double>::value && \
- !std::is_same<T2, long double>::value, \
- Kokkos::Impl::promote_2_t<T1, T2>> \
- FUNC(T1 x, T2 y) { \
- using Promoted = Kokkos::Impl::promote_2_t<T1, T2>; \
- using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC; \
- return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y)); \
- } \
- template <class T1, class T2> \
- inline std::enable_if_t<std::is_arithmetic<T1>::value && \
- std::is_arithmetic<T2>::value && \
- (std::is_same<T1, long double>::value || \
- std::is_same<T2, long double>::value), \
- long double> \
- FUNC(T1 x, T2 y) { \
- using Promoted = Kokkos::Impl::promote_2_t<T1, T2>; \
- static_assert(std::is_same<Promoted, long double>::value, ""); \
- using std::FUNC; \
- return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y)); \
- } \
- KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
- namespace Experimental { \
- using ::Kokkos::FUNC; \
- using ::Kokkos::FUNC##f; \
- using ::Kokkos::FUNC##l; \
- })
// Basic operations
KOKKOS_INLINE_FUNCTION int abs(int n) {
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
  return abs(n);
}
KOKKOS_INLINE_FUNCTION long abs(long n) {
// FIXME_NVHPC ptxas fatal : unresolved extern function 'labs'
-#ifdef KOKKOS_COMPILER_NVHPC
+#if defined(KOKKOS_COMPILER_NVHPC) && KOKKOS_COMPILER_NVHPC < 230700
return n > 0 ? n : -n;
#else
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
  return abs(n);
#endif
}
KOKKOS_INLINE_FUNCTION long long abs(long long n) {
// FIXME_NVHPC ptxas fatal : unresolved extern function 'labs'
-#ifdef KOKKOS_COMPILER_NVHPC
+#if defined(KOKKOS_COMPILER_NVHPC) && KOKKOS_COMPILER_NVHPC < 230700
return n > 0 ? n : -n;
#else
  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
  return abs(n);
#endif
}
KOKKOS_INLINE_FUNCTION float abs(float x) {
+#ifdef KOKKOS_ENABLE_SYCL
+ return sycl::fabs(x); // sycl::abs is only provided for integral types
+#else
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
return abs(x);
+#endif
}
KOKKOS_INLINE_FUNCTION double abs(double x) {
+#ifdef KOKKOS_ENABLE_SYCL
+ return sycl::fabs(x); // sycl::abs is only provided for integral types
+#else
using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
return abs(x);
+#endif
}
inline long double abs(long double x) {
using std::abs;
return abs(x);
}
-KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(
- namespace Experimental { using ::Kokkos::abs; })
KOKKOS_IMPL_MATH_UNARY_FUNCTION(fabs)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmod)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(remainder)
// remquo
-// fma
+KOKKOS_IMPL_MATH_TERNARY_FUNCTION(fma)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmax)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmin)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(fdim)
KOKKOS_INLINE_FUNCTION double nan(char const*) { return sycl::nan(0ul); }
#endif
inline long double nanl(char const* arg) { return ::nanl(arg); }
-KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(
- namespace Experimental {
- using ::Kokkos::nan;
- using ::Kokkos::nanf;
- using ::Kokkos::nanl;
- })
// Exponential functions
KOKKOS_IMPL_MATH_UNARY_FUNCTION(exp)
// FIXME_NVHPC nvc++ has issues with exp2
-#ifndef KOKKOS_COMPILER_NVHPC
-KOKKOS_IMPL_MATH_UNARY_FUNCTION(exp2)
-#else
+#if defined(KOKKOS_COMPILER_NVHPC) && KOKKOS_COMPILER_NVHPC < 230700
KOKKOS_INLINE_FUNCTION float exp2(float val) {
constexpr float ln2 = 0.693147180559945309417232121458176568L;
  return exp(ln2 * val);
}
KOKKOS_INLINE_FUNCTION double exp2(double val) {
  constexpr double ln2 = 0.693147180559945309417232121458176568L;
  return exp(ln2 * val);
}
template <class T>
KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, double> exp2(
    T val) {
  constexpr double ln2 = 0.693147180559945309417232121458176568L;
  return exp(ln2 * static_cast<double>(val));
}
+#else
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(exp2)
#endif
KOKKOS_IMPL_MATH_UNARY_FUNCTION(expm1)
KOKKOS_IMPL_MATH_UNARY_FUNCTION(log)
KOKKOS_IMPL_MATH_UNARY_FUNCTION(sqrt)
KOKKOS_IMPL_MATH_UNARY_FUNCTION(cbrt)
KOKKOS_IMPL_MATH_BINARY_FUNCTION(hypot)
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
+ defined(KOKKOS_ENABLE_SYCL)
+KOKKOS_INLINE_FUNCTION float hypot(float x, float y, float z) {
+ return sqrt(x * x + y * y + z * z);
+}
+KOKKOS_INLINE_FUNCTION double hypot(double x, double y, double z) {
+ return sqrt(x * x + y * y + z * z);
+}
+inline long double hypot(long double x, long double y, long double z) {
+ return sqrt(x * x + y * y + z * z);
+}
+KOKKOS_INLINE_FUNCTION float hypotf(float x, float y, float z) {
+ return sqrt(x * x + y * y + z * z);
+}
+inline long double hypotl(long double x, long double y, long double z) {
+ return sqrt(x * x + y * y + z * z);
+}
+template <
+ class T1, class T2, class T3,
+ class Promoted = std::enable_if_t<
+ std::is_arithmetic_v<T1> && std::is_arithmetic_v<T2> &&
+ std::is_arithmetic_v<T3> && !std::is_same_v<T1, long double> &&
+ !std::is_same_v<T2, long double> &&
+ !std::is_same_v<T3, long double>,
+ Impl::promote_3_t<T1, T2, T3>>>
+KOKKOS_INLINE_FUNCTION Promoted hypot(T1 x, T2 y, T3 z) {
+ return hypot(static_cast<Promoted>(x), static_cast<Promoted>(y),
+ static_cast<Promoted>(z));
+}
+template <
+ class T1, class T2, class T3,
+ class = std::enable_if_t<
+ std::is_arithmetic_v<T1> && std::is_arithmetic_v<T2> &&
+ std::is_arithmetic_v<T3> &&
+ (std::is_same_v<T1, long double> || std::is_same_v<T2, long double> ||
+ std::is_same_v<T3, long double>)>>
+inline long double hypot(T1 x, T2 y, T3 z) {
+ return hypot(static_cast<long double>(x), static_cast<long double>(y),
+ static_cast<long double>(z));
+}
+#else
+KOKKOS_IMPL_MATH_TERNARY_FUNCTION(hypot)
+#endif
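+// For exposition only: on CUDA/HIP/SYCL the three-argument hypot computes
+// sqrt(x * x + y * y + z * z) directly (subject to overflow for large
+// arguments), e.g. hypot(3.0, 4.0, 12.0) == 13.0; otherwise it forwards to
+// the backend's three-argument hypot via the ternary macro.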
// Trigonometric functions
KOKKOS_IMPL_MATH_UNARY_FUNCTION(sin)
KOKKOS_IMPL_MATH_UNARY_FUNCTION(cos)
// islessgreater
// isunordered
-#undef KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED
#undef KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE
#undef KOKKOS_IMPL_MATH_UNARY_FUNCTION
#undef KOKKOS_IMPL_MATH_UNARY_PREDICATE
#undef KOKKOS_IMPL_MATH_BINARY_FUNCTION
+#undef KOKKOS_IMPL_MATH_TERNARY_FUNCTION
+
+// non-standard math functions provided by CUDA/HIP/SYCL
+KOKKOS_INLINE_FUNCTION float rsqrt(float val) {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+ KOKKOS_IF_ON_DEVICE(return ::rsqrtf(val);)
+ KOKKOS_IF_ON_HOST(return 1.0f / Kokkos::sqrt(val);)
+#elif defined(KOKKOS_ENABLE_SYCL)
+ KOKKOS_IF_ON_DEVICE(return sycl::rsqrt(val);)
+ KOKKOS_IF_ON_HOST(return 1.0f / Kokkos::sqrt(val);)
+#else
+ return 1.0f / Kokkos::sqrt(val);
+#endif
+}
+KOKKOS_INLINE_FUNCTION double rsqrt(double val) {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+ KOKKOS_IF_ON_DEVICE(return ::rsqrt(val);)
+ KOKKOS_IF_ON_HOST(return 1.0 / Kokkos::sqrt(val);)
+#elif defined(KOKKOS_ENABLE_SYCL)
+ KOKKOS_IF_ON_DEVICE(return sycl::rsqrt(val);)
+ KOKKOS_IF_ON_HOST(return 1.0 / Kokkos::sqrt(val);)
+#else
+ return 1.0 / Kokkos::sqrt(val);
+#endif
+}
+inline long double rsqrt(long double val) { return 1.0l / Kokkos::sqrt(val); }
+KOKKOS_INLINE_FUNCTION float rsqrtf(float x) { return Kokkos::rsqrt(x); }
+inline long double rsqrtl(long double x) { return Kokkos::rsqrt(x); }
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral_v<T>, double> rsqrt(
+ T x) {
+ return Kokkos::rsqrt(static_cast<double>(x));
+}
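+// For exposition only: rsqrt(val) == 1 / sqrt(val) up to the accuracy of the
+// device intrinsic, e.g. rsqrt(4.0) == 0.5; integral arguments go through
+// the trailing template and compute in double.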
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_MATHEMATICAL_SPECIAL_FUNCTIONS_HPP
#define KOKKOS_MATHEMATICAL_SPECIAL_FUNCTIONS_HPP
using Kokkos::exp;
using Kokkos::fabs;
using Kokkos::sin;
- using Kokkos::Experimental::epsilon;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::epsilon_v;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::pi_v;
using CmplxType = Kokkos::complex<RealType>;
- constexpr auto inf = infinity<RealType>::value;
- constexpr auto tol = epsilon<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
+ constexpr auto tol = epsilon_v<RealType>;
const RealType fnorm = 1.12837916709551;
const RealType gnorm = 0.564189583547756;
const RealType eh = 0.606530659712633;
const RealType ef = 0.778800783071405;
// const RealType tol = 1.0e-13;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType cans;
using Kokkos::fabs;
using Kokkos::isinf;
using Kokkos::sin;
- using Kokkos::Experimental::epsilon;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::epsilon_v;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::inv_sqrtpi_v;
+ using Kokkos::numbers::pi_v;
using CmplxType = Kokkos::complex<RealType>;
- constexpr auto inf = infinity<RealType>::value;
- constexpr auto tol = epsilon<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
+ constexpr auto tol = epsilon_v<RealType>;
const RealType fnorm = 1.12837916709551;
- constexpr auto gnorm = Kokkos::Experimental::inv_sqrtpi_v<RealType>;
+ constexpr auto gnorm = inv_sqrtpi_v<RealType>;
const RealType eh = 0.606530659712633;
const RealType ef = 0.778800783071405;
// const RealType tol = 1.0e-13;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType cans;
// Output: cbj0 --- J0(z)
using Kokkos::fabs;
using Kokkos::pow;
+ using Kokkos::numbers::pi_v;
CmplxType cbj0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
const RealType a[12] = {
-0.703125e-01, 0.112152099609375e+00, -0.5725014209747314e+00,
0.6074042001273483e+01, -0.1100171402692467e+03, 0.3038090510922384e+04,
// Output: cby0 --- Y0(z)
using Kokkos::fabs;
using Kokkos::pow;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::egamma_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType cby0, cbj0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
- constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
+ constexpr auto el = egamma_v<RealType>;
const RealType a[12] = {
-0.703125e-01, 0.112152099609375e+00, -0.5725014209747314e+00,
0.6074042001273483e+01, -0.1100171402692467e+03, 0.3038090510922384e+04,
// Output: cbj1 --- J1(z)
using Kokkos::fabs;
using Kokkos::pow;
+ using Kokkos::numbers::pi_v;
CmplxType cbj1;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
const RealType a1[12] = {0.1171875e+00, -0.144195556640625e+00,
0.6765925884246826e+00, -0.6883914268109947e+01,
0.1215978918765359e+03, -0.3302272294480852e+04,
// Output: cby1 --- Y1(z)
using Kokkos::fabs;
using Kokkos::pow;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::egamma_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType cby1, cbj0, cbj1, cby0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
- constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
+ constexpr auto el = egamma_v<RealType>;
const RealType a1[12] = {0.1171875e+00, -0.144195556640625e+00,
0.6765925884246826e+00, -0.6883914268109947e+01,
0.1215978918765359e+03, -0.3302272294480852e+04,
//! for a complex argument
template <class CmplxType, class RealType, class IntType>
KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_i0(const CmplxType& z,
- const RealType& joint_val = 25,
- const IntType& bw_start = 70) {
+ const RealType& joint_val = 18,
+ const IntType& n_terms = 50) {
// This function is converted and modified from the corresponding Fortran
- // programs CIKNB and CIK01 in S. Zhang & J. Jin "Computation of Special
+ // program CIK01 in S. Zhang & J. Jin "Computation of Special
// Functions" (Wiley, 1996).
// Input : z --- Complex argument
// joint_val --- Joint point of abs(z) separating small and large
// argument regions
- // bw_start --- Starting point for backward recurrence
+ // n_terms --- Number of terms used in the power series
// Output: cbi0 --- I0(z)
- CmplxType cbi0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
- const RealType a[12] = {0.125,
- 7.03125e-2,
- 7.32421875e-2,
- 1.1215209960938e-1,
- 2.2710800170898e-1,
- 5.7250142097473e-1,
- 1.7277275025845e0,
- 6.0740420012735e0,
- 2.4380529699556e1,
- 1.1001714026925e2,
- 5.5133589612202e2,
- 3.0380905109224e3};
+ CmplxType cbi0(1.0, 0.0);
RealType a0 = Kokkos::abs(z);
CmplxType z1 = z;
- if (a0 < 1e-100) { // Treat z=0 as a special case
- cbi0 = CmplxType(1.0, 0.0);
- } else {
+ if (a0 > 1e-100) {
if (z.real() < 0.0) z1 = -z;
- if (a0 <= joint_val) { // Using backward recurrence for |z|<=joint_val
- // (default:25)
- CmplxType cbs = CmplxType(0.0, 0.0);
- // CmplxType csk0 = CmplxType(0.0,0.0);
- CmplxType cf0 = CmplxType(0.0, 0.0);
- CmplxType cf1 = CmplxType(1e-100, 0.0);
- CmplxType cf, cs0;
- for (int k = bw_start; k >= 0; k--) { // Backward recurrence (default:
- // 70)
- cf = 2.0 * (k + 1.0) * cf1 / z1 + cf0;
- if (k == 0) cbi0 = cf;
- // if ((k == 2*(k/2)) && (k != 0)) {
- // csk0 = csk0+4.0*cf/static_cast<RealType>(k);
- //}
- cbs = cbs + 2.0 * cf;
- cf0 = cf1;
- cf1 = cf;
+ if (a0 <= joint_val) {
+ // Using power series definition for |z|<=joint_val (default:18)
+ CmplxType cr = CmplxType(1.0e+00, 0.0e+00);
+ CmplxType z2 = z * z;
+ for (int k = 1; k < n_terms; ++k) {
+ cr = RealType(.25) * cr * z2 / CmplxType(k * k);
+ cbi0 += cr;
+ if (Kokkos::abs(cr / cbi0) < RealType(1.e-15)) break;  // series converged
}
- cs0 = Kokkos::exp(z1) / (cbs - cf);
- cbi0 = cbi0 * cs0;
- } else { // Using asymptotic expansion (6.2.1) for |z|>joint_val
- // (default:25)
- CmplxType ca = Kokkos::exp(z1) / Kokkos::sqrt(2.0 * pi * z1);
- cbi0 = CmplxType(1.0, 0.0);
- CmplxType zr = 1.0 / z1;
+ } else {
+ // Using asymptotic expansion (6.2.1) for |z|>joint_val (default:18)
+ const RealType a[12] = {0.125,
+ 7.03125e-2,
+ 7.32421875e-2,
+ 1.1215209960938e-1,
+ 2.2710800170898e-1,
+ 5.7250142097473e-1,
+ 1.7277275025845e0,
+ 6.0740420012735e0,
+ 2.4380529699556e1,
+ 1.1001714026925e2,
+ 5.5133589612202e2,
+ 3.0380905109224e3};
+
for (int k = 1; k <= 12; k++) {
- cbi0 = cbi0 + a[k - 1] * Kokkos::pow(zr, 1.0 * k);
+ cbi0 += a[k - 1] * Kokkos::pow(z1, -k);
}
- cbi0 = ca * cbi0;
+ cbi0 *= Kokkos::exp(z1) /
+ Kokkos::sqrt(2.0 * Kokkos::numbers::pi_v<RealType> * z1);
}
}
return cbi0;
// bw_start --- Starting point for backward recurrence
// Output: cbk0 --- K0(z)
using Kokkos::pow;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::egamma_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType cbk0, cbi0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
- constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
+ constexpr auto el = egamma_v<RealType>;
RealType a0 = Kokkos::abs(z);
CmplxType ci = CmplxType(0.0, 1.0);
// argument regions
// bw_start --- Starting point for backward recurrence
// Output: cbi1 --- I1(z)
+ using Kokkos::numbers::pi_v;
+
CmplxType cbi1;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
const RealType b[12] = {-0.375,
-1.171875e-1,
-1.025390625e-1,
// bw_start --- Starting point for backward recurrence
// Output: cbk1 --- K1(z)
using Kokkos::pow;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::egamma_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType cbk0, cbi0, cbk1, cbi1;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
- constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
+ constexpr auto el = egamma_v<RealType>;
RealType a0 = Kokkos::abs(z);
CmplxType ci = CmplxType(0.0, 1.0);
// programs CH12N in S. Zhang & J. Jin "Computation of Special Functions"
// (Wiley, 1996).
using RealType = typename CmplxType::value_type;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType ch10, cbk0, cbj0, cby0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType ci = CmplxType(0.0, 1.0);
if ((z.real() == 0.0) && (z.imag() == 0.0)) {
// programs CH12N in S. Zhang & J. Jin "Computation of Special Functions"
// (Wiley, 1996).
using RealType = typename CmplxType::value_type;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType ch11, cbk1, cbj1, cby1;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType ci = CmplxType(0.0, 1.0);
if ((z.real() == 0.0) && (z.imag() == 0.0)) {
// programs CH12N in S. Zhang & J. Jin "Computation of Special Functions"
// (Wiley, 1996).
using RealType = typename CmplxType::value_type;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType ch20, cbk0, cbj0, cby0;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType ci = CmplxType(0.0, 1.0);
if ((z.real() == 0.0) && (z.imag() == 0.0)) {
// programs CH12N in S. Zhang & J. Jin "Computation of Special Functions"
// (Wiley, 1996).
using RealType = typename CmplxType::value_type;
- using Kokkos::Experimental::infinity;
+ using Kokkos::Experimental::infinity_v;
+ using Kokkos::numbers::pi_v;
- constexpr auto inf = infinity<RealType>::value;
+ constexpr auto inf = infinity_v<RealType>;
CmplxType ch21, cbk1, cbj1, cby1;
- constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+ constexpr auto pi = pi_v<RealType>;
CmplxType ci = CmplxType(0.0, 1.0);
if ((z.real() == 0.0) && (z.imag() == 0.0)) {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_MEMORYPOOL_HPP
#define KOKKOS_MEMORYPOOL_HPP
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_SharedAlloc.hpp>
-#include <iostream>
-
namespace Kokkos {
namespace Impl {
/* Report violation of size constraints:
stats.consumed_superblocks++;
stats.consumed_blocks += block_used;
- stats.consumed_bytes += block_used * block_size;
+ stats.consumed_bytes += static_cast<size_t>(block_used) * block_size;
stats.reserved_blocks += block_count - block_used;
- stats.reserved_bytes += (block_count - block_used) * block_size;
+ stats.reserved_bytes +=
+ static_cast<size_t>(block_count - block_used) * block_size;
}
}
//--------------------------------------------------------------------------
- KOKKOS_DEFAULTED_FUNCTION MemoryPool(MemoryPool &&) = default;
- KOKKOS_DEFAULTED_FUNCTION MemoryPool(const MemoryPool &) = default;
- KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(MemoryPool &&) = default;
+ KOKKOS_DEFAULTED_FUNCTION MemoryPool(MemoryPool &&) = default;
+ KOKKOS_DEFAULTED_FUNCTION MemoryPool(const MemoryPool &) = default;
+ KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(MemoryPool &&) = default;
KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(const MemoryPool &) = default;
KOKKOS_INLINE_FUNCTION MemoryPool()
/* Return 0 for invalid block size */
KOKKOS_INLINE_FUNCTION
uint32_t allocate_block_size(uint64_t alloc_size) const noexcept {
- return alloc_size <= (uint64_t(1) << m_max_block_size_lg2)
- ? (uint32_t(1) << get_block_size_lg2(uint32_t(alloc_size)))
+ return alloc_size <= (1UL << m_max_block_size_lg2)
+ ? (1UL << get_block_size_lg2(uint32_t(alloc_size)))
: 0;
}
*/
KOKKOS_FUNCTION
void *allocate(size_t alloc_size, int32_t attempt_limit = 1) const noexcept {
- if ((size_t(1) << m_max_block_size_lg2) < alloc_size) {
+ if (size_t(1LU << m_max_block_size_lg2) < alloc_size) {
Kokkos::abort(
"Kokkos MemoryPool allocation request exceeded specified maximum "
"allocation size");
// mask into superblock and then shift down for block index
const uint32_t bit =
- (d & ((ptrdiff_t(1) << m_sb_size_lg2) - 1)) >> block_size_lg2;
+ (d & (ptrdiff_t(1LU << m_sb_size_lg2) - 1)) >> block_size_lg2;
const int result = CB::release(sb_state_array, bit, block_state);
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_MEMORYTRAITS_HPP
#define KOKKOS_MEMORYTRAITS_HPP
struct MemoryTraits {
//! Tag this class as a kokkos memory traits:
using memory_traits = MemoryTraits<T>;
- enum : bool {
- is_unmanaged = (unsigned(0) != (T & unsigned(Kokkos::Unmanaged)))
- };
- enum : bool {
- is_random_access = (unsigned(0) != (T & unsigned(Kokkos::RandomAccess)))
- };
- enum : bool { is_atomic = (unsigned(0) != (T & unsigned(Kokkos::Atomic))) };
- enum : bool {
- is_restrict = (unsigned(0) != (T & unsigned(Kokkos::Restrict)))
- };
- enum : bool { is_aligned = (unsigned(0) != (T & unsigned(Kokkos::Aligned))) };
+
+ static constexpr unsigned impl_value = T;
+
+ static constexpr bool is_unmanaged =
+ (unsigned(0) != (T & unsigned(Kokkos::Unmanaged)));
+ static constexpr bool is_random_access =
+ (unsigned(0) != (T & unsigned(Kokkos::RandomAccess)));
+ static constexpr bool is_atomic =
+ (unsigned(0) != (T & unsigned(Kokkos::Atomic)));
+ static constexpr bool is_restrict =
+ (unsigned(0) != (T & unsigned(Kokkos::Restrict)));
+ static constexpr bool is_aligned =
+ (unsigned(0) != (T & unsigned(Kokkos::Aligned)));
};
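+// Exposition-only sanity checks, assuming the memory trait enumerators
+// (Unmanaged, RandomAccess, ...) are distinct single-bit flags:
+static_assert(MemoryTraits<Unmanaged>::is_unmanaged);
+static_assert(!MemoryTraits<Unmanaged>::is_atomic);
+static_assert(MemoryTraits<Unmanaged | RandomAccess>::is_random_access);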
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
-static_assert(false,
- "Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
-#endif
-#ifndef KOKKOS_MIN_MAX_CLAMP_HPP
-#define KOKKOS_MIN_MAX_CLAMP_HPP
+#ifndef KOKKOS_MIN_MAX_HPP
+#define KOKKOS_MIN_MAX_HPP
#include <Kokkos_Macros.hpp>
#include <Kokkos_Pair.hpp>
namespace Kokkos {
-// clamp
-template <class T>
-constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
- const T& hi) {
- KOKKOS_EXPECTS(!(hi < lo));
- return (value < lo) ? lo : (hi < value) ? hi : value;
-}
-
-template <class T, class ComparatorType>
-constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
- const T& hi,
- ComparatorType comp) {
- KOKKOS_EXPECTS(!comp(hi, lo));
- return comp(value, lo) ? lo : comp(hi, value) ? hi : value;
-}
-
// max
template <class T>
constexpr KOKKOS_INLINE_FUNCTION const T& max(const T& a, const T& b) {
return result;
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-namespace Experimental {
-using ::Kokkos::clamp;
-using ::Kokkos::max;
-using ::Kokkos::min;
-using ::Kokkos::minmax;
-} // namespace Experimental
-#endif
-
} // namespace Kokkos
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_NUMERIC_TRAITS_HPP
+#define KOKKOS_NUMERIC_TRAITS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERIC_TRAITS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#include <Kokkos_ReductionIdentity.hpp>
+#endif
+#include <cfloat>
+#include <climits>
+#include <cmath>
+#include <cstdint>
+#include <type_traits>
+
+namespace Kokkos::Experimental {
+namespace Impl {
+// clang-format off
+template <class> struct infinity_helper {};
+template <> struct infinity_helper<float> { static constexpr float value = HUGE_VALF; };
+template <> struct infinity_helper<double> { static constexpr double value = HUGE_VAL; };
+template <> struct infinity_helper<long double> { static constexpr long double value = HUGE_VALL; };
+template <class> struct finite_min_helper {};
+template <> struct finite_min_helper<bool> { static constexpr bool value = false; };
+template <> struct finite_min_helper<char> { static constexpr char value = CHAR_MIN; };
+template <> struct finite_min_helper<signed char> { static constexpr signed char value = SCHAR_MIN; };
+template <> struct finite_min_helper<unsigned char> { static constexpr unsigned char value = 0; };
+template <> struct finite_min_helper<short> { static constexpr short value = SHRT_MIN; };
+template <> struct finite_min_helper<unsigned short> { static constexpr unsigned short value = 0; };
+template <> struct finite_min_helper<int> { static constexpr int value = INT_MIN; };
+template <> struct finite_min_helper<unsigned int> { static constexpr unsigned int value = 0; };
+template <> struct finite_min_helper<long int> { static constexpr long int value = LONG_MIN; };
+template <> struct finite_min_helper<unsigned long int> { static constexpr unsigned long int value = 0; };
+template <> struct finite_min_helper<long long int> { static constexpr long long int value = LLONG_MIN; };
+template <> struct finite_min_helper<unsigned long long int> { static constexpr unsigned long long int value = 0; };
+template <> struct finite_min_helper<float> { static constexpr float value = -FLT_MAX; };
+template <> struct finite_min_helper<double> { static constexpr double value = -DBL_MAX; };
+template <> struct finite_min_helper<long double> { static constexpr long double value = -LDBL_MAX; };
+template <class> struct finite_max_helper {};
+template <> struct finite_max_helper<bool> { static constexpr bool value = true; };
+template <> struct finite_max_helper<char> { static constexpr char value = CHAR_MAX; };
+template <> struct finite_max_helper<signed char> { static constexpr signed char value = SCHAR_MAX; };
+template <> struct finite_max_helper<unsigned char> { static constexpr unsigned char value = UCHAR_MAX; };
+template <> struct finite_max_helper<short> { static constexpr short value = SHRT_MAX; };
+template <> struct finite_max_helper<unsigned short> { static constexpr unsigned short value = USHRT_MAX; };
+template <> struct finite_max_helper<int> { static constexpr int value = INT_MAX; };
+template <> struct finite_max_helper<unsigned int> { static constexpr unsigned int value = UINT_MAX; };
+template <> struct finite_max_helper<long int> { static constexpr long int value = LONG_MAX; };
+template <> struct finite_max_helper<unsigned long int> { static constexpr unsigned long int value = ULONG_MAX; };
+template <> struct finite_max_helper<long long int> { static constexpr long long int value = LLONG_MAX; };
+template <> struct finite_max_helper<unsigned long long int> { static constexpr unsigned long long int value = ULLONG_MAX; };
+template <> struct finite_max_helper<float> { static constexpr float value = FLT_MAX; };
+template <> struct finite_max_helper<double> { static constexpr double value = DBL_MAX; };
+template <> struct finite_max_helper<long double> { static constexpr long double value = LDBL_MAX; };
+template <class> struct epsilon_helper {};
+template <> struct epsilon_helper<float> { static constexpr float value = FLT_EPSILON; };
+template <> struct epsilon_helper<double> { static constexpr double value = DBL_EPSILON; };
+template <> struct epsilon_helper<long double> {
+ static constexpr long double value = LDBL_EPSILON;
+};
+template <class> struct round_error_helper {};
+template <> struct round_error_helper<float> { static constexpr float value = 0.5F; };
+template <> struct round_error_helper<double> { static constexpr double value = 0.5; };
+template <> struct round_error_helper<long double> { static constexpr long double value = 0.5L; };
+template <class> struct norm_min_helper {};
+template <> struct norm_min_helper<float> { static constexpr float value = FLT_MIN; };
+template <> struct norm_min_helper<double> { static constexpr double value = DBL_MIN; };
+template <> struct norm_min_helper<long double> { static constexpr long double value = LDBL_MIN; };
+template <class> struct denorm_min_helper {};
+// Workaround for GCC <9.2, Clang <9, Intel
+// vvvvvvvvvvvvvvvvvvvvvvvvv
+#if defined(FLT_TRUE_MIN) || defined(_MSC_VER)
+template <> struct denorm_min_helper<float> { static constexpr float value = FLT_TRUE_MIN; };
+template <> struct denorm_min_helper<double> { static constexpr double value = DBL_TRUE_MIN; };
+template <> struct denorm_min_helper<long double> { static constexpr long double value = LDBL_TRUE_MIN; };
+#else
+template <> struct denorm_min_helper<float> { static constexpr float value = __FLT_DENORM_MIN__; };
+template <> struct denorm_min_helper<double> { static constexpr double value = __DBL_DENORM_MIN__; };
+template <> struct denorm_min_helper<long double> { static constexpr long double value = __LDBL_DENORM_MIN__; };
+#endif
+template <class> struct quiet_NaN_helper {};
+template <> struct quiet_NaN_helper<float> { static constexpr float value = __builtin_nanf(""); };
+template <> struct quiet_NaN_helper<double> { static constexpr double value = __builtin_nan(""); };
+#if defined(_MSC_VER)
+template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nan(""); };
+#else
+template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nanl(""); };
+#endif
+template <class> struct signaling_NaN_helper {};
+template <> struct signaling_NaN_helper<float> { static constexpr float value = __builtin_nansf(""); };
+template <> struct signaling_NaN_helper<double> { static constexpr double value = __builtin_nans(""); };
+#if defined(_MSC_VER)
+template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nans(""); };
+#else
+template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nansl(""); };
+#endif
+template <class> struct digits_helper {};
+template <> struct digits_helper<bool> { static constexpr int value = 1; };
+template <> struct digits_helper<char> { static constexpr int value = CHAR_BIT - std::is_signed_v<char>; };
+template <> struct digits_helper<signed char> { static constexpr int value = CHAR_BIT - 1; };
+template <> struct digits_helper<unsigned char> { static constexpr int value = CHAR_BIT; };
+template <> struct digits_helper<short> { static constexpr int value = CHAR_BIT*sizeof(short)-1; };
+template <> struct digits_helper<unsigned short> { static constexpr int value = CHAR_BIT*sizeof(short); };
+template <> struct digits_helper<int> { static constexpr int value = CHAR_BIT*sizeof(int)-1; };
+template <> struct digits_helper<unsigned int> { static constexpr int value = CHAR_BIT*sizeof(int); };
+template <> struct digits_helper<long int> { static constexpr int value = CHAR_BIT*sizeof(long int)-1; };
+template <> struct digits_helper<unsigned long int> { static constexpr int value = CHAR_BIT*sizeof(long int); };
+template <> struct digits_helper<long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int)-1; };
+template <> struct digits_helper<unsigned long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int); };
+template <> struct digits_helper<float> { static constexpr int value = FLT_MANT_DIG; };
+template <> struct digits_helper<double> { static constexpr int value = DBL_MANT_DIG; };
+template <> struct digits_helper<long double> { static constexpr int value = LDBL_MANT_DIG; };
+template <class> struct digits10_helper {};
+template <> struct digits10_helper<bool> { static constexpr int value = 0; };
+// The fraction 643/2136 approximates log10(2) to 7 significant digits.
+// This works around a GCC bug with -frounding-math that prevented the
+// floating-point expression from being evaluated at compile time.
+#define DIGITS10_HELPER_INTEGRAL(TYPE) \
+template <> struct digits10_helper<TYPE> { static constexpr int value = digits_helper<TYPE>::value * 643L / 2136; };
+DIGITS10_HELPER_INTEGRAL(char)
+DIGITS10_HELPER_INTEGRAL(signed char)
+DIGITS10_HELPER_INTEGRAL(unsigned char)
+DIGITS10_HELPER_INTEGRAL(short)
+DIGITS10_HELPER_INTEGRAL(unsigned short)
+DIGITS10_HELPER_INTEGRAL(int)
+DIGITS10_HELPER_INTEGRAL(unsigned int)
+DIGITS10_HELPER_INTEGRAL(long int)
+DIGITS10_HELPER_INTEGRAL(unsigned long int)
+DIGITS10_HELPER_INTEGRAL(long long int)
+DIGITS10_HELPER_INTEGRAL(unsigned long long int)
+#undef DIGITS10_HELPER_INTEGRAL
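+// For example, digits_helper<int>::value == 31 for a 32-bit int, and
+// 31 * 643L / 2136 == 9, matching std::numeric_limits<int>::digits10.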
+template <> struct digits10_helper<float> { static constexpr int value = FLT_DIG; };
+template <> struct digits10_helper<double> { static constexpr int value = DBL_DIG; };
+template <> struct digits10_helper<long double> { static constexpr int value = LDBL_DIG; };
+template <class> struct max_digits10_helper {};
+// Approximate ceil(digits<T>::value * log10(2) + 1)
+#define MAX_DIGITS10_HELPER(TYPE) \
+template <> struct max_digits10_helper<TYPE> { static constexpr int value = (digits_helper<TYPE>::value * 643L + 2135) / 2136 + 1; };
+#ifdef FLT_DECIMAL_DIG
+template <> struct max_digits10_helper<float> { static constexpr int value = FLT_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(float)
+#endif
+#ifdef DBL_DECIMAL_DIG
+template <> struct max_digits10_helper<double> { static constexpr int value = DBL_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(double)
+#endif
+#ifdef DECIMAL_DIG
+template <> struct max_digits10_helper<long double> { static constexpr int value = DECIMAL_DIG; };
+#elif defined(LDBL_DECIMAL_DIG)
+template <> struct max_digits10_helper<long double> { static constexpr int value = LDBL_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(long double)
+#endif
+#undef MAX_DIGITS10_HELPER
+template <class> struct radix_helper {};
+template <> struct radix_helper<bool> { static constexpr int value = 2; };
+template <> struct radix_helper<char> { static constexpr int value = 2; };
+template <> struct radix_helper<signed char> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned char> { static constexpr int value = 2; };
+template <> struct radix_helper<short> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned short> { static constexpr int value = 2; };
+template <> struct radix_helper<int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned int> { static constexpr int value = 2; };
+template <> struct radix_helper<long int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned long int> { static constexpr int value = 2; };
+template <> struct radix_helper<long long int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned long long int> { static constexpr int value = 2; };
+template <> struct radix_helper<float> { static constexpr int value = FLT_RADIX; };
+template <> struct radix_helper<double> { static constexpr int value = FLT_RADIX; };
+template <> struct radix_helper<long double> { static constexpr int value = FLT_RADIX; };
+template <class> struct min_exponent_helper {};
+template <> struct min_exponent_helper<float> { static constexpr int value = FLT_MIN_EXP; };
+template <> struct min_exponent_helper<double> { static constexpr int value = DBL_MIN_EXP; };
+template <> struct min_exponent_helper<long double> { static constexpr int value = LDBL_MIN_EXP; };
+template <class> struct min_exponent10_helper {};
+template <> struct min_exponent10_helper<float> { static constexpr int value = FLT_MIN_10_EXP; };
+template <> struct min_exponent10_helper<double> { static constexpr int value = DBL_MIN_10_EXP; };
+template <> struct min_exponent10_helper<long double> { static constexpr int value = LDBL_MIN_10_EXP; };
+template <class> struct max_exponent_helper {};
+template <> struct max_exponent_helper<float> { static constexpr int value = FLT_MAX_EXP; };
+template <> struct max_exponent_helper<double> { static constexpr int value = DBL_MAX_EXP; };
+template <> struct max_exponent_helper<long double> { static constexpr int value = LDBL_MAX_EXP; };
+template <class> struct max_exponent10_helper {};
+template <> struct max_exponent10_helper<float> { static constexpr int value = FLT_MAX_10_EXP; };
+template <> struct max_exponent10_helper<double> { static constexpr int value = DBL_MAX_10_EXP; };
+template <> struct max_exponent10_helper<long double> { static constexpr int value = LDBL_MAX_10_EXP; };
+// clang-format on
+} // namespace Impl
+
+#define KOKKOS_IMPL_DEFINE_TRAIT(TRAIT) \
+ template <class T> \
+ struct TRAIT : Impl::TRAIT##_helper<std::remove_cv_t<T>> {}; \
+ template <class T> \
+ inline constexpr auto TRAIT##_v = TRAIT<T>::value;
+
+// Numeric distinguished value traits
+KOKKOS_IMPL_DEFINE_TRAIT(infinity)
+KOKKOS_IMPL_DEFINE_TRAIT(finite_min)
+KOKKOS_IMPL_DEFINE_TRAIT(finite_max)
+KOKKOS_IMPL_DEFINE_TRAIT(epsilon)
+KOKKOS_IMPL_DEFINE_TRAIT(round_error)
+KOKKOS_IMPL_DEFINE_TRAIT(norm_min)
+KOKKOS_IMPL_DEFINE_TRAIT(denorm_min)
+KOKKOS_IMPL_DEFINE_TRAIT(quiet_NaN)
+KOKKOS_IMPL_DEFINE_TRAIT(signaling_NaN)
+
+// Numeric characteristics traits
+KOKKOS_IMPL_DEFINE_TRAIT(digits)
+KOKKOS_IMPL_DEFINE_TRAIT(digits10)
+KOKKOS_IMPL_DEFINE_TRAIT(max_digits10)
+KOKKOS_IMPL_DEFINE_TRAIT(radix)
+KOKKOS_IMPL_DEFINE_TRAIT(min_exponent)
+KOKKOS_IMPL_DEFINE_TRAIT(min_exponent10)
+KOKKOS_IMPL_DEFINE_TRAIT(max_exponent)
+KOKKOS_IMPL_DEFINE_TRAIT(max_exponent10)
+
+#undef KOKKOS_IMPL_DEFINE_TRAIT
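+
+// Exposition-only checks against the <climits>/<cfloat> constants used above:
+static_assert(finite_max_v<int> == INT_MAX);
+static_assert(finite_min_v<unsigned int> == 0u);
+static_assert(epsilon_v<float> == FLT_EPSILON);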
+
+} // namespace Kokkos::Experimental
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERIC_TRAITS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERIC_TRAITS
+#endif
+#endif
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
/// \file Kokkos_Pair.hpp
#endif
#include <Kokkos_Macros.hpp>
+#include <Kokkos_Swap.hpp>
#include <utility>
namespace Kokkos {
///
/// This calls the copy constructors of T1 and T2. It won't compile
/// if those copy constructors are not defined and public.
-#ifdef KOKKOS_COMPILER_NVHPC // FIXME_NVHPC bug in NVHPC regarding constexpr
- // constructors used in device code
+#if defined(KOKKOS_COMPILER_NVHPC) && KOKKOS_COMPILER_NVHPC < 230700
KOKKOS_FORCEINLINE_FUNCTION
#else
KOKKOS_FORCEINLINE_FUNCTION constexpr
/// This calls the copy constructors of T1 and T2. It won't compile
/// if those copy constructors are not defined and public.
template <class U, class V>
-#ifdef KOKKOS_COMPILER_NVHPC // FIXME_NVHPC bug in NVHPC regarding constexpr
- // constructors used in device code
+#if defined(KOKKOS_COMPILER_NVHPC) && KOKKOS_COMPILER_NVHPC < 230700
KOKKOS_FORCEINLINE_FUNCTION
#else
KOKKOS_FORCEINLINE_FUNCTION constexpr
: first(p.first), second(p.second) {
}
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/// \brief Copy constructor.
///
/// This calls the copy constructors of T1 and T2. It won't compile
/// if those copy constructors are not defined and public.
template <class U, class V>
- KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const volatile pair<U, V>& p)
+ KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr pair(
+ const volatile pair<U, V>& p)
: first(p.first), second(p.second) {}
+#endif
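+  // [Editor's sketch, not part of the upstream change] The non-volatile
+  // conversions shown above remain fully supported; only the volatile
+  // overloads are deprecated behind KOKKOS_ENABLE_DEPRECATED_CODE_4:
+  //
+  //   Kokkos::pair<int, double> a = Kokkos::make_pair(1, 2.0);
+  //   Kokkos::pair<long, double> b(a);  // converting copy constructor
+  //   b = Kokkos::make_pair(3, 4.0);    // converting assignment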
/// \brief Assignment operator.
///
return *this;
}
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/// \brief Assignment operator, for volatile <tt>*this</tt>.
///
/// \param p [in] Input; right-hand side of the assignment.
/// practice, this means that you should not chain assignments with
/// volatile lvalues.
template <class U, class V>
- KOKKOS_FORCEINLINE_FUNCTION void operator=(
+ KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION void operator=(
const volatile pair<U, V>& p) volatile {
first = p.first;
second = p.second;
// We deliberately do not return anything here. See explanation
// in public documentation above.
}
+#endif
// from std::pair<U,V>
template <class U, class V>
return (pair<T1&, T2&>(x, y));
}
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
//
// Specialization of Kokkos::pair for a \c void second argument. This
// is not actually a "pair"; it only contains one element, the first.
//
template <class T1>
-struct pair<T1, void> {
+struct KOKKOS_DEPRECATED pair<T1, void> {
using first_type = T1;
using second_type = void;
// Specialization of relational operators for Kokkos::pair<T1,void>.
//
+#if defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS) && \
+ defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU < 1110)
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator==(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator==(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return lhs.first == rhs.first;
}
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator!=(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator!=(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return !(lhs == rhs);
}
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return lhs.first < rhs.first;
}
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<=(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<=(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return !(rhs < lhs);
}
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return rhs < lhs;
}
template <class T1>
-KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>=(
+KOKKOS_DEPRECATED KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>=(
const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
return !(lhs < rhs);
}
+#if defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS) && \
+ defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU < 1110)
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+#endif
namespace Impl {
-
template <class T>
struct is_pair_like : std::false_type {};
template <class T, class U>
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_Parallel.hpp
/// \brief Declaration of parallel operators
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_PARALLEL_HPP
#define KOKKOS_PARALLEL_HPP
static_assert(
!is_detected<execution_space_t, Policy>::value ||
!is_detected<execution_space_t, Functor>::value ||
- std::is_same<policy_execution_space, functor_execution_space>::value,
+ std::is_same_v<policy_execution_space, functor_execution_space>,
"A policy with an execution space and a functor with an execution space "
"are given but the execution space types do not match!");
static_assert(!is_detected<execution_space_t, Policy>::value ||
!is_detected<device_type_t, Functor>::value ||
- std::is_same<policy_execution_space,
- functor_device_type_execution_space>::value,
+ std::is_same_v<policy_execution_space,
+ functor_device_type_execution_space>,
"A policy with an execution space and a functor with a device "
"type are given but the execution space types do not match!");
static_assert(!is_detected<device_type_t, Functor>::value ||
!is_detected<execution_space_t, Functor>::value ||
- std::is_same<functor_device_type_execution_space,
- functor_execution_space>::value,
+ std::is_same_v<functor_device_type_execution_space,
+ functor_execution_space>,
"A functor with both an execution space and device type is "
"given but their execution space types do not match!");
const FunctorType& functor) {
uint64_t kpID = 0;
- ExecPolicy inner_policy = policy;
- Kokkos::Tools::Impl::begin_parallel_for(inner_policy, functor, str, kpID);
+ /** Request a tuned policy from the tools subsystem */
+ const auto& response =
+ Kokkos::Tools::Impl::begin_parallel_for(policy, functor, str, kpID);
+ const auto& inner_policy = response.policy;
- Kokkos::Impl::shared_allocation_tracking_disable();
- Impl::ParallelFor<FunctorType, ExecPolicy> closure(functor, inner_policy);
- Kokkos::Impl::shared_allocation_tracking_enable();
+ auto closure =
+ Kokkos::Impl::construct_with_shared_allocation_tracking_disabled<
+ Impl::ParallelFor<FunctorType, ExecPolicy>>(functor, inner_policy);
closure.execute();
Kokkos::parallel_for("", policy, functor);
}
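// [Editor's sketch, not part of the upstream change] The labeled entry point
// implemented above in typical use; the label is what the tools subsystem
// sees when asked for a tuned policy. Assumes Kokkos is initialized and `x`
// is a Kokkos::View<double*> of length n:
//
//   Kokkos::parallel_for(
//       "scale_x", Kokkos::RangePolicy<>(0, n),
//       KOKKOS_LAMBDA(const int i) { x(i) *= 2.0; });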
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class ExecPolicy, class FunctorType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_for(
- const ExecPolicy& policy, const FunctorType& functor,
- const std::string& str,
- std::enable_if_t<is_execution_policy<ExecPolicy>::value>* = nullptr) {
- Kokkos::parallel_for(str, policy, functor);
-}
-#endif
-
template <class FunctorType>
inline void parallel_for(const std::string& str, const size_t work_count,
const FunctorType& functor) {
::Kokkos::parallel_for("", work_count, functor);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class FunctorType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_for(const size_t work_count, const FunctorType& functor,
- const std::string& str) {
- ::Kokkos::parallel_for(str, work_count, functor);
-}
-#endif
-
} // namespace Kokkos
#include <Kokkos_Parallel_Reduce.hpp>
std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>>
inline void parallel_scan(const std::string& str, const ExecutionPolicy& policy,
const FunctorType& functor) {
- uint64_t kpID = 0;
- ExecutionPolicy inner_policy = policy;
- Kokkos::Tools::Impl::begin_parallel_scan(inner_policy, functor, str, kpID);
+ uint64_t kpID = 0;
+ /** Request a tuned policy from the tools subsystem */
+ const auto& response =
+ Kokkos::Tools::Impl::begin_parallel_scan(policy, functor, str, kpID);
+ const auto& inner_policy = response.policy;
- Kokkos::Impl::shared_allocation_tracking_disable();
- Impl::ParallelScan<FunctorType, ExecutionPolicy> closure(functor,
- inner_policy);
- Kokkos::Impl::shared_allocation_tracking_enable();
+ auto closure =
+ Kokkos::Impl::construct_with_shared_allocation_tracking_disabled<
+ Impl::ParallelScan<FunctorType, ExecutionPolicy>>(functor,
+ inner_policy);
closure.execute();
::Kokkos::parallel_scan("", policy, functor);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class ExecutionPolicy, class FunctorType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_scan(
- const ExecutionPolicy& policy, const FunctorType& functor,
- const std::string& str,
- std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
- ::Kokkos::parallel_scan(str, policy, functor);
-}
-#endif
-
template <class FunctorType>
inline void parallel_scan(const std::string& str, const size_t work_count,
const FunctorType& functor) {
::Kokkos::parallel_scan("", work_count, functor);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class FunctorType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_scan(const size_t work_count, const FunctorType& functor,
- const std::string& str) {
- ::Kokkos::parallel_scan(str, work_count, functor);
-}
-#endif
-
template <class ExecutionPolicy, class FunctorType, class ReturnType,
class Enable =
std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>>
ExecutionPolicy inner_policy = policy;
Kokkos::Tools::Impl::begin_parallel_scan(inner_policy, functor, str, kpID);
- Kokkos::Impl::shared_allocation_tracking_disable();
- Impl::ParallelScanWithTotal<FunctorType, ExecutionPolicy, ReturnType> closure(
- functor, inner_policy, return_value);
- Kokkos::Impl::shared_allocation_tracking_enable();
-
- closure.execute();
+ if constexpr (Kokkos::is_view<ReturnType>::value) {
+ auto closure =
+ Kokkos::Impl::construct_with_shared_allocation_tracking_disabled<
+ Impl::ParallelScanWithTotal<FunctorType, ExecutionPolicy,
+ typename ReturnType::value_type>>(
+ functor, inner_policy, return_value);
+ closure.execute();
+ } else {
+ Kokkos::View<ReturnType, Kokkos::HostSpace> view(&return_value);
+ auto closure =
+ Kokkos::Impl::construct_with_shared_allocation_tracking_disabled<
+ Impl::ParallelScanWithTotal<FunctorType, ExecutionPolicy,
+ ReturnType>>(functor, inner_policy,
+ view);
+ closure.execute();
+ }
Kokkos::Tools::Impl::end_parallel_scan(inner_policy, functor, str, kpID);
- policy.space().fence(
- "Kokkos::parallel_scan: fence due to result being a value, not a view");
+ if (!Kokkos::is_view<ReturnType>::value)
+ policy.space().fence(
+ "Kokkos::parallel_scan: fence due to result being a value, not a view");
}
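// [Editor's sketch, not part of the upstream change] The two result forms
// distinguished above, assuming Views x and y of length n: a scalar total
// takes the fence path, while passing a View does not force the fence.
//
//   double total = 0.0;
//   Kokkos::parallel_scan(
//       "prefix_sum", n,
//       KOKKOS_LAMBDA(const int i, double& update, const bool final) {
//         update += x(i);
//         if (final) y(i) = update;
//       },
//       total);  // scalar result: implicit fence (see above)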
template <class ExecutionPolicy, class FunctorType, class ReturnType>
::Kokkos::parallel_scan("", policy, functor, return_value);
}
-#ifdef KOKKOS_ENABLE_DISABLE_DEPRECATED_CODE_3
-template <class ExecutionPolicy, class FunctorType, class ReturnType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_scan(
- const ExecutionPolicy& policy, const FunctorType& functor,
- ReturnType& return_value, const std::string& str,
- std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
- ::Kokkos::parallel_scan(str, policy, functor, return_value);
-}
-#endif
-
template <class FunctorType, class ReturnType>
inline void parallel_scan(const std::string& str, const size_t work_count,
const FunctorType& functor,
::Kokkos::parallel_scan("", work_count, functor, return_value);
}
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class FunctorType, class ReturnType>
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use the overload taking the label as first argument instead!")
-inline void parallel_scan(const size_t work_count, const FunctorType& functor,
- ReturnType& return_value, const std::string& str) {
- ::Kokkos::parallel_scan(str, work_count, functor, return_value);
-}
-#endif
-
} // namespace Kokkos
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_PARALLEL_REDUCE_HPP
#define KOKKOS_PARALLEL_REDUCE_HPP
-#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_ReductionIdentity.hpp>
#include <Kokkos_View.hpp>
#include <impl/Kokkos_FunctorAnalysis.hpp>
#include <impl/Kokkos_Tools_Generic.hpp>
#include <type_traits>
-#include <iostream>
namespace Kokkos {
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-template <class T>
-using is_reducer_type KOKKOS_DEPRECATED_WITH_COMMENT(
- "Use Kokkos::is_reducer instead!") = Kokkos::is_reducer<T>;
-#endif
-
template <class Scalar, class Space>
struct Sum {
public:
// Required
using reducer = Sum<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE Sum(View<Scalar, Properties...> const&)
+ -> Sum<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
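+// [Editor's sketch, not part of the upstream change] What the deduction
+// guide above buys: the Scalar and memory-space arguments no longer need to
+// be spelled out when the reducer is built from its result View.
+//
+//   Kokkos::View<double, Kokkos::HostSpace> result("result");
+//   Kokkos::parallel_reduce(
+//       "ones", n,
+//       KOKKOS_LAMBDA(const int i, double& lsum) { lsum += 1.0; },
+//       Kokkos::Sum(result));  // deduces Sum<double, Kokkos::HostSpace>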
template <class Scalar, class Space>
struct Prod {
public:
// Required
using reducer = Prod<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE Prod(View<Scalar, Properties...> const&)
+ -> Prod<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct Min {
public:
// Required
using reducer = Min<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE Min(View<Scalar, Properties...> const&)
+ -> Min<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct Max {
public:
// Required
using reducer = Max<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE Max(View<Scalar, Properties...> const&)
+ -> Max<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct LAnd {
public:
// Required
using reducer = LAnd<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE LAnd(View<Scalar, Properties...> const&)
+ -> LAnd<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct LOr {
public:
// Required
using reducer = LOr<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE LOr(View<Scalar, Properties...> const&)
+ -> LOr<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct BAnd {
public:
// Required
using reducer = BAnd<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE BAnd(View<Scalar, Properties...> const&)
+ -> BAnd<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Space>
struct BOr {
public:
// Required
using reducer = BOr<Scalar, Space>;
using value_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<value_type> && !std::is_array_v<value_type>);
using result_view_type = Kokkos::View<value_type, Space>;
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE BOr(View<Scalar, Properties...> const&)
+ -> BOr<Scalar, typename View<Scalar, Properties...>::memory_space>;
+
template <class Scalar, class Index>
struct ValLocScalar {
Scalar val;
Index loc;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const ValLocScalar& rhs) {
- val = rhs.val;
- loc = rhs.loc;
- }
};
template <class Scalar, class Index, class Space>
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
public:
// Required
// Required
KOKKOS_INLINE_FUNCTION
void join(value_type& dest, const value_type& src) const {
- if (src.val < dest.val) dest = src;
+ if (src.val < dest.val)
+ dest = src;
+ else if (src.val == dest.val &&
+ dest.loc == reduction_identity<index_type>::min()) {
+ dest.loc = src.loc;
+ }
}
KOKKOS_INLINE_FUNCTION
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE
+MinLoc(View<ValLocScalar<Scalar, Index>, Properties...> const&) -> MinLoc<
+ Scalar, Index,
+ typename View<ValLocScalar<Scalar, Index>, Properties...>::memory_space>;
+
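+// [Editor's sketch, not part of the upstream change] MinLoc in typical use,
+// assuming a View x of length n. Note the tie-break added to join() above:
+// on equal values, src's location is taken only while dest still holds the
+// identity placeholder, so a real location is never overwritten.
+//
+//   using minloc_t = Kokkos::MinLoc<double, int>;
+//   minloc_t::value_type result;
+//   Kokkos::parallel_reduce(
+//       "argmin", n,
+//       KOKKOS_LAMBDA(const int i, minloc_t::value_type& v) {
+//         if (x(i) < v.val) {
+//           v.val = x(i);
+//           v.loc = i;
+//         }
+//       },
+//       minloc_t(result));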
template <class Scalar, class Index, class Space>
struct MaxLoc {
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
public:
// Required
// Required
KOKKOS_INLINE_FUNCTION
void join(value_type& dest, const value_type& src) const {
- if (src.val > dest.val) dest = src;
+ if (src.val > dest.val)
+ dest = src;
+ else if (src.val == dest.val &&
+ dest.loc == reduction_identity<index_type>::min()) {
+ dest.loc = src.loc;
+ }
}
KOKKOS_INLINE_FUNCTION
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE
+MaxLoc(View<ValLocScalar<Scalar, Index>, Properties...> const&) -> MaxLoc<
+ Scalar, Index,
+ typename View<ValLocScalar<Scalar, Index>, Properties...>::memory_space>;
+
template <class Scalar>
struct MinMaxScalar {
Scalar min_val, max_val;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const MinMaxScalar& rhs) {
- min_val = rhs.min_val;
- max_val = rhs.max_val;
- }
};
template <class Scalar, class Space>
struct MinMax {
private:
using scalar_type = std::remove_cv_t<Scalar>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinMax(View<MinMaxScalar<Scalar>, Properties...> const&)
+ -> MinMax<Scalar,
+ typename View<MinMaxScalar<Scalar>, Properties...>::memory_space>;
+
template <class Scalar, class Index>
struct MinMaxLocScalar {
Scalar min_val, max_val;
Index min_loc, max_loc;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const MinMaxLocScalar& rhs) {
- min_val = rhs.min_val;
- min_loc = rhs.min_loc;
- max_val = rhs.max_val;
- max_loc = rhs.max_loc;
- }
};
template <class Scalar, class Index, class Space>
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
public:
// Required
if (src.min_val < dest.min_val) {
dest.min_val = src.min_val;
dest.min_loc = src.min_loc;
+ } else if (dest.min_val == src.min_val &&
+ dest.min_loc == reduction_identity<index_type>::min()) {
+ dest.min_loc = src.min_loc;
}
if (src.max_val > dest.max_val) {
dest.max_val = src.max_val;
dest.max_loc = src.max_loc;
+ } else if (dest.max_val == src.max_val &&
+ dest.max_loc == reduction_identity<index_type>::min()) {
+ dest.max_loc = src.max_loc;
}
}
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinMaxLoc(
+ View<MinMaxLocScalar<Scalar, Index>, Properties...> const&)
+ -> MinMaxLoc<Scalar, Index,
+ typename View<MinMaxLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
// --------------------------------------------------
// reducers added to support std algorithms
// --------------------------------------------------
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MaxFirstLoc(
+ View<ValLocScalar<Scalar, Index>, Properties...> const&)
+ -> MaxFirstLoc<Scalar, Index,
+ typename View<ValLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// MaxFirstLocCustomComparator
// recall that comp(a,b) returns true if a < b
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename ComparatorType,
+ typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MaxFirstLocCustomComparator(
+ View<ValLocScalar<Scalar, Index>, Properties...> const&, ComparatorType)
+ -> MaxFirstLocCustomComparator<Scalar, Index, ComparatorType,
+ typename View<ValLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// MinFirstLoc
//
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinFirstLoc(
+ View<ValLocScalar<Scalar, Index>, Properties...> const&)
+ -> MinFirstLoc<Scalar, Index,
+ typename View<ValLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// MinFirstLocCustomComparator
// recall that comp(a,b) returns true if a < b
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename ComparatorType,
+ typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinFirstLocCustomComparator(
+ View<ValLocScalar<Scalar, Index>, Properties...> const&, ComparatorType)
+ -> MinFirstLocCustomComparator<Scalar, Index, ComparatorType,
+ typename View<ValLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// MinMaxFirstLastLoc
//
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinMaxFirstLastLoc(
+ View<MinMaxLocScalar<Scalar, Index>, Properties...> const&)
+ -> MinMaxFirstLastLoc<Scalar, Index,
+ typename View<MinMaxLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// MinMaxFirstLastLocCustomComparator
// recall that comp(a,b) returns true if a < b
private:
using scalar_type = std::remove_cv_t<Scalar>;
using index_type = std::remove_cv_t<Index>;
+ static_assert(!std::is_pointer_v<scalar_type> &&
+ !std::is_array_v<scalar_type>);
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Scalar, typename Index, typename ComparatorType,
+ typename... Properties>
+KOKKOS_DEDUCTION_GUIDE MinMaxFirstLastLocCustomComparator(
+ View<MinMaxLocScalar<Scalar, Index>, Properties...> const&, ComparatorType)
+ -> MinMaxFirstLastLocCustomComparator<
+ Scalar, Index, ComparatorType,
+ typename View<MinMaxLocScalar<Scalar, Index>,
+ Properties...>::memory_space>;
+
//
// FirstLoc
//
template <class Index>
struct FirstLocScalar {
Index min_loc_true;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const FirstLocScalar& rhs) { min_loc_true = rhs.min_loc_true; }
};
template <class Index, class Space>
struct FirstLoc {
private:
using index_type = std::remove_cv_t<Index>;
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE
+FirstLoc(View<FirstLocScalar<Index>, Properties...> const&) -> FirstLoc<
+ Index, typename View<FirstLocScalar<Index>, Properties...>::memory_space>;
+
//
// LastLoc
//
template <class Index>
struct LastLocScalar {
Index max_loc_true;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const LastLocScalar& rhs) { max_loc_true = rhs.max_loc_true; }
};
template <class Index, class Space>
struct LastLoc {
private:
using index_type = std::remove_cv_t<Index>;
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE LastLoc(View<LastLocScalar<Index>, Properties...> const&)
+ -> LastLoc<Index, typename View<LastLocScalar<Index>,
+ Properties...>::memory_space>;
+
template <class Index>
struct StdIsPartScalar {
Index max_loc_true, min_loc_false;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const StdIsPartScalar& rhs) {
- min_loc_false = rhs.min_loc_false;
- max_loc_true = rhs.max_loc_true;
- }
};
//
struct StdIsPartitioned {
private:
using index_type = std::remove_cv_t<Index>;
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE StdIsPartitioned(
+ View<StdIsPartScalar<Index>, Properties...> const&)
+ -> StdIsPartitioned<Index, typename View<StdIsPartScalar<Index>,
+ Properties...>::memory_space>;
+
template <class Index>
struct StdPartPointScalar {
Index min_loc_false;
-
- KOKKOS_INLINE_FUNCTION
- void operator=(const StdPartPointScalar& rhs) {
- min_loc_false = rhs.min_loc_false;
- }
};
//
struct StdPartitionPoint {
private:
using index_type = std::remove_cv_t<Index>;
+ static_assert(std::is_integral_v<index_type>);
public:
// Required
bool references_scalar() const { return references_scalar_v; }
};
+template <typename Index, typename... Properties>
+KOKKOS_DEDUCTION_GUIDE StdPartitionPoint(
+ View<StdPartPointScalar<Index>, Properties...> const&)
+ -> StdPartitionPoint<Index, typename View<StdPartPointScalar<Index>,
+ Properties...>::memory_space>;
+
} // namespace Kokkos
namespace Kokkos {
namespace Impl {
+template <typename FunctorType, typename FunctorAnalysisReducerType,
+ typename Enable>
+class CombinedFunctorReducer {
+ public:
+ using functor_type = FunctorType;
+ using reducer_type = FunctorAnalysisReducerType;
+ CombinedFunctorReducer(const FunctorType& functor,
+ const FunctorAnalysisReducerType& reducer)
+ : m_functor(functor), m_reducer(reducer) {}
+ KOKKOS_FUNCTION const FunctorType& get_functor() const { return m_functor; }
+ KOKKOS_FUNCTION const FunctorAnalysisReducerType& get_reducer() const {
+ return m_reducer;
+ }
+
+ private:
+ FunctorType m_functor;
+ FunctorAnalysisReducerType m_reducer;
+};
+template <typename FunctorType, typename FunctorAnalysisReducerType>
+class CombinedFunctorReducer<
+ FunctorType, FunctorAnalysisReducerType,
+ std::enable_if_t<std::is_same_v<
+ FunctorType, typename FunctorAnalysisReducerType::functor_type>>> {
+ public:
+ using functor_type = FunctorType;
+ using reducer_type = FunctorAnalysisReducerType;
+ CombinedFunctorReducer(const FunctorType& functor,
+ const FunctorAnalysisReducerType&)
+ : m_reducer(functor) {}
+ KOKKOS_FUNCTION const FunctorType& get_functor() const {
+ return m_reducer.get_functor();
+ }
+ KOKKOS_FUNCTION const FunctorAnalysisReducerType& get_reducer() const {
+ return m_reducer;
+ }
+
+ private:
+ FunctorAnalysisReducerType m_reducer;
+};
+
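+// [Editor's note] The specialization above avoids storing the functor twice:
+// when the reducer type already wraps the same functor type, only the
+// reducer member is kept and get_functor() forwards to it. A standalone
+// sketch of the pattern with hypothetical types (not Kokkos API):
+//
+//   struct MyFunctor { /* operator()... */ };
+//   struct MyReducer {
+//     using functor_type = MyFunctor;
+//     MyFunctor f;
+//     const MyFunctor& get_functor() const { return f; }
+//   };
+//   // CombinedFunctorReducer<MyFunctor, MyReducer> selects the partial
+//   // specialization, stores a single MyReducer, and serves both
+//   // get_functor() and get_reducer() from it.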
template <class T, class ReturnType, class ValueTraits>
struct ParallelReduceReturnValue;
template <class ReturnType, class FunctorType>
struct ParallelReduceReturnValue<
std::enable_if_t<!Kokkos::is_view<ReturnType>::value &&
- (!std::is_array<ReturnType>::value &&
- !std::is_pointer<ReturnType>::value) &&
- !Kokkos::is_reducer<ReturnType>::value>,
+                     (!std::is_array_v<ReturnType> &&
+                      !std::is_pointer_v<ReturnType>) &&
+                         !Kokkos::is_reducer<ReturnType>::value>,
ReturnType, FunctorType> {
using return_type =
Kokkos::View<ReturnType, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
template <class ReturnType, class FunctorType>
struct ParallelReduceReturnValue<
- std::enable_if_t<(std::is_array<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>,
+ std::enable_if_t<(std::is_array_v<ReturnType> ||
+ std::is_pointer_v<ReturnType>)>,
ReturnType, FunctorType> {
using return_type = Kokkos::View<std::remove_const_t<ReturnType>,
Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
static return_type return_value(ReturnType& return_val,
const FunctorType& functor) {
- if (std::is_array<ReturnType>::value)
+ if (std::is_array_v<ReturnType>)
return return_type(return_val);
else
return return_type(return_val, functor.value_count);
struct ParallelReduceReturnValue<
std::enable_if_t<Kokkos::is_reducer<ReturnType>::value>, ReturnType,
FunctorType> {
- using return_type = ReturnType;
+ using return_type = typename ReturnType::result_view_type;
using reducer_type = ReturnType;
using value_type = typename return_type::value_type;
- static return_type return_value(ReturnType& return_val, const FunctorType&) {
- return return_val;
+ static auto return_value(ReturnType& return_val, const FunctorType&) {
+ return return_val.view();
}
};
template <class PolicyType, class FunctorType>
struct ParallelReducePolicyType<
- std::enable_if_t<std::is_integral<PolicyType>::value>, PolicyType,
- FunctorType> {
+ std::enable_if_t<std::is_integral_v<PolicyType>>, PolicyType, FunctorType> {
using execution_space =
typename Impl::FunctorPolicyExecutionSpace<FunctorType,
void>::execution_space;
const PolicyType& policy,
const FunctorType& functor,
ReturnType& return_value) {
- uint64_t kpID = 0;
-
- PolicyType inner_policy = policy;
- Kokkos::Tools::Impl::begin_parallel_reduce<
- typename return_value_adapter::reducer_type>(inner_policy, functor,
+ using PassedReducerType = typename return_value_adapter::reducer_type;
+ uint64_t kpID = 0;
+
+ using ReducerSelector =
+ Kokkos::Impl::if_c<std::is_same_v<InvalidType, PassedReducerType>,
+ FunctorType, PassedReducerType>;
+ using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
+ PolicyType, typename ReducerSelector::type,
+ typename return_value_adapter::value_type>;
+ using CombinedFunctorReducerType =
+ CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>;
+
+ CombinedFunctorReducerType functor_reducer(
+ functor, typename Analysis::Reducer(
+ ReducerSelector::select(functor, return_value)));
+ const auto& response = Kokkos::Tools::Impl::begin_parallel_reduce<
+ typename return_value_adapter::reducer_type>(policy, functor_reducer,
label, kpID);
-
- Kokkos::Impl::shared_allocation_tracking_disable();
- Impl::ParallelReduce<FunctorType, PolicyType,
- typename return_value_adapter::reducer_type>
- closure(functor, inner_policy,
- return_value_adapter::return_value(return_value, functor));
- Kokkos::Impl::shared_allocation_tracking_enable();
+ const auto& inner_policy = response.policy;
+
+ auto closure = construct_with_shared_allocation_tracking_disabled<
+ Impl::ParallelReduce<CombinedFunctorReducerType, PolicyType,
+ typename Impl::FunctorPolicyExecutionSpace<
+ FunctorType, PolicyType>::execution_space>>(
+ functor_reducer, inner_policy,
+ return_value_adapter::return_value(return_value, functor));
closure.execute();
- Kokkos::Tools::Impl::end_parallel_reduce<
- typename return_value_adapter::reducer_type>(inner_policy, functor,
- label, kpID);
+ Kokkos::Tools::Impl::end_parallel_reduce<PassedReducerType>(
+ inner_policy, functor, label, kpID);
}
static constexpr bool is_array_reduction =
- Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>::StaticValueSize == 0;
+ Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, PolicyType, FunctorType,
+ typename return_value_adapter::value_type>::StaticValueSize == 0;
template <typename Dummy = ReturnType>
static inline std::enable_if_t<!(is_array_reduction &&
- std::is_pointer<Dummy>::value)>
+ std::is_pointer_v<Dummy>)>
execute(const std::string& label, const PolicyType& policy,
const FunctorType& functor, ReturnType& return_value) {
execute_impl(label, policy, functor, return_value);
}
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- template <typename Dummy = ReturnType>
- KOKKOS_DEPRECATED_WITH_COMMENT(
- "Array reductions with a raw pointer return type are deprecated. Use a "
- "Kokkos::View as return argument!")
- static inline std::
- enable_if_t<is_array_reduction && std::is_pointer<Dummy>::value> execute(
- const std::string& label, const PolicyType& policy,
- const FunctorType& functor, ReturnType& return_value) {
- execute_impl(label, policy, functor, return_value);
- }
-#endif
};
} // namespace Impl
static std::false_type test_func(...);
enum {
- value = std::is_same<std::true_type, decltype(test_func<T>(nullptr))>::value
+ value = std::is_same_v<std::true_type, decltype(test_func<T>(nullptr))>
};
};
template <class... ArgsDeduced>
static void fence(const ExecutionSpace& ex, const std::string& name,
ArgsDeduced&&... args) {
- if (Impl::parallel_reduce_needs_fence(ex, (ArgsDeduced &&) args...)) {
+ if (Impl::parallel_reduce_needs_fence(ex, (ArgsDeduced&&)args...)) {
ex.fence(name);
}
}
inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
!(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const std::string& label, const PolicyType& policy,
const FunctorType& functor, ReturnType& return_value) {
static_assert(
- !std::is_const<ReturnType>::value,
+ !std::is_const_v<ReturnType>,
"A const reduction result type is only allowed for a View, pointer or "
"reducer return type!");
inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
!(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const PolicyType& policy, const FunctorType& functor,
ReturnType& return_value) {
static_assert(
- !std::is_const<ReturnType>::value,
+ !std::is_const_v<ReturnType>,
"A const reduction result type is only allowed for a View, pointer or "
"reducer return type!");
template <class FunctorType, class ReturnType>
inline std::enable_if_t<!(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const size_t& policy, const FunctorType& functor,
ReturnType& return_value) {
static_assert(
- !std::is_const<ReturnType>::value,
+ !std::is_const_v<ReturnType>,
"A const reduction result type is only allowed for a View, pointer or "
"reducer return type!");
template <class FunctorType, class ReturnType>
inline std::enable_if_t<!(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const std::string& label, const size_t& policy,
const FunctorType& functor, ReturnType& return_value) {
static_assert(
- !std::is_const<ReturnType>::value,
+ !std::is_const_v<ReturnType>,
"A const reduction result type is only allowed for a View, pointer or "
"reducer return type!");
inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const std::string& label, const PolicyType& policy,
const FunctorType& functor, const ReturnType& return_value) {
ReturnType return_value_impl = return_value;
inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
(Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value)>
+ std::is_pointer_v<ReturnType>)>
parallel_reduce(const PolicyType& policy, const FunctorType& functor,
const ReturnType& return_value) {
ReturnType return_value_impl = return_value;
template <class FunctorType, class ReturnType>
inline std::enable_if_t<Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value>
+ std::is_pointer_v<ReturnType>>
parallel_reduce(const size_t& policy, const FunctorType& functor,
const ReturnType& return_value) {
using policy_type =
template <class FunctorType, class ReturnType>
inline std::enable_if_t<Kokkos::is_view<ReturnType>::value ||
Kokkos::is_reducer<ReturnType>::value ||
- std::is_pointer<ReturnType>::value>
+ std::is_pointer_v<ReturnType>>
parallel_reduce(const std::string& label, const size_t& policy,
const FunctorType& functor, const ReturnType& return_value) {
using policy_type =
nullptr) {
using FunctorAnalysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>;
+ FunctorType, void>;
using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
typename FunctorAnalysis::value_type,
typename FunctorAnalysis::pointer_type>;
nullptr) {
using FunctorAnalysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
- FunctorType>;
+ FunctorType, void>;
using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
typename FunctorAnalysis::value_type,
typename FunctorAnalysis::pointer_type>;
FunctorType>::policy_type;
using FunctorAnalysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, policy_type,
- FunctorType>;
+ FunctorType, void>;
using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
typename FunctorAnalysis::value_type,
typename FunctorAnalysis::pointer_type>;
FunctorType>::policy_type;
using FunctorAnalysis =
Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, policy_type,
- FunctorType>;
+ FunctorType, void>;
using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
typename FunctorAnalysis::value_type,
typename FunctorAnalysis::pointer_type>;
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP
+#define KOKKOS_IMPL_POINTEROWNERSHIP_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/// Trivial wrapper for raw pointers that express ownership.
+template <class T>
+using OwningRawPtr = T*;
+
+/// Trivial wrapper for raw pointers that do not express ownership.
+template <class T>
+using ObservingRawPtr = T*;
+
+} // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP */
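+// [Editor's sketch, not part of the upstream change] Both aliases compile to
+// a plain T*; they only document intent at API boundaries:
+//
+//   struct Node {
+//     Kokkos::OwningRawPtr<Node> child;      // this object is responsible
+//                                            // for deleting child
+//     Kokkos::ObservingRawPtr<Node> parent;  // non-owning back reference
+//   };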
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_PRINTF_HPP
+#define KOKKOS_PRINTF_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_SYCL
+#include <sycl/sycl.hpp>
+#else
+#include <cstdio>
+#endif
+
+namespace Kokkos {
+
+// In contrast to std::printf, this returns void so that behavior is
+// consistent across backends: the GPU backends always return 1, and NVHPC
+// only compiles if the return value is not requested.
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(KOKKOS_ARCH_INTEL_GPU)
+using ::printf;
+#else
+template <typename... Args>
+KOKKOS_FORCEINLINE_FUNCTION void printf(const char* format, Args... args) {
+#ifdef KOKKOS_ENABLE_SYCL
+ // Some compilers warn if "args" is empty and format is not a string literal
+ if constexpr (sizeof...(Args) == 0)
+ sycl::ext::oneapi::experimental::printf("%s", format);
+ else
+ sycl::ext::oneapi::experimental::printf(format, args...);
+#else
+ if constexpr (sizeof...(Args) == 0)
+ ::printf("%s", format);
+ else
+ ::printf(format, args...);
+#endif
+}
+#endif
+
+} // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_PRINTF_HPP */
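+// [Editor's sketch, not part of the upstream change] Typical device-side
+// use; unlike std::printf the wrapper returns void on every backend:
+//
+//   Kokkos::parallel_for(
+//       "debug_print", 4,
+//       KOKKOS_LAMBDA(const int i) { Kokkos::printf("i = %d\n", i); });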
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOSP_PROFILE_SECTION_HPP
+#define KOKKOSP_PROFILE_SECTION_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+#include <string>
+
+namespace Kokkos::Profiling {
+
+class [[nodiscard]] ProfilingSection {
+ uint32_t sectionID;
+
+ public:
+ ProfilingSection(ProfilingSection const&) = delete;
+ ProfilingSection& operator=(ProfilingSection const&) = delete;
+
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+ [[nodiscard]]
+#endif
+ explicit ProfilingSection(const std::string& sectionName) {
+    Kokkos::Profiling::createProfileSection(sectionName, &sectionID);
+ }
+
+ void start() { Kokkos::Profiling::startSection(sectionID); }
+
+ void stop() { Kokkos::Profiling::stopSection(sectionID); }
+
+ ~ProfilingSection() { Kokkos::Profiling::destroyProfileSection(sectionID); }
+};
+
+} // namespace Kokkos::Profiling
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#endif
+#endif
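+// [Editor's sketch, not part of the upstream change] Intended use: one
+// section, started and stopped around the phases to attribute, destroyed on
+// scope exit:
+//
+//   Kokkos::Profiling::ProfilingSection section("solve");
+//   section.start();
+//   // ... kernels attributed to "solve" ...
+//   section.stop();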
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOSP_SCOPED_REGION_HPP
+#define KOKKOSP_SCOPED_REGION_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_SCOPEDREGION
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+#include <string>
+
+namespace Kokkos::Profiling {
+
+class [[nodiscard]] ScopedRegion {
+ public:
+ ScopedRegion(ScopedRegion const &) = delete;
+ ScopedRegion &operator=(ScopedRegion const &) = delete;
+
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+ [[nodiscard]]
+#endif
+ explicit ScopedRegion(std::string const &name) {
+ Kokkos::Profiling::pushRegion(name);
+ }
+ ~ScopedRegion() { Kokkos::Profiling::popRegion(); }
+};
+
+} // namespace Kokkos::Profiling
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_SCOPEDREGION
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_SCOPEDREGION
+#endif
+#endif
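+// [Editor's sketch, not part of the upstream change] RAII use: the region is
+// pushed on construction and popped when the object goes out of scope:
+//
+//   {
+//     Kokkos::Profiling::ScopedRegion region("assembly");
+//     // ... kernels recorded under "assembly" ...
+//   }  // popRegion() runs here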
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_KOKKOS_RANK_HPP
+#define KOKKOS_KOKKOS_RANK_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Layout.hpp> // Iterate
+
+namespace Kokkos {
+
+// Iteration Pattern
+template <unsigned N, Iterate OuterDir = Iterate::Default,
+ Iterate InnerDir = Iterate::Default>
+struct Rank {
+ static_assert(N != 0u, "Kokkos Error: rank 0 undefined");
+ static_assert(N != 1u,
+ "Kokkos Error: rank 1 is not a multi-dimensional range");
+ static_assert(N < 9u, "Kokkos Error: Unsupported rank...");
+
+ using iteration_pattern = Rank<N, OuterDir, InnerDir>;
+
+ static constexpr int rank = N;
+ static constexpr Iterate outer_direction = OuterDir;
+ static constexpr Iterate inner_direction = InnerDir;
+};
+
+} // end namespace Kokkos
+
+#endif // KOKKOS_KOKKOS_RANK_HPP
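+// [Editor's sketch, not part of the upstream change] Rank in its usual home,
+// the iteration pattern of an MDRangePolicy (a, m, n assumed defined):
+//
+//   Kokkos::MDRangePolicy<Kokkos::Rank<2, Kokkos::Iterate::Left>> policy(
+//       {0, 0}, {m, n});
+//   Kokkos::parallel_for(
+//       "fill", policy,
+//       KOKKOS_LAMBDA(const int i, const int j) { a(i, j) = 0.0; });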
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_REDUCTION_IDENTITY_HPP
+#define KOKKOS_REDUCTION_IDENTITY_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_REDUCTION_IDENTITY
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <cfloat>
+#include <climits>
+
+namespace Kokkos {
+
+template <class T>
+struct reduction_identity; /*{
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T sum() { return T(); }  // 0
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T prod()  // 1
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom prod reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T max()  // minimum value
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom max reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T min()  // maximum value
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom min reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T bor()  // 0, integer types only
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom bor reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T band()  // !0, integer types only
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom band reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T lor()  // 0, integer types only
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom lor reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T land()  // !0, integer types only
+  { static_assert(false, "Missing specialization of Kokkos::reduction_identity
+    for custom land reduction type"); return T(); }
+};*/
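+// [Editor's sketch, not part of the upstream change] The specialization a
+// custom scalar type needs before it can drive the built-in reducers
+// (hypothetical type; only the identities actually used must be provided):
+//
+//   struct Complex2 { double re, im; };
+//   template <>
+//   struct Kokkos::reduction_identity<Complex2> {
+//     KOKKOS_FORCEINLINE_FUNCTION constexpr static Complex2 sum() {
+//       return {0.0, 0.0};
+//     }
+//     KOKKOS_FORCEINLINE_FUNCTION constexpr static Complex2 prod() {
+//       return {1.0, 0.0};
+//     }
+//   };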
+
+template <>
+struct reduction_identity<char> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char sum() {
+ return static_cast<char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char prod() {
+ return static_cast<char>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char max() { return CHAR_MIN; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char min() { return CHAR_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char bor() {
+ return static_cast<char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char band() {
+ return ~static_cast<char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char lor() {
+ return static_cast<char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static char land() {
+ return static_cast<char>(1);
+ }
+};
+
+template <>
+struct reduction_identity<signed char> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char sum() {
+ return static_cast<signed char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char prod() {
+ return static_cast<signed char>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char max() {
+ return SCHAR_MIN;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char min() {
+ return SCHAR_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char bor() {
+ return static_cast<signed char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char band() {
+ return ~static_cast<signed char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char lor() {
+ return static_cast<signed char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char land() {
+ return static_cast<signed char>(1);
+ }
+};
+
+template <>
+struct reduction_identity<bool> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static bool lor() {
+ return static_cast<bool>(false);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static bool land() {
+ return static_cast<bool>(true);
+ }
+};
+
+template <>
+struct reduction_identity<short> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short sum() {
+ return static_cast<short>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short prod() {
+ return static_cast<short>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short max() { return SHRT_MIN; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short min() { return SHRT_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short bor() {
+ return static_cast<short>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short band() {
+ return ~static_cast<short>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short lor() {
+ return static_cast<short>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static short land() {
+ return static_cast<short>(1);
+ }
+};
+
+template <>
+struct reduction_identity<int> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int sum() {
+ return static_cast<int>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int prod() {
+ return static_cast<int>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int max() { return INT_MIN; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int min() { return INT_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int bor() {
+ return static_cast<int>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int band() {
+ return ~static_cast<int>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int lor() {
+ return static_cast<int>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static int land() {
+ return static_cast<int>(1);
+ }
+};
+
+template <>
+struct reduction_identity<long> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long sum() {
+ return static_cast<long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long prod() {
+ return static_cast<long>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long max() { return LONG_MIN; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long min() { return LONG_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long bor() {
+ return static_cast<long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long band() {
+ return ~static_cast<long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long lor() {
+ return static_cast<long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long land() {
+ return static_cast<long>(1);
+ }
+};
+
+template <>
+struct reduction_identity<long long> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long sum() {
+ return static_cast<long long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long prod() {
+ return static_cast<long long>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long max() {
+ return LLONG_MIN;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long min() {
+ return LLONG_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long bor() {
+ return static_cast<long long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long band() {
+ return ~static_cast<long long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long lor() {
+ return static_cast<long long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static long long land() {
+ return static_cast<long long>(1);
+ }
+};
+
+template <>
+struct reduction_identity<unsigned char> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char sum() {
+ return static_cast<unsigned char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char prod() {
+ return static_cast<unsigned char>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char max() {
+ return static_cast<unsigned char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char min() {
+ return UCHAR_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char bor() {
+ return static_cast<unsigned char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char band() {
+ return ~static_cast<unsigned char>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char lor() {
+ return static_cast<unsigned char>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char land() {
+ return static_cast<unsigned char>(1);
+ }
+};
+
+template <>
+struct reduction_identity<unsigned short> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short sum() {
+ return static_cast<unsigned short>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short prod() {
+ return static_cast<unsigned short>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short max() {
+ return static_cast<unsigned short>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short min() {
+ return USHRT_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short bor() {
+ return static_cast<unsigned short>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short band() {
+ return ~static_cast<unsigned short>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short lor() {
+ return static_cast<unsigned short>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short land() {
+ return static_cast<unsigned short>(1);
+ }
+};
+
+template <>
+struct reduction_identity<unsigned int> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int sum() {
+ return static_cast<unsigned int>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int prod() {
+ return static_cast<unsigned int>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int max() {
+ return static_cast<unsigned int>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int min() {
+ return UINT_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int bor() {
+ return static_cast<unsigned int>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int band() {
+ return ~static_cast<unsigned int>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int lor() {
+ return static_cast<unsigned int>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int land() {
+ return static_cast<unsigned int>(1);
+ }
+};
+
+template <>
+struct reduction_identity<unsigned long> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long sum() {
+ return static_cast<unsigned long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long prod() {
+ return static_cast<unsigned long>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long max() {
+ return static_cast<unsigned long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long min() {
+ return ULONG_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long bor() {
+ return static_cast<unsigned long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long band() {
+ return ~static_cast<unsigned long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long lor() {
+ return static_cast<unsigned long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long land() {
+ return static_cast<unsigned long>(1);
+ }
+};
+
+template <>
+struct reduction_identity<unsigned long long> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long sum() {
+ return static_cast<unsigned long long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long prod() {
+ return static_cast<unsigned long long>(1);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long max() {
+ return static_cast<unsigned long long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long min() {
+ return ULLONG_MAX;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long bor() {
+ return static_cast<unsigned long long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long band() {
+ return ~static_cast<unsigned long long>(0x0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long lor() {
+ return static_cast<unsigned long long>(0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long land() {
+ return static_cast<unsigned long long>(1);
+ }
+};
+
+template <>
+struct reduction_identity<float> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() {
+ return static_cast<float>(0.0f);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() {
+ return static_cast<float>(1.0f);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() { return -FLT_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() { return FLT_MAX; }
+};
+
+template <>
+struct reduction_identity<double> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static double sum() {
+ return static_cast<double>(0.0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static double prod() {
+ return static_cast<double>(1.0);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static double max() { return -DBL_MAX; }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static double min() { return DBL_MAX; }
+};
+
+// No __host__ __device__ annotation because long double is treated as double
+// in device code. May be revisited later if that is no longer the case.
+template <>
+struct reduction_identity<long double> {
+ constexpr static long double sum() { return static_cast<long double>(0.0); }
+ constexpr static long double prod() { return static_cast<long double>(1.0); }
+ constexpr static long double max() { return -LDBL_MAX; }
+ constexpr static long double min() { return LDBL_MAX; }
+};
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_REDUCTION_IDENTITY
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_REDUCTION_IDENTITY
+#endif
+#endif
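
For reference, a minimal sketch of what the commented-out primary template above asks for: a user-defined type participates in these reductions by specializing Kokkos::reduction_identity itself. The type Vec3 is hypothetical.

// Hypothetical user-defined type; the specialization mirrors the
// built-in ones above.
struct Vec3 {
  double x, y, z;
};

namespace Kokkos {
template <>
struct reduction_identity<Vec3> {
  // Identity element for sum-reductions: the zero vector.
  KOKKOS_FORCEINLINE_FUNCTION constexpr static Vec3 sum() {
    return Vec3{0.0, 0.0, 0.0};
  }
};
}  // namespace Kokkos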
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_SCRATCHSPACE_HPP
#define KOKKOS_SCRATCHSPACE_HPP
"Instantiating ScratchMemorySpace on non-execution-space type.");
public:
- // Alignment of memory chunks returned by 'get'
- // must be a power of two
- enum { ALIGN = 8 };
+ // Minimal overalignment used by view scratch allocations
+ constexpr static int ALIGN = 8;
private:
mutable char* m_iter_L0 = nullptr;
mutable int m_offset = 0;
mutable int m_default_level = 0;
- enum { MASK = ALIGN - 1 }; // Alignment used by View::shmem_size
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ constexpr static int DEFAULT_ALIGNMENT_MASK = ALIGN - 1;
+#endif
public:
//! Tag this class as a memory space
static constexpr const char* name() { return "ScratchMemorySpace"; }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // This function is unused
template <typename IntType>
- KOKKOS_INLINE_FUNCTION static IntType align(const IntType& size) {
- return (size + MASK) & ~MASK;
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION static constexpr IntType align(
+ const IntType& size) {
+ return (size + DEFAULT_ALIGNMENT_MASK) & ~DEFAULT_ALIGNMENT_MASK;
}
+#endif
template <typename IntType>
KOKKOS_INLINE_FUNCTION void* get_shmem(const IntType& size,
int level = -1) const {
- return get_shmem_common</*aligned*/ false>(size, 1, level);
+ return get_shmem_common</*alignment_requested*/ false>(size, 1, level);
}
template <typename IntType>
KOKKOS_INLINE_FUNCTION void* get_shmem_aligned(const IntType& size,
const ptrdiff_t alignment,
int level = -1) const {
- return get_shmem_common</*aligned*/ true>(size, alignment, level);
+ return get_shmem_common</*alignment_requested*/ true>(size, alignment,
+ level);
}
private:
- template <bool aligned, typename IntType>
- KOKKOS_INLINE_FUNCTION void* get_shmem_common(const IntType& size,
- const ptrdiff_t alignment,
- int level = -1) const {
+ template <bool alignment_requested, typename IntType>
+ KOKKOS_INLINE_FUNCTION void* get_shmem_common(
+ const IntType& size, [[maybe_unused]] const ptrdiff_t alignment,
+ int level = -1) const {
if (level == -1) level = m_default_level;
- auto& m_iter = (level == 0) ? m_iter_L0 : m_iter_L1;
- auto& m_end = (level == 0) ? m_end_L0 : m_end_L1;
- char* previous = m_iter;
- const ptrdiff_t missalign = size_t(m_iter) % alignment;
- if (missalign) m_iter += alignment - missalign;
-
- void* tmp = m_iter + m_offset * (aligned ? size : align(size));
- if (m_end < (m_iter += (aligned ? size : align(size)) * m_multiplier)) {
- m_iter = previous; // put it back like it was
+ auto& m_iter = (level == 0) ? m_iter_L0 : m_iter_L1;
+ auto m_iter_old = m_iter;
+ if constexpr (alignment_requested) {
+ const ptrdiff_t missalign = size_t(m_iter) % alignment;
+ if (missalign) m_iter += alignment - missalign;
+ }
+
+ // This is each thread's start pointer for its allocation
+ // Note: for team scratch m_offset is 0, since every
+ // thread will get back the same shared pointer
+ void* tmp = m_iter + m_offset * size;
+ uintptr_t increment = static_cast<uintptr_t>(size) * m_multiplier;
+
+  // Cast to uintptr_t to avoid problems with pointer arithmetic when using SYCL
+ const auto end_iter =
+ reinterpret_cast<uintptr_t>((level == 0) ? m_end_L0 : m_end_L1);
+ auto current_iter = reinterpret_cast<uintptr_t>(m_iter);
+ auto capacity = end_iter - current_iter;
+
+ if (increment > capacity) {
+      // The request overflows the remaining capacity: return nullptr and reset m_iter
+ m_iter = m_iter_old;
+ tmp = nullptr;
#ifdef KOKKOS_ENABLE_DEBUG
      // mfh 23 Jun 2015: a printf call consumes 25 registers
      // in a CUDA build, so only print in debug mode. The
      // function still returns nullptr when there is not enough memory.
- KOKKOS_IMPL_DO_NOT_USE_PRINTF(
+ Kokkos::printf(
"ScratchMemorySpace<...>::get_shmem: Failed to allocate "
"%ld byte(s); remaining capacity is %ld byte(s)\n",
- long(size), long(m_end - m_iter));
+ long(size), long(capacity));
#endif // KOKKOS_ENABLE_DEBUG
- tmp = nullptr;
+ } else {
+ m_iter += increment;
}
return tmp;
}
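
A usage sketch of the rewritten allocation path, with illustrative sizes: scratch is reserved on the policy and carved out inside the kernel through get_shmem, which hands back nullptr on exhaustion exactly as get_shmem_common does above.

void scratch_demo() {
  const int n_teams = 4;  // illustrative league size
  Kokkos::parallel_for(
      Kokkos::TeamPolicy<>(n_teams, Kokkos::AUTO)
          .set_scratch_size(0, Kokkos::PerTeam(1024)),
      KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
        // Level-0 team scratch: every thread of a team receives the same
        // pointer, since m_offset is 0 for team-shared requests.
        double* buf = static_cast<double*>(
            team.team_shmem().get_shmem(64 * sizeof(double)));
        if (buf != nullptr) buf[team.team_rank() % 64] = 1.0;
      });
}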
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SWAP_HPP
+#define KOKKOS_SWAP_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstddef>
+#include <type_traits>
+#include <utility>
+
+namespace Kokkos {
+
+template <class T>
+KOKKOS_FUNCTION constexpr std::enable_if_t<std::is_move_constructible_v<T> &&
+ std::is_move_assignable_v<T>>
+kokkos_swap(T& a, T& b) noexcept(std::is_nothrow_move_constructible_v<T>&&
+ std::is_nothrow_move_assignable_v<T>) {
+ T t(std::move(a));
+ a = std::move(b);
+ b = std::move(t);
+}
+
+namespace Impl {
+
+template <class T>
+struct is_swappable {
+ template <class U>
+ static decltype(kokkos_swap(std::declval<T&>(), std::declval<T&>()))
+ test_swap(int);
+ struct Nope;
+ template <class U>
+ static Nope test_swap(long);
+ static constexpr bool value =
+ !std::is_same_v<decltype(test_swap<T>(0)), Nope>;
+};
+
+template <class T>
+inline constexpr bool is_nothrow_swappable_v =
+ noexcept(kokkos_swap(std::declval<T&>(), std::declval<T&>()));
+
+} // namespace Impl
+
+template <class T, std::size_t N>
+KOKKOS_FUNCTION constexpr std::enable_if_t<Impl::is_swappable<T>::value>
+kokkos_swap(T (&a)[N], T (&b)[N]) noexcept(Impl::is_nothrow_swappable_v<T>) {
+ for (std::size_t i = 0; i < N; ++i) {
+ kokkos_swap(a[i], b[i]);
+ }
+}
+
+} // namespace Kokkos
+
+#endif
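
A minimal usage sketch of the new header: kokkos_swap mirrors std::swap but is callable from device code, and the array overload above swaps element-wise.

#include <Kokkos_Swap.hpp>

void swap_demo() {
  int a = 1, b = 2;
  Kokkos::kokkos_swap(a, b);  // a == 2, b == 1

  double u[3] = {1.0, 2.0, 3.0};
  double v[3] = {4.0, 5.0, 6.0};
  Kokkos::kokkos_swap(u, v);  // element-wise via the array overload
}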
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
#endif
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
#endif
+
#ifndef KOKKOS_TASKSCHEDULER_HPP
#define KOKKOS_TASKSCHEDULER_HPP
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
} // end namespace Impl
template <class ExecSpace, class QueueType>
-class BasicTaskScheduler : public Impl::TaskSchedulerBase {
+class KOKKOS_DEPRECATED BasicTaskScheduler : public Impl::TaskSchedulerBase {
public:
using scheduler_type = BasicTaskScheduler;
using execution_space = ExecSpace;
if (nullptr != t) {
// Increment reference count to track subsequent assignment.
// This likely has to be SeqCst
- Kokkos::Impl::desul_atomic_inc(&(t->m_ref_count),
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&(t->m_ref_count), desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
if (q != static_cast<queue_type const*>(t->m_queue)) {
Kokkos::abort(
"Kokkos when_all Futures must be in the same scheduler");
//}
// Increment reference count to track subsequent assignment.
// This increment likely has to be SeqCst
- Kokkos::Impl::desul_atomic_inc(&(arg_f.m_task->m_ref_count),
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&(arg_f.m_task->m_ref_count),
+ desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
dep[i] = arg_f.m_task;
}
}
// Construct a TaskTeam execution policy
template <class T, class Scheduler>
-Impl::TaskPolicyWithPredecessor<Impl::TaskType::TaskTeam,
- Kokkos::BasicFuture<T, Scheduler>>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithPredecessor<
+ Impl::TaskType::TaskTeam, Kokkos::BasicFuture<T, Scheduler>>
KOKKOS_INLINE_FUNCTION
TaskTeam(Kokkos::BasicFuture<T, Scheduler> arg_future,
TaskPriority arg_priority = TaskPriority::Regular) {
}
template <class Scheduler>
-Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskTeam, Scheduler>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskTeam,
+ Scheduler>
KOKKOS_INLINE_FUNCTION TaskTeam(
Scheduler arg_scheduler,
std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value, TaskPriority>
}
template <class Scheduler, class PredecessorFuture>
-Impl::TaskPolicyWithScheduler<Kokkos::Impl::TaskType::TaskTeam, Scheduler,
- PredecessorFuture>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithScheduler<
+ Kokkos::Impl::TaskType::TaskTeam, Scheduler, PredecessorFuture>
KOKKOS_INLINE_FUNCTION
TaskTeam(Scheduler arg_scheduler, PredecessorFuture arg_future,
std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value &&
Kokkos::is_future<PredecessorFuture>::value,
TaskPriority>
arg_priority = TaskPriority::Regular) {
- static_assert(std::is_same<typename PredecessorFuture::scheduler_type,
- Scheduler>::value,
- "Can't create a task policy from a scheduler and a future from "
- "a different scheduler");
+ static_assert(
+ std::is_same_v<typename PredecessorFuture::scheduler_type, Scheduler>,
+ "Can't create a task policy from a scheduler and a future from "
+ "a different scheduler");
return {std::move(arg_scheduler), std::move(arg_future), arg_priority};
}
// Construct a TaskSingle execution policy
template <class T, class Scheduler>
-Impl::TaskPolicyWithPredecessor<Impl::TaskType::TaskSingle,
- Kokkos::BasicFuture<T, Scheduler>>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithPredecessor<
+ Impl::TaskType::TaskSingle, Kokkos::BasicFuture<T, Scheduler>>
KOKKOS_INLINE_FUNCTION
TaskSingle(Kokkos::BasicFuture<T, Scheduler> arg_future,
TaskPriority arg_priority = TaskPriority::Regular) {
}
template <class Scheduler>
-Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskSingle, Scheduler>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskSingle,
+ Scheduler>
KOKKOS_INLINE_FUNCTION TaskSingle(
Scheduler arg_scheduler,
std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value, TaskPriority>
}
template <class Scheduler, class PredecessorFuture>
-Impl::TaskPolicyWithScheduler<Kokkos::Impl::TaskType::TaskSingle, Scheduler,
- PredecessorFuture>
+KOKKOS_DEPRECATED Impl::TaskPolicyWithScheduler<
+ Kokkos::Impl::TaskType::TaskSingle, Scheduler, PredecessorFuture>
KOKKOS_INLINE_FUNCTION
TaskSingle(Scheduler arg_scheduler, PredecessorFuture arg_future,
std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value &&
Kokkos::is_future<PredecessorFuture>::value,
TaskPriority>
arg_priority = TaskPriority::Regular) {
- static_assert(std::is_same<typename PredecessorFuture::scheduler_type,
- Scheduler>::value,
- "Can't create a task policy from a scheduler and a future from "
- "a different scheduler");
+ static_assert(
+ std::is_same_v<typename PredecessorFuture::scheduler_type, Scheduler>,
+ "Can't create a task policy from a scheduler and a future from "
+ "a different scheduler");
return {std::move(arg_scheduler), std::move(arg_future), arg_priority};
}
*/
template <int TaskEnum, typename Scheduler, typename DepFutureType,
typename FunctorType>
-typename Scheduler::template future_type_for_functor<std::decay_t<FunctorType>>
+KOKKOS_DEPRECATED typename Scheduler::template future_type_for_functor<
+ std::decay_t<FunctorType>>
host_spawn(Impl::TaskPolicyWithScheduler<TaskEnum, Scheduler, DepFutureType>
arg_policy,
FunctorType&& arg_functor) {
*/
template <int TaskEnum, typename Scheduler, typename DepFutureType,
typename FunctorType>
-typename Scheduler::template future_type_for_functor<std::decay_t<FunctorType>>
+KOKKOS_DEPRECATED typename Scheduler::template future_type_for_functor<
+ std::decay_t<FunctorType>>
KOKKOS_INLINE_FUNCTION
task_spawn(Impl::TaskPolicyWithScheduler<TaskEnum, Scheduler, DepFutureType>
arg_policy,
* 2) High, Normal, or Low priority
*/
template <typename FunctorType, typename T>
-void KOKKOS_INLINE_FUNCTION
+KOKKOS_DEPRECATED void KOKKOS_INLINE_FUNCTION
respawn(FunctorType* arg_self, T const& arg,
TaskPriority const& arg_priority = TaskPriority::Regular) {
static_assert(Kokkos::is_future<T>::value || Kokkos::is_scheduler<T>::value,
// Wait for all runnable tasks to complete
template <class ExecSpace, class QueueType>
-inline void wait(BasicTaskScheduler<ExecSpace, QueueType> const& scheduler) {
+KOKKOS_DEPRECATED inline void wait(
+ BasicTaskScheduler<ExecSpace, QueueType> const& scheduler) {
using scheduler_type = BasicTaskScheduler<ExecSpace, QueueType>;
scheduler_type::specialization::execute(scheduler);
// scheduler.m_queue->execute();
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_TASKSCHEDULER_FWD_HPP
#define KOKKOS_TASKSCHEDULER_FWD_HPP
#include <Kokkos_Core_fwd.hpp>
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
// Forward declarations used in Impl::TaskQueue
template <typename ValueType, typename Scheduler>
-class BasicFuture;
+class KOKKOS_DEPRECATED BasicFuture;
template <class Space, class Queue>
-class SimpleTaskScheduler;
+class KOKKOS_DEPRECATED SimpleTaskScheduler;
template <class Space, class Queue>
-class BasicTaskScheduler;
+class KOKKOS_DEPRECATED BasicTaskScheduler;
template <typename Space>
-struct is_scheduler : public std::false_type {};
+struct KOKKOS_DEPRECATED is_scheduler : public std::false_type {};
template <class Space, class Queue>
-struct is_scheduler<BasicTaskScheduler<Space, Queue>> : public std::true_type {
-};
+struct KOKKOS_DEPRECATED is_scheduler<BasicTaskScheduler<Space, Queue>>
+ : public std::true_type {};
template <class Space, class Queue>
-struct is_scheduler<SimpleTaskScheduler<Space, Queue>> : public std::true_type {
-};
+struct KOKKOS_DEPRECATED is_scheduler<SimpleTaskScheduler<Space, Queue>>
+ : public std::true_type {};
-enum class TaskPriority : int { High = 0, Regular = 1, Low = 2 };
+enum class KOKKOS_DEPRECATED TaskPriority : int {
+ High = 0,
+ Regular = 1,
+ Low = 2
+};
} // namespace Kokkos
namespace Kokkos {
template <typename Space>
-using DeprecatedTaskScheduler = BasicTaskScheduler<
+using DeprecatedTaskScheduler KOKKOS_DEPRECATED = BasicTaskScheduler<
Space,
Impl::TaskQueue<
Space,
Impl::default_tasking_memory_space_for_execution_space_t<Space>>>;
template <typename Space>
-using DeprecatedTaskSchedulerMultiple = BasicTaskScheduler<
+using DeprecatedTaskSchedulerMultiple KOKKOS_DEPRECATED = BasicTaskScheduler<
Space,
Impl::TaskQueueMultiple<
Space,
Impl::default_tasking_memory_space_for_execution_space_t<Space>>>;
template <typename Space>
-using TaskScheduler = SimpleTaskScheduler<
+using TaskScheduler KOKKOS_DEPRECATED = SimpleTaskScheduler<
Space,
Impl::SingleTaskQueue<
Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
Impl::TaskQueueTraitsLockBased>>;
template <typename Space>
-using TaskSchedulerMultiple = SimpleTaskScheduler<
+using TaskSchedulerMultiple KOKKOS_DEPRECATED = SimpleTaskScheduler<
Space,
Impl::MultipleTaskQueue<
Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
Impl::default_tasking_memory_space_for_execution_space_t<Space>>>>>;
template <typename Space>
-using ChaseLevTaskScheduler = SimpleTaskScheduler<
+using ChaseLevTaskScheduler KOKKOS_DEPRECATED = SimpleTaskScheduler<
Space,
Impl::MultipleTaskQueue<
Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
Impl::default_tasking_memory_space_for_execution_space_t<Space>>>>>;
template <class Space, class QueueType>
-void wait(BasicTaskScheduler<Space, QueueType> const&);
+KOKKOS_DEPRECATED void wait(BasicTaskScheduler<Space, QueueType> const&);
namespace Impl {
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
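
The push/pop guard used in these tasking headers is the general pattern for code that must keep naming the deprecated types without tripping -Wdeprecated-declarations; a sketch, assuming KOKKOS_ENABLE_TASKDAG is enabled:

#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
#endif

// Intentional use of a KOKKOS_DEPRECATED alias compiles warning-free.
using scheduler_t = Kokkos::TaskScheduler<Kokkos::DefaultExecutionSpace>;

#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
#endif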
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_TIMER_HPP
#define KOKKOS_TIMER_HPP
inline Timer() { reset(); }
- Timer(const Timer&) = delete;
+ Timer(const Timer&) = delete;
Timer& operator=(const Timer&) = delete;
inline double seconds() const {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_KOKKOS_TUNERS_HPP
#define KOKKOS_KOKKOS_TUNERS_HPP
VariableValue make_variable_value(size_t, double);
SetOrRange make_candidate_range(double lower, double upper, double step,
bool openLower, bool openUpper);
+SetOrRange make_candidate_range(int64_t lower, int64_t upper, int64_t step,
+ bool openLower, bool openUpper);
size_t get_new_context_id();
void begin_context(size_t context_id);
void end_context(size_t context_id);
template <typename PointType, typename ArrayType>
struct GetPoint;
-template <typename PointType, size_t X>
-struct GetPoint<PointType,
- std::array<Kokkos::Tools::Experimental::VariableValue, X>> {
+template <typename PointType, size_t ArraySize>
+struct GetPoint<
+ PointType,
+ std::array<Kokkos::Tools::Experimental::VariableValue, ArraySize>> {
using index_set_type =
- std::array<Kokkos::Tools::Experimental::VariableValue, X>;
+ std::array<Kokkos::Tools::Experimental::VariableValue, ArraySize>;
static auto build(const PointType& in, const index_set_type& indices) {
- return get_point_helper(in, indices, std::make_index_sequence<X>{});
+ return get_point_helper(in, indices, std::make_index_sequence<ArraySize>{});
}
};
TunerType tuner;
public:
- TeamSizeTuner() = default;
+ TeamSizeTuner() = default;
TeamSizeTuner& operator=(const TeamSizeTuner& other) = default;
TeamSizeTuner(const TeamSizeTuner& other) = default;
- TeamSizeTuner& operator=(TeamSizeTuner&& other) = default;
- TeamSizeTuner(TeamSizeTuner&& other) = default;
+ TeamSizeTuner& operator=(TeamSizeTuner&& other) = default;
+ TeamSizeTuner(TeamSizeTuner&& other) = default;
template <typename ViableConfigurationCalculator, typename Functor,
typename TagType, typename... Properties>
TeamSizeTuner(const std::string& name,
- Kokkos::TeamPolicy<Properties...>& policy,
+ const Kokkos::TeamPolicy<Properties...>& policy_in,
const Functor& functor, const TagType& tag,
ViableConfigurationCalculator calc) {
- using PolicyType = Kokkos::TeamPolicy<Properties...>;
+ using PolicyType = Kokkos::TeamPolicy<Properties...>;
+ PolicyType policy(policy_in);
auto initial_vector_length = policy.impl_vector_length();
if (initial_vector_length < 1) {
policy.impl_set_vector_length(1);
}
template <typename... Properties>
- void tune(Kokkos::TeamPolicy<Properties...>& policy) {
+ auto tune(const Kokkos::TeamPolicy<Properties...>& policy_in) {
+ Kokkos::TeamPolicy<Properties...> policy(policy_in);
if (Kokkos::Tools::Experimental::have_tuning_tool()) {
auto configuration = tuner.begin();
auto team_size = std::get<1>(configuration);
policy.impl_set_vector_length(vector_length);
}
}
+ return policy;
+ }
+ void end() {
+ if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+ tuner.end();
+ }
+ }
+
+ TunerType get_tuner() const { return tuner; }
+};
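
A sketch of the revised tune() contract: the tuner now copies the incoming policy and returns the tuned copy instead of mutating its argument. The wrapper function, league size, and functor here are illustrative.

template <class Functor>
void run_tuned(Kokkos::Tools::Experimental::TeamSizeTuner& tuner,
               int league_size, const Functor& functor) {
  auto tuned = tuner.tune(Kokkos::TeamPolicy<>(league_size, Kokkos::AUTO));
  Kokkos::parallel_for("tuned_kernel", tuned, functor);
  tuner.end();  // close the measurement window reported to the tool
}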
+namespace Impl {
+template <class T>
+struct tuning_type_for;
+
+template <>
+struct tuning_type_for<double> {
+ static constexpr Kokkos::Tools::Experimental::ValueType value =
+ Kokkos::Tools::Experimental::ValueType::kokkos_value_double;
+ static double get(
+ const Kokkos::Tools::Experimental::VariableValue& value_struct) {
+ return value_struct.value.double_value;
+ }
+};
+template <>
+struct tuning_type_for<int64_t> {
+ static constexpr Kokkos::Tools::Experimental::ValueType value =
+ Kokkos::Tools::Experimental::ValueType::kokkos_value_int64;
+ static int64_t get(
+ const Kokkos::Tools::Experimental::VariableValue& value_struct) {
+ return value_struct.value.int_value;
+ }
+};
+} // namespace Impl
+template <class Bound>
+class SingleDimensionalRangeTuner {
+ size_t id;
+ size_t context;
+ using tuning_util = Impl::tuning_type_for<Bound>;
+
+ Bound default_value;
+
+ public:
+ SingleDimensionalRangeTuner() = default;
+ SingleDimensionalRangeTuner(
+ const std::string& name,
+ Kokkos::Tools::Experimental::StatisticalCategory category,
+ Bound default_val, Bound lower, Bound upper, Bound step = (Bound)0) {
+ default_value = default_val;
+ Kokkos::Tools::Experimental::VariableInfo info;
+ info.category = category;
+ info.candidates = make_candidate_range(
+ static_cast<Bound>(lower), static_cast<Bound>(upper),
+ static_cast<Bound>(step), false, false);
+ info.valueQuantity =
+ Kokkos::Tools::Experimental::CandidateValueType::kokkos_value_range;
+ info.type = tuning_util::value;
+ id = Kokkos::Tools::Experimental::declare_output_type(name, info);
+ }
+
+ Bound begin() {
+ context = Kokkos::Tools::Experimental::get_new_context_id();
+ Kokkos::Tools::Experimental::begin_context(context);
+ auto tuned_value =
+ Kokkos::Tools::Experimental::make_variable_value(id, default_value);
+ Kokkos::Tools::Experimental::request_output_values(context, 1,
+ &tuned_value);
+ return tuning_util::get(tuned_value);
+ }
+
+ void end() { Kokkos::Tools::Experimental::end_context(context); }
+
+ template <typename Functor>
+ void with_tuned_value(Functor& func) {
+ func(begin());
+ end();
+ }
+};
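
A hedged usage sketch of the new range tuner; the knob name and bounds are illustrative. with_tuned_value wraps the begin()/end() pair around a single invocation.

namespace KTE = Kokkos::Tools::Experimental;

void range_tuner_demo() {
  // Tune an integer knob in [8, 256] with step 8, defaulting to 32.
  KTE::SingleDimensionalRangeTuner<int64_t> tile_tuner(
      "my_tile_size", KTE::StatisticalCategory::kokkos_value_interval,
      /*default_val=*/32, /*lower=*/8, /*upper=*/256, /*step=*/8);

  auto run = [&](int64_t tile) {
    (void)tile;  // launch the kernel variant selected by 'tile' here
  };
  tile_tuner.with_tuned_value(run);  // begin() -> run(value) -> end()
}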
+
+class RangePolicyOccupancyTuner {
+ private:
+ using TunerType = SingleDimensionalRangeTuner<int64_t>;
+ TunerType tuner;
+
+ public:
+ RangePolicyOccupancyTuner() = default;
+ template <typename ViableConfigurationCalculator, typename Functor,
+ typename TagType, typename... Properties>
+ RangePolicyOccupancyTuner(const std::string& name,
+ const Kokkos::RangePolicy<Properties...>&,
+ const Functor&, const TagType&,
+ ViableConfigurationCalculator)
+ : tuner(TunerType(name,
+ Kokkos::Tools::Experimental::StatisticalCategory::
+ kokkos_value_ratio,
+ 100, 5, 100, 5)) {}
+
+ template <typename... Properties>
+ auto tune(const Kokkos::RangePolicy<Properties...>& policy_in) {
+ Kokkos::RangePolicy<Properties...> policy(policy_in);
+ if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+ auto occupancy = tuner.begin();
+ policy.impl_set_desired_occupancy(
+ Kokkos::Experimental::DesiredOccupancy{static_cast<int>(occupancy)});
+ }
+ return policy;
}
void end() {
if (Kokkos::Tools::Experimental::have_tuning_tool()) {
policy.impl_change_tile_size({std::get<Indices>(tuple)...});
}
template <typename... Properties>
- void tune(Kokkos::MDRangePolicy<Properties...>& policy) {
+ auto tune(const Kokkos::MDRangePolicy<Properties...>& policy_in) {
+ Kokkos::MDRangePolicy<Properties...> policy(policy_in);
if (Kokkos::Tools::Experimental::have_tuning_tool()) {
auto configuration = tuner.begin();
set_policy_tile(policy, configuration, std::make_index_sequence<rank>{});
}
+ return policy;
}
void end() {
if (Kokkos::Tools::Experimental::have_tuning_tool()) {
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_TYPE_INFO_HPP
+#define KOKKOS_TYPE_INFO_HPP
+
+#include <array>
+#include <string_view>
+#include <utility>
+
+#include <Kokkos_Macros.hpp>
+
+// Intel C++ Compiler Classic version 2021.2.0 works but 2021.1.2 doesn't
+// Both have __INTEL_COMPILER defined to 2021 so using
+// __INTEL_COMPILER_BUILD_DATE to discriminate.
+// Experimenting on the compiler explorer gave
+// icc version | __INTEL_COMPILER | __INTEL_COMPILER_BUILD_DATE
+// 2021.1.2 | 2021 | 20201208
+// 2021.2.0 | 2021 | 20210228
+// NVCC versions less than 11.3.0 segfault when that header is included.
+// NVCC+MSVC doesn't work at all: it simply reports "T" inside type_name.
+#if (!defined(KOKKOS_COMPILER_INTEL) || \
+ (__INTEL_COMPILER_BUILD_DATE >= 20210228)) && \
+ (!defined(KOKKOS_COMPILER_NVCC) || (KOKKOS_COMPILER_NVCC >= 1130)) && \
+ (!(defined(KOKKOS_COMPILER_NVCC) && defined(KOKKOS_COMPILER_MSVC)))
+
+#define KOKKOS_ENABLE_IMPL_TYPEINFO
+
+namespace Kokkos::Impl {
+
+template <size_t N>
+constexpr std::array<char, N> to_array(std::string_view src) {
+ std::array<char, N> dst{};
+ for (size_t i = 0; i < N; ++i) {
+ dst[i] = src[i];
+ }
+ return dst;
+}
+
+template <class T>
+constexpr auto type_name() {
+#if defined(__clang__)
+ constexpr std::string_view func = __PRETTY_FUNCTION__;
+ constexpr std::string_view prefix{"[T = "};
+ constexpr std::string_view suffix{"]"};
+#elif defined(__GNUC__)
+ constexpr std::string_view func = __PRETTY_FUNCTION__;
+ constexpr std::string_view prefix{"[with T = "};
+ constexpr std::string_view suffix{"]"};
+#elif defined(_MSC_VER)
+ constexpr std::string_view func = __FUNCSIG__;
+ constexpr std::string_view prefix{"type_name<"};
+ constexpr std::string_view suffix{">(void)"};
+#else
+#error bug
+#endif
+ constexpr auto beg = func.find(prefix) + prefix.size();
+ constexpr auto end = func.rfind(suffix);
+ static_assert(beg != std::string_view::npos);
+ static_assert(end != std::string_view::npos);
+ return to_array<end - beg>(func.substr(beg, end));
+}
+
+template <class T>
+class TypeInfo {
+ static constexpr auto value_ = type_name<T>();
+
+ public:
+ static constexpr std::string_view name() noexcept {
+ return {value_.data(), value_.size()};
+ }
+};
+
+} // namespace Kokkos::Impl
+
+#else  // out of luck on the unsupported compilers excluded above
+
+namespace Kokkos::Impl {
+
+template <class T>
+class TypeInfo {
+ public:
+ static constexpr std::string_view name() noexcept { return "not supported"; }
+};
+
+} // namespace Kokkos::Impl
+
+#endif
+
+#endif
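
A small sketch, assuming a compiler for which KOKKOS_ENABLE_IMPL_TYPEINFO gets defined above: TypeInfo produces a usable type name at compile time.

#include <string_view>

constexpr std::string_view name = Kokkos::Impl::TypeInfo<int>::name();
static_assert(!name.empty());  // yields "int" on GCC, Clang, and MSVC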
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_UNIQUE_TOKEN_HPP
#define KOKKOS_UNIQUE_TOKEN_HPP
};
/// \brief Instance scope UniqueToken allows for a max size other than
-/// execution_space::concurrency()
+/// execution_space().concurrency()
///
/// This object should behave like a ref-counted object, so that when the last
/// instance is destroyed, resources are freed if needed
/// threads that will attempt to acquire the UniqueToken. This constructor is
/// most commonly useful when you:
/// 1) Have a loop bound that may be smaller than
- /// execution_space::concurrency().
+ /// execution_space().concurrency().
/// 2) Want a per-team unique token in the range [0,
- /// execution_space::concurrency() / team_size)
+ /// execution_space().concurrency() / team_size)
UniqueToken(size_type max_size, execution_space const& = execution_space());
};
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/// \file Kokkos_Vectorization.hpp
+/// \brief Declaration and definition of Kokkos::Vectorization interface.
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_VECTORIZATION_HPP
+#define KOKKOS_VECTORIZATION_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Vectorization.hpp>
+#elif defined(KOKKOS_ENABLE_HIP)
+#include <HIP/Kokkos_HIP_Vectorization.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_VIEW_HPP
+#define KOKKOS_VIEW_HPP
+
+#if defined(KOKKOS_ENABLE_IMPL_MDSPAN) && !defined(KOKKOS_COMPILER_INTEL)
+#include <View/Kokkos_BasicView.hpp>
+#endif
+
+#include <View/Kokkos_ViewLegacy.hpp>
+
+#endif /* KOKKOS_VIEW_HPP */
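
Whichever implementation the include selects (BasicView-backed or legacy), the user-facing View API is unchanged; a minimal sketch:

void view_demo() {
  // Allocated and zero-initialized; the label "a" aids profiling tools.
  Kokkos::View<double**> a("a", 100, 3);
  // Host-side element access, assuming a host-accessible memory space.
  a(5, 2) = 1.0;
}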
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_WORKGRAPHPOLICY_HPP
#define KOKKOS_WORKGRAPHPOLICY_HPP
void push_work(const std::int32_t w) const noexcept {
const std::int32_t N = m_graph.numRows();
- std::int32_t volatile* const ready_queue = &m_queue[0];
- std::int32_t volatile* const end_hint = &m_queue[2 * N + 1];
+ std::int32_t* const ready_queue = &m_queue[0];
+ std::int32_t* const end_hint = &m_queue[2 * N + 1];
// Push work to end of queue
const std::int32_t j = atomic_fetch_add(end_hint, 1);
std::int32_t pop_work() const noexcept {
const std::int32_t N = m_graph.numRows();
- std::int32_t volatile* const ready_queue = &m_queue[0];
- std::int32_t volatile* const begin_hint = &m_queue[2 * N];
+ std::int32_t* const ready_queue = &m_queue[0];
+ std::int32_t* const begin_hint = &m_queue[2 * N];
// begin hint is guaranteed to be less than or equal to
// actual begin location in the queue.
- for (std::int32_t i = *begin_hint; i < N; ++i) {
- const std::int32_t w = ready_queue[i];
+ for (std::int32_t i = Kokkos::atomic_load(begin_hint); i < N; ++i) {
+ const std::int32_t w = Kokkos::atomic_load(&ready_queue[i]);
if (w == END_TOKEN) {
return END_TOKEN;
(std::int32_t)BEGIN_TOKEN))) {
// Attempt to claim ready work index succeeded,
// update the hint and return work index
- atomic_increment(begin_hint);
+ atomic_inc(begin_hint);
return w;
}
// arrive here when ready_queue[i] == BEGIN_TOKEN
const std::int32_t N = m_graph.numRows();
- std::int32_t volatile* const count_queue = &m_queue[N];
+ std::int32_t* const count_queue = &m_queue[N];
const std::int32_t B = m_graph.row_map(w);
const std::int32_t E = m_graph.row_map(w + 1);
KOKKOS_INLINE_FUNCTION
void operator()(const TagCount, int i) const noexcept {
- std::int32_t volatile* const count_queue = &m_queue[m_graph.numRows()];
+ std::int32_t* const count_queue = &m_queue[m_graph.numRows()];
- atomic_increment(count_queue + m_graph.entries[i]);
+ atomic_inc(count_queue + m_graph.entries[i]);
}
KOKKOS_INLINE_FUNCTION
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_HWLOC_HPP
#define KOKKOS_HWLOC_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
+#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+#include <cuda_runtime.h>
+#elif defined(KOKKOS_ARCH_AMD_GPU)
+// FIXME_OPENACC - hip_runtime_api.h contains two implementations: one for AMD
+// GPUs and the other for NVIDIA GPUs; the macro below selects the AMD one.
+#define __HIP_PLATFORM_AMD__
+#include <hip/hip_runtime_api.h>
+#elif defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+#include <thread>
+#endif
+
+#include <iostream>
+#include <sstream>
+
+Kokkos::Experimental::OpenACC::OpenACC()
+ : m_space_instance(
+ &Kokkos::Experimental::Impl::OpenACCInternal::singleton(),
+ [](Impl::OpenACCInternal*) {}) {
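+  // The default instance wraps the process-wide singleton with a no-op
+  // deleter: the singleton's lifetime is not managed by this shared pointer.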
+ Impl::OpenACCInternal::singleton().verify_is_initialized(
+ "OpenACC instance constructor");
+}
+
+Kokkos::Experimental::OpenACC::OpenACC(int async_arg)
+ : m_space_instance(new Kokkos::Experimental::Impl::OpenACCInternal,
+ [](Impl::OpenACCInternal* ptr) {
+ ptr->finalize();
+ delete ptr;
+ }) {
+ Impl::OpenACCInternal::singleton().verify_is_initialized(
+ "OpenACC instance constructor");
+ m_space_instance->initialize(async_arg);
+}
+
+void Kokkos::Experimental::OpenACC::impl_initialize(
+ InitializationSettings const& settings) {
+ Impl::OpenACCInternal::m_concurrency =
+      256000;  // FIXME_OPENACC - arbitrary fallback used when the concurrency
+               // cannot be computed
+ if (Impl::OpenACC_Traits::may_fallback_to_host &&
+ acc_get_num_devices(Impl::OpenACC_Traits::dev_type) == 0 &&
+ !settings.has_device_id()) {
+ if (show_warnings()) {
+ std::cerr << "Warning: No GPU available for execution, falling back to"
+ " using the host!"
+ << std::endl;
+ }
+ acc_set_device_type(acc_device_host);
+ Impl::OpenACCInternal::m_acc_device_num =
+ acc_get_device_num(acc_device_host);
+ } else {
+ using Kokkos::Impl::get_visible_devices;
+ acc_set_device_type(Impl::OpenACC_Traits::dev_type);
+ std::vector<int> const& visible_devices = get_visible_devices();
+ using Kokkos::Impl::get_gpu;
+ int const dev_num = get_gpu(settings).value_or(visible_devices[0]);
+ acc_set_device_num(dev_num, Impl::OpenACC_Traits::dev_type);
+ Impl::OpenACCInternal::m_acc_device_num = dev_num;
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ cudaDeviceProp deviceProp;
+ cudaError error = cudaGetDeviceProperties(&deviceProp, dev_num);
+ if (error != cudaSuccess) {
+ std::ostringstream msg;
+ msg << "Error: During OpenACC backend initialization, failed to retrieve "
+ << "CUDA device properties: (" << cudaGetErrorName(error)
+ << "): " << cudaGetErrorString(error);
+ Kokkos::Impl::host_abort(msg.str().c_str());
+ }
+ Impl::OpenACCInternal::m_concurrency =
+ deviceProp.maxThreadsPerMultiProcessor * deviceProp.multiProcessorCount;
+#elif defined(KOKKOS_ARCH_AMD_GPU)
+ hipDeviceProp_t deviceProp;
+ hipError_t error = hipGetDeviceProperties(&deviceProp, dev_num);
+ if (error != hipSuccess) {
+ std::ostringstream msg;
+ msg << "Error: During OpenACC backend initialization, failed to retrieve "
+ << "HIP device properties: (" << hipGetErrorName(error)
+ << "): " << hipGetErrorString(error);
+ Kokkos::Impl::host_abort(msg.str().c_str());
+ }
+ Impl::OpenACCInternal::m_concurrency =
+ deviceProp.maxThreadsPerMultiProcessor * deviceProp.multiProcessorCount;
+#elif defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+ Impl::OpenACCInternal::m_concurrency = std::thread::hardware_concurrency();
+ if (Impl::OpenACCInternal::m_concurrency == 0) {
+ Kokkos::Impl::host_abort(
+ "Error: During OpenACC backend initialization, failed to retrieve "
+ "CPU hardware concurrency");
+ }
+#else
+ // FIXME_OPENACC: Compute Impl::OpenACCInternal::m_concurrency correctly.
+#endif
+ }
+ Impl::OpenACCInternal::singleton().initialize();
+}
+
+void Kokkos::Experimental::OpenACC::impl_finalize() {
+ Impl::OpenACCInternal::singleton().finalize();
+}
+
+bool Kokkos::Experimental::OpenACC::impl_is_initialized() {
+ return Impl::OpenACCInternal::singleton().is_initialized();
+}
+
+void Kokkos::Experimental::OpenACC::print_configuration(std::ostream& os,
+ bool verbose) const {
+ os << "Device Execution Space:\n";
+ os << " KOKKOS_ENABLE_OPENACC: yes\n";
+ os << "OpenACC Options:\n";
+ os << " KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS: ";
+#ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+ os << " KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE: ";
+#if defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+ os << "yes\n";
+#else
+ os << "no\n";
+#endif
+ m_space_instance->print_configuration(os, verbose);
+}
+
+void Kokkos::Experimental::OpenACC::fence(std::string const& name) const {
+ m_space_instance->fence(name);
+}
+
+void Kokkos::Experimental::OpenACC::impl_static_fence(std::string const& name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<
+ Kokkos::Experimental::OpenACC>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+ [&]() { acc_wait_all(); });
+}
+
+uint32_t Kokkos::Experimental::OpenACC::impl_instance_id() const noexcept {
+ return m_space_instance->instance_id();
+}
+
+int Kokkos::Experimental::OpenACC::acc_async_queue() const {
+ return m_space_instance->m_async_arg;
+}
+
+int Kokkos::Experimental::OpenACC::acc_device_number() const {
+ return Impl::OpenACCInternal::m_acc_device_num;
+}
+
+namespace Kokkos {
+namespace Impl {
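+// Register the OpenACC execution space with Kokkos at startup; the numeric
+// prefix in the key presumably orders this backend among the registered space
+// factories (inferred from how the other backends register themselves).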
+int g_openacc_space_factory_initialized =
+ initialize_space_factory<Experimental::OpenACC>("170_OpenACC");
+} // namespace Impl
+}  // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_OPENACC_HPP
+#define KOKKOS_OPENACC_HPP
+
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+
+#include <openacc.h>
+
+#include <iosfwd>
+#include <string>
+
+// FIXME_OPENACC: The macros below are temporarily enabled to work around
+// existing OpenACC compilers that do not support lambdas in parallel loops.
+// The LLVM/Clacc compiler does not need this workaround.
+#ifndef KOKKOS_COMPILER_CLANG
+#define KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+#define KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS
+#endif
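+
+// When KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS is defined, the MDRange
+// dispatch linearizes the nested loops by hand (a single loop over the total
+// iteration count) instead of relying on the OpenACC collapse(n) clause; see
+// the MDRange parallel_for implementations further below.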
+
+namespace Kokkos::Experimental::Impl {
+class OpenACCInternal;
+}
+
+namespace Kokkos::Experimental {
+
+class OpenACC {
+ Kokkos::Impl::HostSharedPtr<Impl::OpenACCInternal> m_space_instance;
+
+ friend bool operator==(OpenACC const& lhs, OpenACC const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(OpenACC const& lhs, OpenACC const& rhs) {
+ return !(lhs == rhs);
+ }
+
+ public:
+ using execution_space = OpenACC;
+ using memory_space = OpenACCSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ using array_layout = LayoutLeft;
+ using size_type = memory_space::size_type;
+
+ using scratch_memory_space = ScratchMemorySpace<OpenACC>;
+
+ OpenACC();
+
+ explicit OpenACC(int async_arg);
+
+ static void impl_initialize(InitializationSettings const& settings);
+ static void impl_finalize();
+ static bool impl_is_initialized();
+
+ void print_configuration(std::ostream& os, bool verbose = false) const;
+
+ void fence(std::string const& name =
+ "Kokkos::OpenACC::fence(): Unnamed Instance Fence") const;
+ static void impl_static_fence(std::string const& name);
+
+ static char const* name() { return "OpenACC"; }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ static int concurrency();
+#else
+ int concurrency() const;
+#endif
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED static bool in_parallel() {
+ return acc_on_device(acc_device_not_host);
+ }
+#endif
+ uint32_t impl_instance_id() const noexcept;
+ Impl::OpenACCInternal* impl_internal_space_instance() const {
+ return m_space_instance.get();
+ }
+
+ int acc_async_queue() const;
+ int acc_device_number() const;
+};
+
+} // namespace Kokkos::Experimental
+
+template <>
+struct Kokkos::Tools::Experimental::DeviceTypeTraits<
+ ::Kokkos::Experimental::OpenACC> {
+ static constexpr DeviceType id =
+ ::Kokkos::Profiling::Experimental::DeviceType::OpenACC;
+ static int device_id(const Kokkos::Experimental::OpenACC& accInstance) {
+ return accInstance.acc_device_number();
+ }
+};
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <OpenACC/Kokkos_OpenACC_DeepCopy.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <openacc.h>
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+ const Kokkos::Experimental::OpenACC &exec_space,
+ const size_t arg_alloc_size) const {
+ return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+ const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+ const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
+ const size_t arg_alloc_size, const size_t arg_logical_size) const {
+ return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
+ const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ (void)exec_space;
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size, arg_handle);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ static_assert(sizeof(void *) == sizeof(uintptr_t),
+ "Error sizeof(void*) != sizeof(uintptr_t)");
+
+ void *ptr = nullptr;
+
+ ptr = acc_malloc(arg_alloc_size);
+
+ if (!ptr) {
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+
+ return ptr;
+}
+
+void Kokkos::Experimental::OpenACCSpace::deallocate(
+ void *const arg_alloc_ptr, const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void Kokkos::Experimental::OpenACCSpace::deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size) const {
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+void Kokkos::Experimental::OpenACCSpace::impl_deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+
+ if (arg_alloc_ptr) {
+ acc_free(arg_alloc_ptr);
+ }
+}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_OPENACC_SPACE_HPP
+#define KOKKOS_OPENACC_SPACE_HPP
+
+#include <Kokkos_Concepts.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+#include <openacc.h>
+#include <iosfwd>
+
+namespace Kokkos::Experimental {
+
+class OpenACC;
+
+class OpenACCSpace {
+ public:
+ using memory_space = OpenACCSpace;
+ using execution_space = OpenACC;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ using size_type = size_t;
+
+ OpenACCSpace() = default;
+
+ /**\brief Allocate untracked memory in the space */
+ void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
+ const size_t arg_alloc_size) const;
+ void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
+ const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the space */
+ void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ static constexpr char const* name() { return "OpenACCSpace"; }
+
+ private:
+ void* impl_allocate(const Kokkos::Experimental::OpenACC& exec_space,
+ const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+};
+
+} // namespace Kokkos::Experimental
+
+/*--------------------------------------------------------------------------*/
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+ Kokkos::Experimental::OpenACCSpace> {
+#if defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+#else
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+#endif
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::HostSpace> {
+#if defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+#else
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+#endif
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACCSpace> {
+ enum : bool { assignable = true };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+/*--------------------------------------------------------------------------*/
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_DEEP_COPY_HPP
+#define KOKKOS_OPENACC_DEEP_COPY_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+
+#include <Kokkos_Concepts.hpp>
+
+#include <openacc.h>
+
+template <>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACC> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+    // The behavior of acc_memcpy_device when the bytes argument is zero is
+    // clarified only in the latest OpenACC specification (V3.2), so this
+    // value check is added as a safeguard. (The current NVHPC (V22.5)
+    // supports OpenACC V2.7.)
+ if (n > 0) {
+ acc_memcpy_device_async(dst, const_cast<void*>(src), n, acc_async_noval);
+ }
+ }
+ DeepCopy(const Kokkos::Experimental::OpenACC& exec, void* dst,
+ const void* src, size_t n) {
+ if (n > 0) {
+ acc_memcpy_device_async(dst, const_cast<void*>(src), n,
+ exec.acc_async_queue());
+ }
+ }
+};
+
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACCSpace,
+ ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ if (n > 0) {
+ acc_memcpy_device_async(dst, const_cast<void*>(src), n, acc_async_noval);
+ }
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<OpenACCSpace, OpenACCSpace, "
+ "ExecutionSpace>::DeepCopy: fence before copy");
+ if (n > 0) {
+ acc_memcpy_device_async(dst, const_cast<void*>(src), n, acc_async_noval);
+ }
+ }
+};
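+
+// Note on the generic-ExecutionSpace specializations in this header: the copy
+// is enqueued on the default OpenACC queue (acc_async_noval), so the given
+// execution space is fenced first to order the copy after that space's
+// pending work.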
+
+template <>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::HostSpace,
+ Kokkos::Experimental::OpenACC> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0) {
+      acc_memcpy_to_device_async(dst, const_cast<void*>(src), n,
+                                 acc_async_noval);
+    }
+ }
+ DeepCopy(const Kokkos::Experimental::OpenACC& exec, void* dst,
+ const void* src, size_t n) {
+    if (n > 0) {
+      acc_memcpy_to_device_async(dst, const_cast<void*>(src), n,
+                                 exec.acc_async_queue());
+    }
+ }
+};
+
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+ Kokkos::HostSpace, ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ if (n > 0) {
+ acc_memcpy_to_device_async(dst, const_cast<void*>(src), n,
+ acc_async_noval);
+ }
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<OpenACCSpace, HostSpace, "
+ "ExecutionSpace>::DeepCopy: fence before copy");
+ if (n > 0) {
+ acc_memcpy_to_device_async(dst, const_cast<void*>(src), n,
+ acc_async_noval);
+ }
+ }
+};
+
+template <>
+struct Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
+ Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACC> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ if (n > 0) {
+ acc_memcpy_from_device_async(dst, const_cast<void*>(src), n,
+ acc_async_noval);
+ }
+ }
+ DeepCopy(const Kokkos::Experimental::OpenACC& exec, void* dst,
+ const void* src, size_t n) {
+ if (n > 0) {
+ acc_memcpy_from_device_async(dst, const_cast<void*>(src), n,
+ exec.acc_async_queue());
+ }
+ }
+};
+
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<
+ Kokkos::HostSpace, Kokkos::Experimental::OpenACCSpace, ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0) {
+      acc_memcpy_from_device_async(dst, const_cast<void*>(src), n,
+                                   acc_async_noval);
+    }
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<HostSpace, OpenACCSpace, "
+ "ExecutionSpace>::DeepCopy: fence before copy");
+ if (n > 0) {
+ acc_memcpy_from_device_async(dst, const_cast<void*>(src), n,
+ acc_async_noval);
+ }
+ }
+};
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_FUNCTOR_ADAPTER_HPP
+#define KOKKOS_OPENACC_FUNCTOR_ADAPTER_HPP
+
+#include <OpenACC/Kokkos_OpenACC_Macros.hpp>
+#include <type_traits>
+
+namespace Kokkos::Experimental::Impl {
+
+enum class RoutineClause { worker, seq };
+
+template <class Functor, class Policy, RoutineClause>
+class FunctorAdapter;
+
+#define KOKKOS_IMPL_ACC_FUNCTOR_ADAPTER(CLAUSE) \
+ template <class Functor, class Policy> \
+ class FunctorAdapter<Functor, Policy, RoutineClause::CLAUSE> { \
+ Functor m_functor; \
+ using WorkTag = typename Policy::work_tag; \
+ \
+ public: \
+ FunctorAdapter(Functor const &functor) : m_functor(functor) {} \
+ \
+ KOKKOS_IMPL_ACC_PRAGMA(routine CLAUSE) \
+ template <class... Args> \
+ KOKKOS_FUNCTION void operator()(Args &&...args) const { \
+ if constexpr (std::is_void_v<WorkTag>) { \
+ m_functor(static_cast<Args &&>(args)...); \
+ } else { \
+ m_functor(WorkTag(), static_cast<Args &&>(args)...); \
+ } \
+ } \
+ }
+
+KOKKOS_IMPL_ACC_FUNCTOR_ADAPTER(worker);
+KOKKOS_IMPL_ACC_FUNCTOR_ADAPTER(seq);
+
+#undef KOKKOS_IMPL_ACC_FUNCTOR_ADAPTER
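+
+// Illustrative use (a sketch, not part of this header), assuming a policy
+// type P whose work_tag is MyTag:
+//
+//   FunctorAdapter<MyFunctor, P, RoutineClause::seq> f(my_functor);
+//   f(i);  // calls my_functor(MyTag{}, i); the "#pragma acc routine seq"
+//          // emitted by the macro marks the call operator as callable from
+//          // OpenACC device code.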
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+
+#include <openacc.h>
+
+#include <iostream>
+
+// Arbitrary sentinel values: the device to use and its concurrency are not
+// known yet.
+int Kokkos::Experimental::Impl::OpenACCInternal::m_acc_device_num = -1;
+int Kokkos::Experimental::Impl::OpenACCInternal::m_concurrency = -1;
+
+Kokkos::Experimental::Impl::OpenACCInternal&
+Kokkos::Experimental::Impl::OpenACCInternal::singleton() {
+ static OpenACCInternal self;
+ return self;
+}
+
+bool Kokkos::Experimental::Impl::OpenACCInternal::verify_is_initialized(
+ const char* const label) const {
+ if (!m_is_initialized) {
+ Kokkos::abort((std::string("Kokkos::Experimental::OpenACC::") + label +
+ " : ERROR device not initialized\n")
+ .c_str());
+ }
+ return m_is_initialized;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::initialize(int async_arg) {
+ if ((async_arg < 0) && (async_arg != acc_async_sync) &&
+ (async_arg != acc_async_noval)) {
+ Kokkos::abort((std::string("Kokkos::Experimental::OpenACC::initialize()") +
+ " : ERROR async_arg should be a non-negative integer" +
+ " unless being a special value defined in OpenACC\n")
+ .c_str());
+ }
+ m_async_arg = async_arg;
+ m_is_initialized = true;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::finalize() {
+ m_is_initialized = false;
+}
+
+bool Kokkos::Experimental::Impl::OpenACCInternal::is_initialized() const {
+ return m_is_initialized;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::print_configuration(
+ std::ostream& os, bool /*verbose*/) const {
+ os << "Using OpenACC\n"; // FIXME_OPENACC
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::fence(
+ std::string const& name) const {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<
+ Kokkos::Experimental::OpenACC>(
+ name,
+ Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id()},
+ [&]() { acc_wait(m_async_arg); });
+}
+
+uint32_t Kokkos::Experimental::Impl::OpenACCInternal::instance_id()
+ const noexcept {
+ return Kokkos::Tools::Experimental::Impl::idForInstance<OpenACC>(
+ reinterpret_cast<uintptr_t>(this));
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int Kokkos::Experimental::OpenACC::concurrency() {
+ return Impl::OpenACCInternal::m_concurrency;
+}
+#else
+int Kokkos::Experimental::OpenACC::concurrency() const {
+ return Impl::OpenACCInternal::m_concurrency;
+}
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_INSTANCE_HPP
+#define KOKKOS_OPENACC_INSTANCE_HPP
+
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <openacc.h>
+
+#include <cstdint>
+#include <iosfwd>
+#include <string>
+
+namespace Kokkos::Experimental::Impl {
+
+class OpenACCInternal {
+ bool m_is_initialized = false;
+
+ OpenACCInternal(const OpenACCInternal&) = default;
+ OpenACCInternal& operator=(const OpenACCInternal&) = default;
+
+ public:
+ static int m_acc_device_num;
+ static int m_concurrency;
+ int m_async_arg = acc_async_noval;
+
+ OpenACCInternal() = default;
+
+ static OpenACCInternal& singleton();
+
+ bool verify_is_initialized(const char* const label) const;
+
+ void initialize(int async_arg = acc_async_noval);
+ void finalize();
+ bool is_initialized() const;
+
+ void print_configuration(std::ostream& os, bool verbose = false) const;
+
+ void fence(std::string const& name) const;
+
+ uint32_t instance_id() const noexcept;
+};
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_MDRANGE_POLICY_HPP_
+#define KOKKOS_OPENACC_MDRANGE_POLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+template <>
+struct Kokkos::default_outer_direction<Kokkos::Experimental::OpenACC> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct Kokkos::default_inner_direction<Kokkos::Experimental::OpenACC> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Kokkos::Experimental::OpenACC,
+ ThreadAndVector>
+ : AcceleratorBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos::Experimental::Impl {
+
+struct OpenACCCollapse {};
+struct OpenACCTile {};
+using OpenACCIterateLeft = std::integral_constant<Iterate, Iterate::Left>;
+using OpenACCIterateRight = std::integral_constant<Iterate, Iterate::Right>;
+template <int N>
+using OpenACCMDRangeBegin = decltype(MDRangePolicy<OpenACC, Rank<N>>::m_lower);
+template <int N>
+using OpenACCMDRangeEnd = decltype(MDRangePolicy<OpenACC, Rank<N>>::m_upper);
+template <int N>
+using OpenACCMDRangeTile = decltype(MDRangePolicy<OpenACC, Rank<N>>::m_tile);
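+
+// These aliases name the index-array types MDRangePolicy uses for its lower
+// bounds, upper bounds, and tile sizes (Kokkos::Array-like; the exact element
+// type is whatever MDRangePolicy declares).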
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_MACROS_HPP
+#define KOKKOS_OPENACC_MACROS_HPP
+
+#define KOKKOS_IMPL_ACC_PRAGMA_HELPER(x) _Pragma(#x)
+#define KOKKOS_IMPL_ACC_PRAGMA(x) KOKKOS_IMPL_ACC_PRAGMA_HELPER(acc x)
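+
+// For example, KOKKOS_IMPL_ACC_PRAGMA(routine seq) expands to
+// _Pragma("acc routine seq"), which the compiler treats like
+// "#pragma acc routine seq".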
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_FOR_MDRANGE_HPP
+#define KOKKOS_OPENACC_PARALLEL_FOR_MDRANGE_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_MDRangePolicy.hpp>
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos::Experimental::Impl {
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<2> const& begin,
+ OpenACCMDRangeEnd<2> const& end,
+ int async_arg) {
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto i1 = m / dim0 + begin1;
+ auto i0 = m % dim0 + begin0;
+ functor(i0, i1);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(2) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1);
+ }
+ }
+#endif
+}
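+
+// Illustrative index mapping for the manually collapsed rank-2 loop above,
+// assuming begin = {0, 0} and end = {2, 3}: m = 0..5 yields (i0, i1) =
+// (0,0), (1,0), (0,1), (1,1), (0,2), (1,2), i.e. i0 varies fastest, matching
+// Iterate::Left ordering.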
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<2> const& begin,
+ OpenACCMDRangeEnd<2> const& end,
+ int async_arg) {
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto i0 = m / dim1 + begin0;
+ auto i1 = m % dim1 + begin1;
+ functor(i0, i1);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(2) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ functor(i0, i1);
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<2> const& begin,
+ OpenACCMDRangeEnd<2> const& end,
+ OpenACCMDRangeTile<2> const& tile,
+ int async_arg) {
+ auto tile0 = tile[0];
+ auto tile1 = tile[1];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile0,tile1) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1);
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<2> const& begin,
+ OpenACCMDRangeEnd<2> const& end,
+ OpenACCMDRangeTile<2> const& tile,
+ int async_arg) {
+ auto tile1 = tile[1];
+ auto tile0 = tile[0];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile1,tile0) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ functor(i0, i1);
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<3> const& begin,
+ OpenACCMDRangeEnd<3> const& end,
+ int async_arg) {
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim1 * dim0;
+ auto i2 = m / tmp1 + begin2;
+ auto tmp2 = m % tmp1;
+ auto i1 = tmp2 / dim0 + begin1;
+ auto i0 = tmp2 % dim0 + begin0;
+ functor(i0, i1, i2);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(3) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<3> const& begin,
+ OpenACCMDRangeEnd<3> const& end,
+ int async_arg) {
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim2 * dim1;
+ auto i0 = m / tmp1 + begin0;
+ auto tmp2 = m % tmp1;
+ auto i1 = tmp2 / dim2 + begin1;
+ auto i2 = tmp2 % dim2 + begin2;
+ functor(i0, i1, i2);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(3) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<3> const& begin,
+ OpenACCMDRangeEnd<3> const& end,
+ OpenACCMDRangeTile<3> const& tile,
+ int async_arg) {
+ auto tile0 = tile[0];
+ auto tile1 = tile[1];
+ auto tile2 = tile[2];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile0,tile1,tile2) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<3> const& begin,
+ OpenACCMDRangeEnd<3> const& end,
+ OpenACCMDRangeTile<3> const& tile,
+ int async_arg) {
+ auto tile2 = tile[2];
+ auto tile1 = tile[1];
+ auto tile0 = tile[0];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile2,tile1,tile0) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<4> const& begin,
+ OpenACCMDRangeEnd<4> const& end,
+ int async_arg) {
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim2 * dim1 * dim0;
+ auto i3 = m / tmp1 + begin3;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim1 * dim0;
+ auto i2 = tmp2 / tmp1 + begin2;
+ tmp2 = tmp2 % tmp1;
+ auto i1 = tmp2 / dim0 + begin1;
+ auto i0 = tmp2 % dim0 + begin0;
+ functor(i0, i1, i2, i3);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(4) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<4> const& begin,
+ OpenACCMDRangeEnd<4> const& end,
+ int async_arg) {
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim3 * dim2 * dim1;
+ auto i0 = m / tmp1 + begin0;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim3 * dim2;
+ auto i1 = tmp2 / tmp1 + begin1;
+ tmp2 = tmp2 % tmp1;
+ auto i2 = tmp2 / dim3 + begin2;
+ auto i3 = tmp2 % dim3 + begin3;
+ functor(i0, i1, i2, i3);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(4) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<4> const& begin,
+ OpenACCMDRangeEnd<4> const& end,
+ OpenACCMDRangeTile<4> const& tile,
+ int async_arg) {
+ auto tile0 = tile[0];
+ auto tile1 = tile[1];
+ auto tile2 = tile[2];
+ auto tile3 = tile[3];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile0,tile1,tile2,tile3) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<4> const& begin,
+ OpenACCMDRangeEnd<4> const& end,
+ OpenACCMDRangeTile<4> const& tile,
+ int async_arg) {
+ auto tile3 = tile[3];
+ auto tile2 = tile[2];
+ auto tile1 = tile[1];
+ auto tile0 = tile[0];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile3,tile2,tile1,tile0) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<5> const& begin,
+ OpenACCMDRangeEnd<5> const& end,
+ int async_arg) {
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim4 = end4 - begin4;
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim4 * dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim3 * dim2 * dim1 * dim0;
+ auto i4 = m / tmp1 + begin4;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim2 * dim1 * dim0;
+ auto i3 = tmp2 / tmp1 + begin3;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim1 * dim0;
+ auto i2 = tmp2 / tmp1 + begin2;
+ tmp2 = tmp2 % tmp1;
+ auto i1 = tmp2 / dim0 + begin1;
+ auto i0 = tmp2 % dim0 + begin0;
+ functor(i0, i1, i2, i3, i4);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(5) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<5> const& begin,
+ OpenACCMDRangeEnd<5> const& end,
+ int async_arg) {
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim4 = end4 - begin4;
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim4 * dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim4 * dim3 * dim2 * dim1;
+ auto i0 = m / tmp1 + begin0;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim4 * dim3 * dim2;
+ auto i1 = tmp2 / tmp1 + begin1;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim4 * dim3;
+ auto i2 = tmp2 / tmp1 + begin2;
+ tmp2 = tmp2 % tmp1;
+ auto i3 = tmp2 / dim4 + begin3;
+ auto i4 = tmp2 % dim4 + begin4;
+ functor(i0, i1, i2, i3, i4);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(5) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<5> const& begin,
+ OpenACCMDRangeEnd<5> const& end,
+ OpenACCMDRangeTile<5> const& tile,
+ int async_arg) {
+ auto tile0 = tile[0];
+ auto tile1 = tile[1];
+ auto tile2 = tile[2];
+ auto tile3 = tile[3];
+ auto tile4 = tile[4];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile0,tile1,tile2,tile3,tile4) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<5> const& begin,
+ OpenACCMDRangeEnd<5> const& end,
+ OpenACCMDRangeTile<5> const& tile,
+ int async_arg) {
+ auto tile4 = tile[4];
+ auto tile3 = tile[3];
+ auto tile2 = tile[2];
+ auto tile1 = tile[1];
+ auto tile0 = tile[0];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile4,tile3,tile2,tile1,tile0) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<6> const& begin,
+ OpenACCMDRangeEnd<6> const& end,
+ int async_arg) {
+ auto begin5 = begin[5];
+ auto end5 = end[5];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim5 = end5 - begin5;
+ auto dim4 = end4 - begin4;
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim5 * dim4 * dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim4 * dim3 * dim2 * dim1 * dim0;
+ auto i5 = m / tmp1 + begin5;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim3 * dim2 * dim1 * dim0;
+ auto i4 = tmp2 / tmp1 + begin4;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim2 * dim1 * dim0;
+ auto i3 = tmp2 / tmp1 + begin3;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim1 * dim0;
+ auto i2 = tmp2 / tmp1 + begin2;
+ tmp2 = tmp2 % tmp1;
+ auto i1 = tmp2 / dim0 + begin1;
+ auto i0 = tmp2 % dim0 + begin0;
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(6) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i5 = begin5; i5 < end5; ++i5) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCCollapse, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<6> const& begin,
+ OpenACCMDRangeEnd<6> const& end,
+ int async_arg) {
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin5 = begin[5];
+ auto end5 = end[5];
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+ auto dim5 = end5 - begin5;
+ auto dim4 = end4 - begin4;
+ auto dim3 = end3 - begin3;
+ auto dim2 = end2 - begin2;
+ auto dim1 = end1 - begin1;
+ auto dim0 = end0 - begin0;
+ auto nIter = dim5 * dim4 * dim3 * dim2 * dim1 * dim0;
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (decltype(nIter) m = 0; m < nIter; ++m) {
+ auto tmp1 = dim5 * dim4 * dim3 * dim2 * dim1;
+ auto i0 = m / tmp1 + begin0;
+ auto tmp2 = m % tmp1;
+ tmp1 = dim5 * dim4 * dim3 * dim2;
+ auto i1 = tmp2 / tmp1 + begin1;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim5 * dim4 * dim3;
+ auto i2 = tmp2 / tmp1 + begin2;
+ tmp2 = tmp2 % tmp1;
+ tmp1 = dim5 * dim4;
+ auto i3 = tmp2 / tmp1 + begin3;
+ tmp2 = tmp2 % tmp1;
+ auto i4 = tmp2 / dim5 + begin4;
+ auto i5 = tmp2 % dim5 + begin5;
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+#else
+// clang-format off
+#pragma acc parallel loop gang vector collapse(6) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i5 = begin5; i5 < end5; ++i5) {
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateLeft,
+ Functor const& functor,
+ OpenACCMDRangeBegin<6> const& begin,
+ OpenACCMDRangeEnd<6> const& end,
+ OpenACCMDRangeTile<6> const& tile,
+ int async_arg) {
+ auto tile0 = tile[0];
+ auto tile1 = tile[1];
+ auto tile2 = tile[2];
+ auto tile3 = tile[3];
+ auto tile4 = tile[4];
+ auto tile5 = tile[5];
+ auto begin5 = begin[5];
+ auto end5 = end[5];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile0,tile1,tile2,tile3,tile4,tile5) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i5 = begin5; i5 < end5; ++i5) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+template <class Functor>
+void OpenACCParallelForMDRangePolicy(OpenACCTile, OpenACCIterateRight,
+ Functor const& functor,
+ OpenACCMDRangeBegin<6> const& begin,
+ OpenACCMDRangeEnd<6> const& end,
+ OpenACCMDRangeTile<6> const& tile,
+ int async_arg) {
+ auto tile5 = tile[5];
+ auto tile4 = tile[4];
+ auto tile3 = tile[3];
+ auto tile2 = tile[2];
+ auto tile1 = tile[1];
+ auto tile0 = tile[0];
+ auto begin0 = begin[0];
+ auto end0 = end[0];
+ auto begin1 = begin[1];
+ auto end1 = end[1];
+ auto begin2 = begin[2];
+ auto end2 = end[2];
+ auto begin3 = begin[3];
+ auto end3 = end[3];
+ auto begin4 = begin[4];
+ auto end4 = end[4];
+ auto begin5 = begin[5];
+ auto end5 = end[5];
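+ // Mirror of the IterateLeft case: i5 is innermost here, so the tile sizes
+ // are listed in reverse (tile5 first) to keep each tile size paired with
+ // the loop over the same index.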
+// clang-format off
+#pragma acc parallel loop gang vector tile(tile5,tile4,tile3,tile2,tile1,tile0) copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i0 = begin0; i0 < end0; ++i0) {
+ for (auto i1 = begin1; i1 < end1; ++i1) {
+ for (auto i2 = begin2; i2 < end2; ++i2) {
+ for (auto i3 = begin3; i3 < end3; ++i3) {
+ for (auto i4 = begin4; i4 < end4; ++i4) {
+ for (auto i5 = begin5; i5 < end5; ++i5) {
+ functor(i0, i1, i2, i3, i4, i5);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace Kokkos::Experimental::Impl
+
+template <class Functor, class... Traits>
+class Kokkos::Impl::ParallelFor<Functor, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::OpenACC> {
+ using Policy = MDRangePolicy<Traits...>;
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ Functor, Policy, Kokkos::Experimental::Impl::RoutineClause::seq>
+ m_functor;
+ Policy m_policy;
+
+ public:
+ ParallelFor(Functor const& functor, Policy const& policy)
+ : m_functor(functor), m_policy(policy) {}
+
+ void execute() const {
+ static_assert(1 < Policy::rank && Policy::rank < 7);
+ static_assert(Policy::inner_direction == Iterate::Left ||
+ Policy::inner_direction == Iterate::Right);
+ constexpr int rank = Policy::rank;
+ for (int i = 0; i < rank; ++i) {
+ if (m_policy.m_lower[i] >= m_policy.m_upper[i]) {
+ return;
+ }
+ }
+ int const async_arg = m_policy.space().acc_async_queue();
+#if 0 // FIXME_OPENACC: OpenACC requires tile size to be constant.
+ for (int i = 0; i < rank; ++i) {
+ if (m_policy.m_tile[i] < 1) {
+ Kokkos::Experimental::Impl::OpenACCParallelForMDRangePolicy(
+ Kokkos::Experimental::Impl::OpenACCCollapse(),
+ std::integral_constant<Iterate, Policy::inner_direction>(),
+ m_functor, m_policy.m_lower, m_policy.m_upper, async_arg);
+ return;
+ }
+ }
+ Kokkos::Experimental::Impl::OpenACCParallelForMDRangePolicy(
+ Kokkos::Experimental::Impl::OpenACCTile(),
+ std::integral_constant<Iterate, Policy::inner_direction>(), m_functor,
+ m_policy.m_lower, m_policy.m_upper, m_policy.m_tile, async_arg);
+#else
+ Kokkos::Experimental::Impl::OpenACCParallelForMDRangePolicy(
+ Kokkos::Experimental::Impl::OpenACCCollapse(),
+ std::integral_constant<Iterate, Policy::inner_direction>(), m_functor,
+ m_policy.m_lower, m_policy.m_upper, async_arg);
+#endif
+ }
+};
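+
+// Note: the tile-based dispatch above is compiled out (#if 0) because the
+// OpenACC tile() clause requires compile-time-constant sizes, so every
+// MDRange parallel_for currently takes the collapse path. A usage sketch
+// (host code, names hypothetical) that reaches this specialization:
+//   Kokkos::MDRangePolicy<Kokkos::Experimental::OpenACC, Kokkos::Rank<3>>
+//       policy({0, 0, 0}, {nx, ny, nz});
+//   Kokkos::parallel_for("mdrange", policy,
+//                        KOKKOS_LAMBDA(int i, int j, int k) { /* ... */ });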
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_FOR_RANGE_HPP
+#define KOKKOS_OPENACC_PARALLEL_FOR_RANGE_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_ScheduleType.hpp>
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos::Experimental::Impl {
+template <class IndexType, class Functor>
+void OpenACCParallelForRangePolicy(Schedule<Static>, int chunk_size,
+ IndexType begin, IndexType end,
+ Functor afunctor, int async_arg) {
+ // FIXME_OPENACC FIXME_NVHPC: the local functor copy below works around a
+ // compiler bug (incorrect scope analysis):
+ // NVC++-S-1067-Cannot determine bounds for array - functor
+ auto const functor(afunctor);
+ if (chunk_size >= 1) {
+// clang-format off
+#pragma acc parallel loop gang(static:chunk_size) vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i = begin; i < end; ++i) {
+ functor(i);
+ }
+ } else {
+// clang-format off
+#pragma acc parallel loop gang(static:*) vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i = begin; i < end; ++i) {
+ functor(i);
+ }
+ }
+}
+
+template <class IndexType, class Functor>
+void OpenACCParallelForRangePolicy(Schedule<Dynamic>, int chunk_size,
+ IndexType begin, IndexType end,
+ Functor afunctor, int async_arg) {
+ // FIXME_OPENACC FIXME_NVHPC: the local functor copy below works around a
+ // compiler bug (incorrect scope analysis):
+ // NVC++-S-1067-Cannot determine bounds for array - functor
+ auto const functor(afunctor);
+ if (chunk_size >= 1) {
+// clang-format off
+#pragma acc parallel loop gang(static:chunk_size) vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i = begin; i < end; ++i) {
+ functor(i);
+ }
+ } else {
+// clang-format off
+#pragma acc parallel loop gang vector copyin(functor) async(async_arg)
+ // clang-format on
+ for (auto i = begin; i < end; ++i) {
+ functor(i);
+ }
+ }
+}
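+
+// How Kokkos schedules map to OpenACC gang scheduling: an explicit
+// chunk_size >= 1 always yields gang(static:chunk_size); otherwise
+// Schedule<Static> requests gang(static:*) (implementation-chosen static
+// chunking) while Schedule<Dynamic> leaves the distribution to the compiler.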
+} // namespace Kokkos::Experimental::Impl
+
+template <class Functor, class... Traits>
+class Kokkos::Impl::ParallelFor<Functor, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenACC> {
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ Functor, Policy, Kokkos::Experimental::Impl::RoutineClause::seq>
+ m_functor;
+ Policy m_policy;
+ using ScheduleType = Kokkos::Experimental::Impl::OpenACCScheduleType<Policy>;
+
+ public:
+ ParallelFor(Functor const& functor, Policy const& policy)
+ : m_functor(functor), m_policy(policy) {}
+
+ void execute() const {
+ auto const begin = m_policy.begin();
+ auto const end = m_policy.end();
+
+ if (end <= begin) {
+ return;
+ }
+
+ int const async_arg = m_policy.space().acc_async_queue();
+ int const chunk_size = m_policy.chunk_size();
+
+ Kokkos::Experimental::Impl::OpenACCParallelForRangePolicy(
+ ScheduleType(), chunk_size, begin, end, m_functor, async_arg);
+ }
+};
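+
+// Usage sketch (host code, names hypothetical), assuming an OpenACC-enabled
+// build and a device-accessible view x:
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Experimental::OpenACC>(0, n),
+//       KOKKOS_LAMBDA(int i) { x(i) *= 2; });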
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_FOR_TEAM_HPP
+#define KOKKOS_OPENACC_PARALLEL_FOR_TEAM_HPP
+
+#include <OpenACC/Kokkos_OpenACC_Team.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Hierarchical Parallelism -> Team level implementation
+template <class FunctorType, class... Properties>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::OpenACC> {
+ private:
+ using Policy = Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenACC,
+ Properties...>;
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ FunctorType, Policy, Kokkos::Experimental::Impl::RoutineClause::seq>
+ m_functor;
+ using Member = typename Policy::member_type;
+
+ const Policy m_policy;
+
+ public:
+ inline void execute() const {
+ auto league_size = m_policy.league_size();
+ auto team_size = m_policy.team_size();
+ auto vector_length = m_policy.impl_vector_length();
+
+ int const async_arg = m_policy.space().acc_async_queue();
+
+ auto const a_functor(m_functor);
+
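+ // Flatten the league x team x vector hierarchy into a single gang-vector
+ // loop; each iteration recovers its league_id from the flat index i.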
+#pragma acc parallel loop gang vector num_gangs(league_size) \
+ vector_length(team_size * vector_length) copyin(a_functor) async(async_arg)
+ for (int i = 0; i < league_size * team_size * vector_length; i++) {
+ int league_id = i / (team_size * vector_length);
+ typename Policy::member_type team(league_id, league_size, team_size,
+ vector_length);
+ a_functor(team);
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+namespace Kokkos {
+
+// Hierarchical Parallelism -> Team thread level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda) {
+ iType j_start =
+ loop_boundaries.team.team_rank() / loop_boundaries.team.vector_length();
+ iType j_end = loop_boundaries.end;
+ iType j_step = loop_boundaries.team.team_size();
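+ // Round-robin distribution: team "thread" t (= team_rank / vector_length)
+ // handles j = t, t + team_size, t + 2 * team_size, ...; the loop itself is
+ // seq because the enclosing acc routine is seq in this mode.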
+ if (j_start >= loop_boundaries.start) {
+#pragma acc loop seq
+ for (iType j = j_start; j < j_end; j += j_step) {
+ lambda(j);
+ }
+ }
+}
+
+// Hierarchical Parallelism -> Thread vector level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda) {
+ iType j_start =
+ loop_boundaries.team.team_rank() % loop_boundaries.team.vector_length();
+ iType j_end = loop_boundaries.end;
+ iType j_step = loop_boundaries.team.vector_length();
+ if (j_start >= loop_boundaries.start) {
+#pragma acc loop seq
+ for (iType j = j_start; j < j_end; j += j_step) {
+ lambda(j);
+ }
+ }
+}
+
+// Hierarchical Parallelism -> Team vector level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda) {
+ iType j_start =
+ loop_boundaries.team.team_rank() % loop_boundaries.team.vector_length();
+ iType j_end = loop_boundaries.end;
+ iType j_step = loop_boundaries.team.vector_length();
+ if (j_start >= loop_boundaries.start) {
+#pragma acc loop seq
+ for (iType j = j_start; j < j_end; j += j_step) {
+ lambda(j);
+ }
+ }
+}
+
+} // namespace Kokkos
+
+#else // KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Hierarchical Parallelism -> Team level implementation
+template <class FunctorType, class... Properties>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::OpenACC> {
+ private:
+ using Policy = Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenACC,
+ Properties...>;
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ FunctorType, Policy, Kokkos::Experimental::Impl::RoutineClause::worker>
+ m_functor;
+ using Member = typename Policy::member_type;
+
+ const Policy m_policy;
+
+ public:
+ inline void execute() const {
+ auto league_size = m_policy.league_size();
+ auto team_size = m_policy.team_size();
+ auto vector_length = m_policy.impl_vector_length();
+
+ int const async_arg = m_policy.space().acc_async_queue();
+
+ auto const a_functor(m_functor);
+
+#pragma acc parallel loop gang num_gangs(league_size) num_workers(team_size) \
+ vector_length(vector_length) copyin(a_functor) async(async_arg)
+ for (int i = 0; i < league_size; i++) {
+ int league_id = i;
+ typename Policy::member_type team(league_id, league_size, team_size,
+ vector_length);
+ a_functor(team);
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+namespace Kokkos {
+
+// Hierarchical Parallelism -> Team thread level implementation
+#pragma acc routine worker
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda) {
+#pragma acc loop worker
+ for (iType j = loop_boundaries.start; j < loop_boundaries.end; j++) {
+ lambda(j);
+ }
+}
+
+// Hierarchical Parallelism -> Thread vector level implementation
+#pragma acc routine vector
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda) {
+#pragma acc loop vector
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i);
+ }
+}
+
+// Hierarchical Parallelism -> Team vector level implementation
+#pragma acc routine vector
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda) {
+#pragma acc loop vector
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i);
+ }
+}
+
+} // namespace Kokkos
+
+#endif /* #ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS */
+
+#endif /* #ifndef KOKKOS_OPENACC_PARALLEL_FOR_TEAM_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_REDUCE_MDRANGE_HPP
+#define KOKKOS_OPENACC_PARALLEL_REDUCE_MDRANGE_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Macros.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_MDRangePolicy.hpp>
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos::Experimental::Impl {
+
+// primary template: catch-all non-implemented custom reducers
+template <class Functor, class Reducer, class Policy,
+ bool = std::is_arithmetic_v<typename Reducer::value_type>>
+struct OpenACCParallelReduceMDRangeHelper {
+ OpenACCParallelReduceMDRangeHelper(Functor const&, Reducer const&,
+ Policy const&) {
+ static_assert(Kokkos::Impl::always_false<Functor>::value,
+ "not implemented");
+ }
+};
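+
+// The trailing bool template parameter is true only for arithmetic value
+// types; the specializations generated later for each built-in reducer
+// match <..., true>, so custom reducers fall through to the static_assert
+// above.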
+} // namespace Kokkos::Experimental::Impl
+
+template <class CombinedFunctorReducerType, class... Traits>
+class Kokkos::Impl::ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::OpenACC> {
+ using Policy = MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Pointer = typename ReducerType::pointer_type;
+ using ValueType = typename ReducerType::value_type;
+
+ CombinedFunctorReducerType m_functor_reducer;
+ Policy m_policy;
+ Pointer m_result_ptr;
+ bool m_result_ptr_on_device;
+
+ public:
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& functor_reducer,
+ const Policy& policy, const ViewType& result)
+ : m_functor_reducer(functor_reducer),
+ m_policy(policy),
+ m_result_ptr(result.data()),
+ m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ typename ViewType::memory_space>::accessible) {}
+
+ void execute() const {
+ static_assert(1 < Policy::rank && Policy::rank < 7);
+ static_assert(Policy::inner_direction == Iterate::Left ||
+ Policy::inner_direction == Iterate::Right);
+ constexpr int rank = Policy::rank;
+ ValueType val;
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+ reducer.init(&val);
+
+ for (int i = 0; i < rank; ++i) {
+ if (m_policy.m_lower[i] >= m_policy.m_upper[i]) {
+ if (m_result_ptr_on_device) {
+ acc_memcpy_to_device(m_result_ptr, &val, sizeof(ValueType));
+ } else {
+ *m_result_ptr = val;
+ }
+ return;
+ }
+ }
+
+ int const async_arg = m_policy.space().acc_async_queue();
+
+ Kokkos::Experimental::Impl::OpenACCParallelReduceMDRangeHelper(
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ FunctorType, Policy,
+ Kokkos::Experimental::Impl::RoutineClause::seq>(
+ m_functor_reducer.get_functor()),
+ std::conditional_t<
+ std::is_same_v<FunctorType, typename ReducerType::functor_type>,
+ Sum<ValueType>, typename ReducerType::functor_type>(val),
+ m_policy);
+
+ // The OpenACC backend supports only built-in Reducer types, so the
+ // reducer.final() call below is a no-op.
+ reducer.final(&val);
+ // The acc_wait(async_arg) calls in the if-else statement below are needed
+ // because the OpenACC compute kernel above may execute asynchronously and
+ // val is a local host variable.
+ if (m_result_ptr_on_device) {
+ acc_memcpy_to_device_async(m_result_ptr, &val, sizeof(ValueType),
+ async_arg);
+ acc_wait(async_arg);
+ } else {
+ acc_wait(async_arg);
+ *m_result_ptr = val;
+ }
+ }
+};
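+
+// Usage sketch (host code, names hypothetical); reducing into a plain
+// scalar implicitly selects the built-in Sum reducer:
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       Kokkos::MDRangePolicy<Kokkos::Experimental::OpenACC,
+//                             Kokkos::Rank<2>>({0, 0}, {nx, ny}),
+//       KOKKOS_LAMBDA(int i, int j, double& lsum) { lsum += a(i, j); },
+//       sum);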
+
+#if defined(KOKKOS_ENABLE_OPENACC_COLLAPSE_MDRANGE_LOOPS)
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_ITERATE(REDUCER, \
+ OPERATOR) \
+ namespace Kokkos::Experimental::Impl { \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<2> const& begin, \
+ OpenACCMDRangeEnd<2> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto i1 = m / dim0 + begin1; \
+ auto i0 = m % dim0 + begin0; \
+ functor(i0, i1, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<2> const& begin, \
+ OpenACCMDRangeEnd<2> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto i0 = m / dim1 + begin0; \
+ auto i1 = m % dim1 + begin1; \
+ functor(i0, i1, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<3> const& begin, \
+ OpenACCMDRangeEnd<3> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim1 * dim0; \
+ auto i2 = m / tmp1 + begin2; \
+ auto tmp2 = m % tmp1; \
+ auto i1 = tmp2 / dim0 + begin1; \
+ auto i0 = tmp2 % dim0 + begin0; \
+ functor(i0, i1, i2, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<3> const& begin, \
+ OpenACCMDRangeEnd<3> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim2 * dim1; \
+ auto i0 = m / tmp1 + begin0; \
+ auto tmp2 = m % tmp1; \
+ auto i1 = tmp2 / dim2 + begin1; \
+ auto i2 = tmp2 % dim2 + begin2; \
+ functor(i0, i1, i2, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<4> const& begin, \
+ OpenACCMDRangeEnd<4> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim2 * dim1 * dim0; \
+ auto i3 = m / tmp1 + begin3; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim1 * dim0; \
+ auto i2 = tmp2 / tmp1 + begin2; \
+ tmp2 = tmp2 % tmp1; \
+ auto i1 = tmp2 / dim0 + begin1; \
+ auto i0 = tmp2 % dim0 + begin0; \
+ functor(i0, i1, i2, i3, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<4> const& begin, \
+ OpenACCMDRangeEnd<4> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim3 * dim2 * dim1; \
+ auto i0 = m / tmp1 + begin0; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim3 * dim2; \
+ auto i1 = tmp2 / tmp1 + begin1; \
+ tmp2 = tmp2 % tmp1; \
+ auto i2 = tmp2 / dim3 + begin2; \
+ auto i3 = tmp2 % dim3 + begin3; \
+ functor(i0, i1, i2, i3, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<5> const& begin, \
+ OpenACCMDRangeEnd<5> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto dim4 = end4 - begin4; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim4 * dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim3 * dim2 * dim1 * dim0; \
+ auto i4 = m / tmp1 + begin4; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim2 * dim1 * dim0; \
+ auto i3 = tmp2 / tmp1 + begin3; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim1 * dim0; \
+ auto i2 = tmp2 / tmp1 + begin2; \
+ tmp2 = tmp2 % tmp1; \
+ auto i1 = tmp2 / dim0 + begin1; \
+ auto i0 = tmp2 % dim0 + begin0; \
+ functor(i0, i1, i2, i3, i4, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<5> const& begin, \
+ OpenACCMDRangeEnd<5> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto dim4 = end4 - begin4; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim4 * dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim4 * dim3 * dim2 * dim1; \
+ auto i0 = m / tmp1 + begin0; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim4 * dim3 * dim2; \
+ auto i1 = tmp2 / tmp1 + begin1; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim4 * dim3; \
+ auto i2 = tmp2 / tmp1 + begin2; \
+ tmp2 = tmp2 % tmp1; \
+ auto i3 = tmp2 / dim4 + begin3; \
+ auto i4 = tmp2 % dim4 + begin4; \
+ functor(i0, i1, i2, i3, i4, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<6> const& begin, \
+ OpenACCMDRangeEnd<6> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin5 = begin[5]; \
+ auto end5 = end[5]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto dim5 = end5 - begin5; \
+ auto dim4 = end4 - begin4; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim5 * dim4 * dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim4 * dim3 * dim2 * dim1 * dim0; \
+ auto i5 = m / tmp1 + begin5; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim3 * dim2 * dim1 * dim0; \
+ auto i4 = tmp2 / tmp1 + begin4; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim2 * dim1 * dim0; \
+ auto i3 = tmp2 / tmp1 + begin3; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim1 * dim0; \
+ auto i2 = tmp2 / tmp1 + begin2; \
+ tmp2 = tmp2 % tmp1; \
+ auto i1 = tmp2 / dim0 + begin1; \
+ auto i0 = tmp2 % dim0 + begin0; \
+ functor(i0, i1, i2, i3, i4, i5, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<6> const& begin, \
+ OpenACCMDRangeEnd<6> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin5 = begin[5]; \
+ auto end5 = end[5]; \
+ auto dim5 = end5 - begin5; \
+ auto dim4 = end4 - begin4; \
+ auto dim3 = end3 - begin3; \
+ auto dim2 = end2 - begin2; \
+ auto dim1 = end1 - begin1; \
+ auto dim0 = end0 - begin0; \
+ auto nIter = dim5 * dim4 * dim3 * dim2 * dim1 * dim0; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (decltype(nIter) m = 0; m < nIter; ++m) { \
+ auto tmp1 = dim5 * dim4 * dim3 * dim2 * dim1; \
+ auto i0 = m / tmp1 + begin0; \
+ auto tmp2 = m % tmp1; \
+ tmp1 = dim5 * dim4 * dim3 * dim2; \
+ auto i1 = tmp2 / tmp1 + begin1; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim5 * dim4 * dim3; \
+ auto i2 = tmp2 / tmp1 + begin2; \
+ tmp2 = tmp2 % tmp1; \
+ tmp1 = dim5 * dim4; \
+ auto i3 = tmp2 / tmp1 + begin3; \
+ tmp2 = tmp2 % tmp1; \
+ auto i4 = tmp2 / dim5 + begin4; \
+ auto i5 = tmp2 % dim5 + begin5; \
+ functor(i0, i1, i2, i3, i4, i5, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ } // namespace Kokkos::Experimental::Impl
+
+#else
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_ITERATE(REDUCER, \
+ OPERATOR) \
+ namespace Kokkos::Experimental::Impl { \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<2> const& begin, \
+ OpenACCMDRangeEnd<2> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(2) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ functor(i0, i1, val); \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<2> const& begin, \
+ OpenACCMDRangeEnd<2> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(2) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ functor(i0, i1, val); \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<3> const& begin, \
+ OpenACCMDRangeEnd<3> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(3) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ functor(i0, i1, i2, val); \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<3> const& begin, \
+ OpenACCMDRangeEnd<3> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(3) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ functor(i0, i1, i2, val); \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<4> const& begin, \
+ OpenACCMDRangeEnd<4> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(4) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ functor(i0, i1, i2, i3, val); \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<4> const& begin, \
+ OpenACCMDRangeEnd<4> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(4) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ functor(i0, i1, i2, i3, val); \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<5> const& begin, \
+ OpenACCMDRangeEnd<5> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(5) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i4 = begin4; i4 < end4; ++i4) { \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ functor(i0, i1, i2, i3, i4, val); \
+ } \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<5> const& begin, \
+ OpenACCMDRangeEnd<5> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(5) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ for (auto i4 = begin4; i4 < end4; ++i4) { \
+ functor(i0, i1, i2, i3, i4, val); \
+ } \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateLeft, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<6> const& begin, \
+ OpenACCMDRangeEnd<6> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin5 = begin[5]; \
+ auto end5 = end[5]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(6) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i5 = begin5; i5 < end5; ++i5) { \
+ for (auto i4 = begin4; i4 < end4; ++i4) { \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ functor(i0, i1, i2, i3, i4, i5, val); \
+ } \
+ } \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(OpenACCIterateRight, ValueType& aval, \
+ Functor const& afunctor, \
+ OpenACCMDRangeBegin<6> const& begin, \
+ OpenACCMDRangeEnd<6> const& end, \
+ int async_arg) { \
+ auto val = aval; \
+ auto const functor(afunctor); \
+ auto begin0 = begin[0]; \
+ auto end0 = end[0]; \
+ auto begin1 = begin[1]; \
+ auto end1 = end[1]; \
+ auto begin2 = begin[2]; \
+ auto end2 = end[2]; \
+ auto begin3 = begin[3]; \
+ auto end3 = end[3]; \
+ auto begin4 = begin[4]; \
+ auto end4 = end[4]; \
+ auto begin5 = begin[5]; \
+ auto end5 = end[5]; \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector collapse(6) reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i0 = begin0; i0 < end0; ++i0) { \
+ for (auto i1 = begin1; i1 < end1; ++i1) { \
+ for (auto i2 = begin2; i2 < end2; ++i2) { \
+ for (auto i3 = begin3; i3 < end3; ++i3) { \
+ for (auto i4 = begin4; i4 < end4; ++i4) { \
+ for (auto i5 = begin5; i5 < end5; ++i5) { \
+ functor(i0, i1, i2, i3, i4, i5, val); \
+ } \
+ } \
+ } \
+ } \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ } // namespace Kokkos::Experimental::Impl
+
+#endif
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(REDUCER, OPERATOR) \
+ KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_ITERATE(REDUCER, OPERATOR) \
+ template <class Functor, class Scalar, class Space, class... Traits> \
+ struct Kokkos::Experimental::Impl::OpenACCParallelReduceMDRangeHelper< \
+ Functor, Kokkos::REDUCER<Scalar, Space>, \
+ Kokkos::MDRangePolicy<Traits...>, true> { \
+ using Policy = MDRangePolicy<Traits...>; \
+ using Reducer = REDUCER<Scalar, Space>; \
+ using ValueType = typename Reducer::value_type; \
+ \
+ OpenACCParallelReduceMDRangeHelper(Functor const& functor, \
+ Reducer const& reducer, \
+ Policy const& policy) { \
+ ValueType val; \
+ reducer.init(val); \
+ \
+ int const async_arg = policy.space().acc_async_queue(); \
+ \
+ OpenACCParallelReduce##REDUCER( \
+ std::integral_constant<Iterate, Policy::inner_direction>(), val, \
+ functor, policy.m_lower, policy.m_upper, async_arg); \
+ \
+ reducer.reference() = val; \
+ } \
+ }
+
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(Sum, +);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(Prod, *);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(Min, min);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(Max, max);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(LAnd, &&);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(LOr, ||);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(BAnd, &);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER(BOr, |);
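+
+// Each instantiation above pairs a built-in Kokkos reducer with the OpenACC
+// operator that appears in the generated reduction(OPERATOR:val) clause.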
+
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_MDRANGE_HELPER
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_ITERATE
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_REDUCE_RANGE_HPP
+#define KOKKOS_OPENACC_PARALLEL_REDUCE_RANGE_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Macros.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_ScheduleType.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <type_traits>
+
+namespace Kokkos::Experimental::Impl {
+
+// primary template: catch-all non-implemented custom reducers
+template <class Functor, class Reducer, class Policy,
+ bool = std::is_arithmetic_v<typename Reducer::value_type>>
+struct OpenACCParallelReduceHelper {
+ OpenACCParallelReduceHelper(Functor const&, Reducer const&, Policy const&) {
+ static_assert(Kokkos::Impl::always_false<Functor>::value,
+ "not implemented");
+ }
+};
+
+} // namespace Kokkos::Experimental::Impl
+
+template <class CombinedFunctorReducerType, class... Traits>
+class Kokkos::Impl::ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenACC> {
+ using Policy = RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Pointer = typename ReducerType::pointer_type;
+ using ValueType = typename ReducerType::value_type;
+
+ CombinedFunctorReducerType m_functor_reducer;
+ Policy m_policy;
+ Pointer m_result_ptr;
+ bool m_result_ptr_on_device;
+
+ public:
+ template <class ViewType>
+ ParallelReduce(CombinedFunctorReducerType const& functor_reducer,
+ Policy const& policy, ViewType const& result)
+ : m_functor_reducer(functor_reducer),
+ m_policy(policy),
+ m_result_ptr(result.data()),
+ m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ typename ViewType::memory_space>::accessible) {}
+
+ void execute() const {
+ auto const begin = m_policy.begin();
+ auto const end = m_policy.end();
+
+ ValueType val;
+ ReducerType const& reducer = m_functor_reducer.get_reducer();
+ reducer.init(&val);
+
+ if (end <= begin) {
+ if (!m_result_ptr_on_device) {
+ *m_result_ptr = val;
+ } else {
+ acc_memcpy_to_device(m_result_ptr, &val, sizeof(ValueType));
+ }
+ return;
+ }
+
+ int const async_arg = m_policy.space().acc_async_queue();
+
+ Kokkos::Experimental::Impl::OpenACCParallelReduceHelper(
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ FunctorType, Policy,
+ Kokkos::Experimental::Impl::RoutineClause::seq>(
+ m_functor_reducer.get_functor()),
+ std::conditional_t<
+ std::is_same_v<FunctorType, typename ReducerType::functor_type>,
+ Sum<ValueType>, typename ReducerType::functor_type>(val),
+ m_policy);
+
+ // The OpenACC backend supports only built-in Reducer types, so the
+ // reducer.final() call below is a no-op.
+ reducer.final(&val);
+ // The acc_wait(async_arg) calls in the if-else statement below are needed
+ // because the OpenACC compute kernel above may execute asynchronously and
+ // val is a local host variable.
+ if (!m_result_ptr_on_device) {
+ acc_wait(async_arg);
+ *m_result_ptr = val;
+ } else {
+ acc_memcpy_to_device_async(m_result_ptr, &val, sizeof(ValueType),
+ async_arg);
+ acc_wait(async_arg);
+ }
+ }
+};
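+
+// Usage sketch (host code, names hypothetical); as above, reducing into a
+// plain scalar implicitly selects the built-in Sum reducer:
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<Kokkos::Experimental::OpenACC>(0, n),
+//       KOKKOS_LAMBDA(int i, double& lsum) { lsum += x(i); }, sum);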
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE(REDUCER, \
+ OPERATOR) \
+ namespace Kokkos::Experimental::Impl { \
+ template <class IndexType, class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(Schedule<Static>, int chunk_size, \
+ IndexType begin, IndexType end, \
+ ValueType& aval, \
+ Functor const& afunctor, \
+ int async_arg) { \
+ /* FIXME_OPENACC FIXME_NVHPC workaround compiler bug (incorrect scope \
+ analysis) \
+ NVC++-S-1067-Cannot determine bounds for array - functor */ \
+ auto const functor(afunctor); \
+ auto val = aval; \
+ if (chunk_size >= 1) { \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang(static:chunk_size) vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i = begin; i < end; i++) { \
+ functor(i, val); \
+ } \
+ } else { \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang(static:*) vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i = begin; i < end; i++) { \
+ functor(i, val); \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ \
+ template <class IndexType, class ValueType, class Functor> \
+ void OpenACCParallelReduce##REDUCER(Schedule<Dynamic>, int chunk_size, \
+ IndexType begin, IndexType end, \
+ ValueType& aval, \
+ Functor const& afunctor, \
+ int async_arg) { \
+ /* FIXME_OPENACC FIXME_NVHPC workaround compiler bug (incorrect scope \
+ analysis) \
+ NVC++-S-1067-Cannot determine bounds for array - functor */ \
+ auto const functor(afunctor); \
+ auto val = aval; \
+ if (chunk_size >= 1) { \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang(static:chunk_size) vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i = begin; i < end; i++) { \
+ functor(i, val); \
+ } \
+ } else { \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector reduction(OPERATOR:val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (auto i = begin; i < end; i++) { \
+ functor(i, val); \
+ } \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ } // namespace Kokkos::Experimental::Impl
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(REDUCER, OPERATOR) \
+ KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE(REDUCER, OPERATOR) \
+ template <class Functor, class Scalar, class Space, class... Traits> \
+ struct Kokkos::Experimental::Impl::OpenACCParallelReduceHelper< \
+ Functor, Kokkos::REDUCER<Scalar, Space>, Kokkos::RangePolicy<Traits...>, \
+ true> { \
+ using Policy = RangePolicy<Traits...>; \
+ using ScheduleType = \
+ Kokkos::Experimental::Impl::OpenACCScheduleType<Policy>; \
+ using Reducer = REDUCER<Scalar, Space>; \
+ using ValueType = typename Reducer::value_type; \
+ \
+ OpenACCParallelReduceHelper(Functor const& functor, \
+ Reducer const& reducer, \
+ Policy const& policy) { \
+ auto const begin = policy.begin(); \
+ auto const end = policy.end(); \
+ \
+ if (end <= begin) { \
+ return; \
+ } \
+ \
+ ValueType val; \
+ reducer.init(val); \
+ \
+ int const async_arg = policy.space().acc_async_queue(); \
+ int const chunk_size = policy.chunk_size(); \
+ \
+ OpenACCParallelReduce##REDUCER(ScheduleType(), chunk_size, begin, end, \
+ val, functor, async_arg); \
+ \
+ reducer.reference() = val; \
+ } \
+ }
+
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(Sum, +);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(Prod, *);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(Min, min);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(Max, max);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(LAnd, &&);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(LOr, ||);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(BAnd, &);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER(BOr, |);
+
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_HELPER
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_REDUCE_TEAM_HPP
+#define KOKKOS_OPENACC_PARALLEL_REDUCE_TEAM_HPP
+
+#include <OpenACC/Kokkos_OpenACC_Team.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+#define KOKKOS_IMPL_OPENACC_LOOP_CLAUSE \
+ Kokkos::Experimental::Impl::RoutineClause::seq
+#else
+#define KOKKOS_IMPL_OPENACC_LOOP_CLAUSE \
+ Kokkos::Experimental::Impl::RoutineClause::worker
+#endif
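+
+// With collapsed hierarchical constructs the team body is compiled as an
+// "acc routine seq" and nested team/vector loops run sequentially per rank;
+// otherwise it is a "routine worker" so nested loops can exploit worker-
+// and vector-level parallelism.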
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Hierarchical Parallelism -> Team level implementation
+namespace Kokkos::Experimental::Impl {
+
+// primary template: catch-all non-implemented custom reducers
+template <class Functor, class Reducer, class Policy,
+ bool = std::is_arithmetic_v<typename Reducer::value_type>>
+struct OpenACCParallelReduceTeamHelper {
+ OpenACCParallelReduceTeamHelper(Functor const&, Reducer const&,
+ Policy const&) {
+ static_assert(Kokkos::Impl::always_false<Functor>::value,
+ "not implemented");
+ }
+};
+
+} // namespace Kokkos::Experimental::Impl
+
+template <class CombinedFunctorReducerType, class... Properties>
+class Kokkos::Impl::ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::OpenACC> {
+ private:
+ using Policy =
+ TeamPolicyInternal<Kokkos::Experimental::OpenACC, Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using value_type = typename ReducerType::value_type;
+ using pointer_type = typename ReducerType::pointer_type;
+
+ CombinedFunctorReducerType m_functor_reducer;
+ Policy m_policy;
+ pointer_type m_result_ptr;
+ bool m_result_ptr_on_device;
+
+ public:
+ void execute() const {
+ auto league_size = m_policy.league_size();
+ auto team_size = m_policy.team_size();
+ auto vector_length = m_policy.impl_vector_length();
+
+ int const async_arg = m_policy.space().acc_async_queue();
+ value_type val;
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+ reducer.init(&val);
+ if (league_size <= 0) {
+ if (!m_result_ptr_on_device) {
+ *m_result_ptr = val;
+ } else {
+ acc_memcpy_to_device(m_result_ptr, &val, sizeof(value_type));
+ }
+ return;
+ }
+
+ Kokkos::Experimental::Impl::OpenACCParallelReduceTeamHelper(
+ Kokkos::Experimental::Impl::FunctorAdapter<
+ FunctorType, Policy, KOKKOS_IMPL_OPENACC_LOOP_CLAUSE>(
+ m_functor_reducer.get_functor()),
+ std::conditional_t<
+ std::is_same_v<FunctorType, typename ReducerType::functor_type>,
+ Sum<value_type>, typename ReducerType::functor_type>(val),
+ m_policy);
+
+ // The OpenACC backend supports only built-in Reducer types, so the
+ // reducer.final() call below is a no-op.
+ reducer.final(&val);
+ // The acc_wait(async_arg) calls in the if-else statement below are needed
+ // because the OpenACC compute kernel above may execute asynchronously and
+ // val is a local host variable.
+ if (!m_result_ptr_on_device) {
+ acc_wait(async_arg);
+ *m_result_ptr = val;
+ } else {
+ acc_memcpy_to_device_async(m_result_ptr, &val, sizeof(value_type),
+ async_arg);
+ acc_wait(async_arg);
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result_view)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()),
+ m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ typename ViewType::memory_space>::accessible) {}
+};
+
+namespace Kokkos {
+
+// Hierarchical Parallelism -> Team thread level implementation
+// FIXME_OPENACC: custom reduction is not implemented.
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+ static_assert(Kokkos::Impl::always_false<Lambda>::value,
+ "custom reduction is not implemented");
+}
+
+// Hierarchical Parallelism -> Thread vector level implementation
+// FIXME_OPENACC: custom reduction is not implemented.
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+ static_assert(Kokkos::Impl::always_false<Lambda>::value,
+ "custom reduction is not implemented");
+}
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS
+
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_PRAGMA \
+ vector vector_length(team_size * vector_length)
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_ITRS league_size * team_size * vector_length
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_LEAGUE_ID_INIT \
+ i / (team_size * vector_length)
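+
+// These macros assemble the collapsed-mode team reduction: a single flat
+// gang-vector loop over league_size * team_size * vector_length iterations
+// whose league id is recovered as i / (team_size * vector_length).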
+
+namespace Kokkos {
+
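+// Common pattern below: FunctorAnalysis wraps the lambda (or reducer) to
+// obtain init()/final() for the deduced value type, and only the lane with
+// j_start == 0 executes the sequential loop and publishes the result.
+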
+// Hierarchical Parallelism -> Team thread level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer_v<ValueType>>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+ iType j_start =
+ loop_boundaries.team.team_rank() / loop_boundaries.team.vector_length();
+ if (j_start == 0) {
+#pragma acc loop seq
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++)
+ lambda(i, tmp);
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+ }
+}
+
+#pragma acc routine seq
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer_v<ReducerType>>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, const ReducerType& reducer) {
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+ iType j_start =
+ loop_boundaries.team.team_rank() / loop_boundaries.team.vector_length();
+ if (j_start == 0) {
+#pragma acc loop seq
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++)
+ lambda(i, tmp);
+
+ wrapped_reducer.final(&tmp);
+ reducer.reference() = tmp;
+ }
+}
+
+// Hierarchical Parallelism -> Thread vector level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer_v<ValueType>>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+ iType j_start =
+ loop_boundaries.team.team_rank() % loop_boundaries.team.vector_length();
+ if (j_start == 0) {
+#pragma acc loop seq
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, tmp);
+ }
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+ }
+}
+
+#pragma acc routine seq
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer_v<ReducerType>>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, const ReducerType& reducer) {
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+ iType j_start =
+ loop_boundaries.team.team_rank() % loop_boundaries.team.vector_length();
+ if (j_start == 0) {
+#pragma acc loop seq
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, tmp);
+ }
+
+ wrapped_reducer.final(&tmp);
+ reducer.reference() = tmp;
+ }
+}
+
+// Hierarchical Parallelism -> Team vector level implementation
+#pragma acc routine seq
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+ iType j_start =
+ loop_boundaries.team.team_rank() % loop_boundaries.team.vector_length();
+ if (j_start == 0) {
+#pragma acc loop seq
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, tmp);
+ }
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+ }
+}
+
+} // namespace Kokkos
+
+#else /* #ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS */
+
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_PRAGMA \
+ num_workers(team_size) vector_length(vector_length)
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_ITRS league_size
+#define KOKKOS_IMPL_ACC_REDUCE_TEAM_LEAGUE_ID_INIT i
+
+// FIXME_OPENACC: the implementation below conforms to the OpenACC standard,
+// but the NVHPC compiler (V22.11) fails to compile it because it does not yet
+// support lambda expressions containing parallel loops.
+
+namespace Kokkos {
+
+// Hierarchical Parallelism -> Team thread level implementation
+#pragma acc routine worker
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer_v<ValueType>>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+#pragma acc loop worker reduction(+ : tmp)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++)
+ lambda(i, tmp);
+
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+}
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(REDUCER, OPERATOR) \
+ KOKKOS_IMPL_ACC_PRAGMA(routine worker) \
+ template <typename iType, class Lambda, class Scalar, class Space> \
+ KOKKOS_INLINE_FUNCTION \
+ std::enable_if_t<Kokkos::is_reducer_v<Kokkos::REDUCER<Scalar, Space>>> \
+ parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct< \
+ iType, Impl::OpenACCTeamMember>& loop_boundaries, \
+ const Lambda& lambda, \
+ const Kokkos::REDUCER<Scalar, Space>& reducer) { \
+ using ValueType = typename Kokkos::REDUCER<Scalar, Space>::value_type; \
+ ValueType tmp = ValueType(); \
+ reducer.init(tmp); \
+ KOKKOS_IMPL_ACC_PRAGMA(loop worker reduction(OPERATOR : tmp)) \
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) \
+ lambda(i, tmp); \
+ reducer.reference() = tmp; \
+ }
+
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(Sum, +);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(Prod, *);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(Min, min);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(Max, max);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(LAnd, &&);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(LOr, ||);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(BAnd, &);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD(BOr, |);
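+// Illustrative use (the View `v`, extent `n`, scalar `result`, and team
+// member `team` are assumed identifiers): a built-in reducer call such as
+//   parallel_reduce(TeamThreadRange(team, n),
+//                   [=](int i, double& upd) { upd += v(i); },
+//                   Kokkos::Sum<double>(result));
+// dispatches to the Sum specialization generated above.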
+
+// Hierarchical Parallelism -> Thread vector level implementation
+#pragma acc routine vector
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer_v<ValueType>>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenACCTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+#pragma acc loop vector reduction(+ : tmp)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, tmp);
+ }
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+}
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(REDUCER, OPERATOR) \
+ KOKKOS_IMPL_ACC_PRAGMA(routine vector) \
+ template <typename iType, class Lambda, class Scalar, class Space> \
+ KOKKOS_INLINE_FUNCTION \
+ std::enable_if_t<Kokkos::is_reducer_v<Kokkos::REDUCER<Scalar, Space>>> \
+ parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct< \
+ iType, Impl::OpenACCTeamMember>& loop_boundaries, \
+ const Lambda& lambda, \
+ const Kokkos::REDUCER<Scalar, Space>& reducer) { \
+ using ValueType = typename Kokkos::REDUCER<Scalar, Space>::value_type; \
+ ValueType tmp; \
+ reducer.init(tmp); \
+ KOKKOS_IMPL_ACC_PRAGMA(loop vector reduction(OPERATOR : tmp)) \
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) { \
+ lambda(i, tmp); \
+ } \
+ reducer.reference() = tmp; \
+ }
+
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(Sum, +);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(Prod, *);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(Min, min);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(Max, max);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(LAnd, &&);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(LOr, ||);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(BAnd, &);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR(BOr, |);
+
+// Hierarchical Parallelism -> Team vector level implementation
+#pragma acc routine vector
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>&
+ loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::OpenACCTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type tmp;
+ wrapped_reducer.init(&tmp);
+
+#pragma acc loop vector reduction(+ : tmp)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, tmp);
+ }
+
+ wrapped_reducer.final(&tmp);
+ result = tmp;
+}
+
+} // namespace Kokkos
+
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_THREAD
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_THREAD_VECTOR
+
+#endif /* #ifdef KOKKOS_ENABLE_OPENACC_COLLAPSE_HIERARCHICAL_CONSTRUCTS */
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE(REDUCER, \
+ OPERATOR) \
+ namespace Kokkos::Experimental::Impl { \
+ template <class Policy, class ValueType, class Functor> \
+ void OpenACCParallelReduceTeam##REDUCER(Policy const policy, \
+ ValueType& aval, \
+ Functor const& afunctor, \
+ int async_arg) { \
+ auto const functor = afunctor; \
+ auto val = aval; \
+ auto const league_size = policy.league_size(); \
+ auto const team_size = policy.team_size(); \
+ auto const vector_length = policy.impl_vector_length(); \
+ /* clang-format off */ \
+ KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang num_gangs(league_size) KOKKOS_IMPL_ACC_REDUCE_TEAM_PRAGMA reduction(OPERATOR : val) copyin(functor) async(async_arg)) \
+ /* clang-format on */ \
+ for (int i = 0; i < KOKKOS_IMPL_ACC_REDUCE_TEAM_ITRS; i++) { \
+ int league_id = KOKKOS_IMPL_ACC_REDUCE_TEAM_LEAGUE_ID_INIT; \
+ typename Policy::member_type team(league_id, league_size, team_size, \
+ vector_length); \
+ functor(team, val); \
+ } \
+ acc_wait(async_arg); \
+ aval = val; \
+ } \
+ } // namespace Kokkos::Experimental::Impl
+
+#define KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(REDUCER, OPERATOR) \
+ KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE(REDUCER, OPERATOR) \
+ \
+ template <class Functor, class Scalar, class Space, class... Traits> \
+ struct Kokkos::Experimental::Impl::OpenACCParallelReduceTeamHelper< \
+ Functor, Kokkos::REDUCER<Scalar, Space>, \
+ Kokkos::Impl::TeamPolicyInternal<Traits...>, true> { \
+ using Policy = Kokkos::Impl::TeamPolicyInternal<Traits...>; \
+ using Reducer = REDUCER<Scalar, Space>; \
+ using ValueType = typename Reducer::value_type; \
+ \
+ OpenACCParallelReduceTeamHelper(Functor const& functor, \
+ Reducer const& reducer, \
+ Policy const& policy) { \
+ auto league_size = policy.league_size(); \
+ auto team_size = policy.team_size(); \
+ auto vector_length = policy.impl_vector_length(); \
+ \
+ if (league_size <= 0) { \
+ return; \
+ } \
+ \
+ ValueType val; \
+ reducer.init(val); \
+ \
+ int const async_arg = policy.space().acc_async_queue(); \
+ \
+ OpenACCParallelReduceTeam##REDUCER(policy, val, functor, async_arg); \
+ \
+ reducer.reference() = val; \
+ } \
+ }
+
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(Sum, +);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(Prod, *);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(Min, min);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(Max, max);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(LAnd, &&);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(LOr, ||);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(BAnd, &);
+KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER(BOr, |);
+
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_TEAM_HELPER
+#undef KOKKOS_IMPL_OPENACC_PARALLEL_REDUCE_DISPATCH_SCHEDULE
+#undef KOKKOS_IMPL_ACC_REDUCE_TEAM_PRAGMA
+#undef KOKKOS_IMPL_ACC_REDUCE_TEAM_ITRS
+#undef KOKKOS_IMPL_ACC_REDUCE_TEAM_LEAGUE_ID_INIT
+
+#endif /* #ifndef KOKKOS_OPENACC_PARALLEL_REDUCE_TEAM_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_PARALLEL_SCAN_RANGE_HPP
+#define KOKKOS_OPENACC_PARALLEL_SCAN_RANGE_HPP
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_FunctorAdapter.hpp>
+#include <OpenACC/Kokkos_OpenACC_Macros.hpp>
+#include <Kokkos_Parallel.hpp>
+
+// Clacc uses an alternative implementation to work around not-yet-implemented
+// OpenACC features: it does not fully support private clauses for
+// gang-private variables, so the alternative implementation allocates the
+// gang-private arrays in GPU global memory using array expansion instead of
+// the private clause.
+/* clang-format off */
+#ifdef KOKKOS_COMPILER_CLANG
+#define KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(THREADID) \
+ element_values[team_id * 2 * chunk_size + THREADID]
+#define KOKKOS_IMPL_ACC_ELEMENT_VALUES_CLAUSE create(element_values [0:num_elements])
+#else
+#define KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(THREADID) element_values[THREADID]
+#define KOKKOS_IMPL_ACC_ELEMENT_VALUES_CLAUSE private(element_values [0:num_elements])
+#endif
+/* clang-format on */
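+// E.g., for team_id = 3 and chunk_size = 128, Clacc indexes the expanded
+// global array as element_values[3*2*128 + thread_id], whereas the other
+// compilers index a gang-private element_values[thread_id] directly.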
+
+namespace Kokkos::Impl {
+
+template <class Functor, class GivenValueType, class... Traits>
+class ParallelScanOpenACCBase {
+ protected:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using Analysis =
+ Kokkos::Impl::FunctorAnalysis<Kokkos::Impl::FunctorPatternInterface::SCAN,
+ Policy, Functor, GivenValueType>;
+ using PointerType = typename Analysis::pointer_type;
+ using ValueType = typename Analysis::value_type;
+ using MemberType = typename Policy::member_type;
+ using IndexType = typename Policy::index_type;
+ Functor m_functor;
+ Policy m_policy;
+ ValueType* m_result_ptr;
+ bool m_result_ptr_device_accessible;
+ static constexpr MemberType default_scan_chunk_size = 128;
+
+ public:
+ ParallelScanOpenACCBase(Functor const& arg_functor, Policy const& arg_policy,
+ ValueType* arg_result_ptr,
+ bool arg_result_ptr_device_accessible)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_ptr),
+ m_result_ptr_device_accessible(arg_result_ptr_device_accessible) {}
+
+ // This function implements the parallel scan algorithm based on the parallel
+ // prefix sum algorithm proposed by Hillis and Steele (doi:10.1145/7902.7903),
+ // which offers a shorter span and more parallelism but may not be
+ // work-efficient.
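+ // Illustration: scanning a 4-element chunk [a0, a1, a2, a3] with join = +
+ // takes log2(4) = 2 doubling rounds,
+ //   step 1: [a0, a0+a1, a1+a2, a2+a3]
+ //   step 2: [a0, a0+a1, a0+a1+a2, a0+a1+a2+a3]
+ // i.e. O(n log n) total joins in exchange for a span of O(log n).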
+ void OpenACCParallelScanRangePolicy(const IndexType begin,
+ const IndexType end, IndexType chunk_size,
+ const int async_arg) const {
+ if (chunk_size > 1) {
+ if (!Impl::is_integral_power_of_two(chunk_size))
+ Kokkos::abort(
+ "RangePolicy blocking granularity must be power of two to be used "
+ "with OpenACC parallel_scan()");
+ } else {
+ chunk_size = default_scan_chunk_size;
+ }
+ const Kokkos::Experimental::Impl::FunctorAdapter<
+ Functor, Policy, Kokkos::Experimental::Impl::RoutineClause::seq>
+ functor(m_functor);
+ const IndexType N = end - begin;
+ const IndexType n_chunks = (N + chunk_size - 1) / chunk_size;
+#ifdef KOKKOS_COMPILER_CLANG
+ int const num_elements = n_chunks * 2 * chunk_size;
+#else
+ int const num_elements = 2 * chunk_size;
+#endif
+ Kokkos::View<ValueType*, Kokkos::Experimental::OpenACCSpace> chunk_values(
+ "Kokkos::OpenACCParallelScan::chunk_values", n_chunks);
+ Kokkos::View<ValueType*, Kokkos::Experimental::OpenACCSpace> offset_values(
+ "Kokkos::OpenACCParallelScan::offset_values", n_chunks);
+ Kokkos::View<ValueType, Kokkos::Experimental::OpenACCSpace> m_result_total(
+ "Kokkos::OpenACCParallelScan::m_result_total");
+ std::unique_ptr<ValueType[]> element_values_owner(
+ new ValueType[num_elements]);
+ ValueType* element_values = element_values_owner.get();
+ typename Analysis::Reducer final_reducer(m_functor);
+
+#pragma acc enter data copyin(functor, final_reducer) \
+ copyin(chunk_values, offset_values) async(async_arg)
+
+ /* clang-format off */
+KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector_length(chunk_size) KOKKOS_IMPL_ACC_ELEMENT_VALUES_CLAUSE present(functor, chunk_values, final_reducer) async(async_arg))
+ /* clang-format on */
+ for (IndexType team_id = 0; team_id < n_chunks; ++team_id) {
+ IndexType current_step = 0;
+ IndexType next_step = 1;
+ IndexType temp;
+#pragma acc loop vector
+ for (IndexType thread_id = 0; thread_id < chunk_size; ++thread_id) {
+ const IndexType local_offset = team_id * chunk_size;
+ const IndexType idx = local_offset + thread_id;
+ ValueType update;
+ final_reducer.init(&update);
+ if ((idx > 0) && (idx < N)) functor(idx - 1, update, false);
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(thread_id) = update;
+ }
+ for (IndexType step_size = 1; step_size < chunk_size; step_size *= 2) {
+#pragma acc loop vector
+ for (IndexType thread_id = 0; thread_id < chunk_size; ++thread_id) {
+ if (thread_id < step_size) {
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(next_step * chunk_size +
+ thread_id) =
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(current_step * chunk_size +
+ thread_id);
+ } else {
+ ValueType localValue = KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size + thread_id);
+ final_reducer.join(&localValue, &KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size +
+ thread_id - step_size));
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(next_step * chunk_size +
+ thread_id) = localValue;
+ }
+ }
+ temp = current_step;
+ current_step = next_step;
+ next_step = temp;
+ }
+ chunk_values(team_id) = KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size + chunk_size - 1);
+ }
+
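+ // Second pass: a single sequential gang turns the per-chunk totals into
+ // exclusive chunk offsets, i.e. offset_values(k) joins chunk_values(0..k-1)
+ // while offset_values(0) keeps the identity value.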
+ ValueType tempValue;
+#pragma acc parallel loop seq num_gangs(1) num_workers(1) vector_length(1) \
+ present(chunk_values, offset_values, final_reducer) async(async_arg)
+ for (IndexType team_id = 0; team_id < n_chunks; ++team_id) {
+ if (team_id == 0) {
+ final_reducer.init(&offset_values(0));
+ final_reducer.init(&tempValue);
+ } else {
+ final_reducer.join(&tempValue, &chunk_values(team_id - 1));
+ offset_values(team_id) = tempValue;
+ }
+ }
+
+ /* clang-format off */
+KOKKOS_IMPL_ACC_PRAGMA(parallel loop gang vector_length(chunk_size) KOKKOS_IMPL_ACC_ELEMENT_VALUES_CLAUSE present(functor, offset_values, final_reducer) copyin(m_result_total) async(async_arg))
+ /* clang-format on */
+ for (IndexType team_id = 0; team_id < n_chunks; ++team_id) {
+ IndexType current_step = 0;
+ IndexType next_step = 1;
+ IndexType temp;
+#pragma acc loop vector
+ for (IndexType thread_id = 0; thread_id < chunk_size; ++thread_id) {
+ const IndexType local_offset = team_id * chunk_size;
+ const IndexType idx = local_offset + thread_id;
+ ValueType update;
+ final_reducer.init(&update);
+ if (thread_id == 0) {
+ final_reducer.join(&update, &offset_values(team_id));
+ }
+ if ((idx > 0) && (idx < N)) functor(idx - 1, update, false);
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(thread_id) = update;
+ }
+ for (IndexType step_size = 1; step_size < chunk_size; step_size *= 2) {
+#pragma acc loop vector
+ for (IndexType thread_id = 0; thread_id < chunk_size; ++thread_id) {
+ if (thread_id < step_size) {
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(next_step * chunk_size +
+ thread_id) =
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(current_step * chunk_size +
+ thread_id);
+ } else {
+ ValueType localValue = KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size + thread_id);
+ final_reducer.join(&localValue, &KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size +
+ thread_id - step_size));
+ KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(next_step * chunk_size +
+ thread_id) = localValue;
+ }
+ }
+ temp = current_step;
+ current_step = next_step;
+ next_step = temp;
+ }
+#pragma acc loop vector
+ for (IndexType thread_id = 0; thread_id < chunk_size; ++thread_id) {
+ const IndexType local_offset = team_id * chunk_size;
+ const IndexType idx = local_offset + thread_id;
+ ValueType update = KOKKOS_IMPL_ACC_ACCESS_ELEMENTS(
+ current_step * chunk_size + thread_id);
+ if (idx < N) functor(idx, update, true);
+ if (idx == N - 1) {
+ if (m_result_ptr_device_accessible) {
+ *m_result_ptr = update;
+ } else {
+ m_result_total() = update;
+ }
+ }
+ }
+ }
+ if (!m_result_ptr_device_accessible && m_result_ptr != nullptr) {
+ DeepCopy<HostSpace, Kokkos::Experimental::OpenACCSpace,
+ Kokkos::Experimental::OpenACC>(m_policy.space(), m_result_ptr,
+ m_result_total.data(),
+ sizeof(ValueType));
+ }
+
+#pragma acc exit data delete (functor, chunk_values, offset_values, \
+ final_reducer)async(async_arg)
+ acc_wait(async_arg);
+ }
+
+ void execute() const {
+ const IndexType begin = m_policy.begin();
+ const IndexType end = m_policy.end();
+ IndexType chunk_size = m_policy.chunk_size();
+
+ if (end <= begin) {
+ if (!m_result_ptr_device_accessible && m_result_ptr != nullptr) {
+ *m_result_ptr = 0;
+ }
+ return;
+ }
+
+ int const async_arg = m_policy.space().acc_async_queue();
+
+ OpenACCParallelScanRangePolicy(begin, end, chunk_size, async_arg);
+ }
+};
+
+} // namespace Kokkos::Impl
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class Functor, class... Traits>
+class Kokkos::Impl::ParallelScan<Functor, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenACC>
+ : public ParallelScanOpenACCBase<Functor, void, Traits...> {
+ using base_t = ParallelScanOpenACCBase<Functor, void, Traits...>;
+ using IndexType = typename base_t::IndexType;
+
+ public:
+ void execute() const {
+ const IndexType begin = base_t::m_policy.begin();
+ const IndexType end = base_t::m_policy.end();
+ IndexType chunk_size = base_t::m_policy.chunk_size();
+
+ int const async_arg = base_t::m_policy.space().acc_async_queue();
+
+ base_t::OpenACCParallelScanRangePolicy(begin, end, chunk_size, async_arg);
+ }
+
+ ParallelScan(const Functor& arg_functor,
+ const typename base_t::Policy& arg_policy)
+ : base_t(arg_functor, arg_policy, nullptr, false) {}
+};
+
+template <class FunctorType, class ReturnType, class... Traits>
+class Kokkos::Impl::ParallelScanWithTotal<
+ FunctorType, Kokkos::RangePolicy<Traits...>, ReturnType,
+ Kokkos::Experimental::OpenACC>
+ : public ParallelScanOpenACCBase<FunctorType, ReturnType, Traits...> {
+ using base_t = ParallelScanOpenACCBase<FunctorType, ReturnType, Traits...>;
+ using IndexType = typename base_t::IndexType;
+
+ public:
+ void execute() const {
+ const IndexType begin = base_t::m_policy.begin();
+ const IndexType end = base_t::m_policy.end();
+ IndexType chunk_size = base_t::m_policy.chunk_size();
+
+ if (end <= begin) {
+ if (!base_t::m_result_ptr_device_accessible &&
+ base_t::m_result_ptr != nullptr) {
+ *base_t::m_result_ptr = 0;
+ }
+ return;
+ }
+
+ int const async_arg = base_t::m_policy.space().acc_async_queue();
+
+ base_t::OpenACCParallelScanRangePolicy(begin, end, chunk_size, async_arg);
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const typename base_t::Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : base_t(arg_functor, arg_policy, arg_result_view.data(),
+ MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+ typename ViewType::memory_space>::accessible) {
+ }
+};
+
+#undef KOKKOS_IMPL_ACC_ACCESS_ELEMENTS
+#undef KOKKOS_IMPL_ACC_ELEMENT_VALUES_CLAUSE
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_SCHEDULE_TYPE_HPP
+#define KOKKOS_OPENACC_SCHEDULE_TYPE_HPP
+
+#include <Kokkos_Concepts.hpp>
+#include <type_traits>
+
+namespace Kokkos::Experimental::Impl {
+
+template <class Policy, class ScheduleType = typename Policy::schedule_type>
+struct OpenACCSchedule {
+ static_assert(is_execution_policy_v<Policy>);
+ static_assert(std::is_void_v<ScheduleType> ||
+ std::is_same_v<ScheduleType, Schedule<Static>> ||
+ std::is_same_v<ScheduleType, Schedule<Dynamic>>);
+ using type =
+ std::conditional_t<std::is_same_v<ScheduleType, Schedule<Static>>,
+ Schedule<Static>, Schedule<Dynamic>>;
+};
+
+template <class Policy>
+using OpenACCScheduleType = typename OpenACCSchedule<Policy>::type;
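+// E.g., only an explicit Schedule<Static> maps to Schedule<Static>; both
+// Schedule<Dynamic> and an unspecified (void) schedule type map to
+// Schedule<Dynamic>.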
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_DeepCopy.hpp>
+#include <OpenACC/Kokkos_OpenACC_SharedAllocationRecord.hpp>
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::Experimental::OpenACCSpace);
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_SHARED_ALLOCATION_RECORD_HPP
+#define KOKKOS_OPENACC_SHARED_ALLOCATION_RECORD_HPP
+
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION(
+ Kokkos::Experimental::OpenACCSpace);
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_TEAM_HPP
+#define KOKKOS_OPENACC_TEAM_HPP
+
+#include <openacc.h>
+#include <impl/Kokkos_Traits.hpp>
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+class OpenACCTeamMember {
+ public:
+ constexpr static int TEAM_REDUCE_SIZE = 512;
+ // FIXME_OPENACC: default-team-size macros are temporarily used for
+ // team_size_max and team_size_recommended APIs
+ constexpr static int DEFAULT_TEAM_SIZE_MAX = 512;
+ constexpr static int DEFAULT_TEAM_SIZE_REC = 128;
+
+ using execution_space = Kokkos::Experimental::OpenACC;
+ using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = OpenACCTeamMember;
+
+ scratch_memory_space m_team_shared;
+ int m_team_scratch_size[2];
+ int m_team_rank;
+ int m_team_size;
+ int m_league_rank;
+ int m_league_size;
+ int m_vector_length;
+
+ public:
+ KOKKOS_FUNCTION
+ const execution_space::scratch_memory_space& team_shmem() const {
+ return m_team_shared.set_team_thread_mode(0, 1, 0);
+ }
+
+ KOKKOS_FUNCTION
+ const execution_space::scratch_memory_space& team_scratch(int level) const {
+ return m_team_shared.set_team_thread_mode(level, 1,
+ m_team_scratch_size[level]);
+ }
+
+ KOKKOS_FUNCTION
+ const execution_space::scratch_memory_space& thread_scratch(int level) const {
+ return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+ }
+
+ KOKKOS_FUNCTION int league_rank() const { return m_league_rank; }
+ KOKKOS_FUNCTION int league_size() const { return m_league_size; }
+ KOKKOS_FUNCTION int team_rank() const { return m_team_rank; }
+ KOKKOS_FUNCTION int vector_length() const { return m_vector_length; }
+ KOKKOS_FUNCTION int team_size() const { return m_team_size; }
+
+ // FIXME_OPENACC: OpenACC does not provide any explicit barrier constructs
+ // for device kernels.
+ KOKKOS_FUNCTION void team_barrier() const {
+ Kokkos::abort(
+ "Kokkos::Experimental::OpenACC ERROR: OpenACC does not provide any "
+ "explicit barrier constructs for device kernels; exit!");
+ }
+
+ // FIXME_OPENACC: team_broadcast() is not implemented.
+ template <class ValueType>
+ KOKKOS_FUNCTION void team_broadcast(ValueType& value, int thread_id) const {
+ static_assert(Kokkos::Impl::always_false<ValueType>::value,
+ "Kokkos Error: team_broadcast() is not implemented for the "
+ "OpenACC backend");
+ }
+
+ template <class Closure, class ValueType>
+ KOKKOS_FUNCTION void team_broadcast(const Closure& f, ValueType& value,
+ int thread_id) const {
+ f(value);
+ team_broadcast(value, thread_id);
+ }
+
+ // FIXME_OPENACC: team_reduce() is not implemented.
+ template <class ValueType, class JoinOp>
+ KOKKOS_FUNCTION ValueType team_reduce(const ValueType& value,
+ const JoinOp& op_in) const {
+ static_assert(Kokkos::Impl::always_false<ValueType>::value,
+ "Kokkos Error: team_reduce() is not implemented for the "
+ "OpenACC backend");
+ return ValueType();
+ }
+
+ // FIXME_OPENACC: team_scan() is not implemented.
+ template <typename ArgType>
+ KOKKOS_FUNCTION ArgType team_scan(const ArgType& /*value*/,
+ ArgType* const /*global_accum*/) const {
+ static_assert(
+ Kokkos::Impl::always_false<ArgType>::value,
+ "Kokkos Error: team_scan() is not implemented for the OpenACC backend");
+ return ArgType();
+ }
+
+ template <typename Type>
+ KOKKOS_FUNCTION Type team_scan(const Type& value) const {
+ return this->template team_scan<Type>(value, nullptr);
+ }
+
+ //----------------------------------------
+ // Private for the driver
+
+ private:
+ using space = execution_space::scratch_memory_space;
+
+ public:
+ // FIXME_OPENACC - 512 (16*32) bytes at the beginning of the scratch space
+ // for each league are reserved for reductions. The size should actually be
+ // based on the ValueType of the reduction variable.
+ OpenACCTeamMember(const int league_rank, const int league_size,
+ const int team_size,
+ const int vector_length) // const TeamPolicyInternal<
+ // OpenACC, Properties ...> & team
+ : m_team_size(team_size),
+ m_league_rank(league_rank),
+ m_league_size(league_size),
+ m_vector_length(vector_length) {
+#ifdef KOKKOS_COMPILER_NVHPC
+ m_team_rank = __pgi_vectoridx();
+#else
+ m_team_rank = 0;
+#endif
+ }
+
+ static int team_reduce_size() { return TEAM_REDUCE_SIZE; }
+};
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::OpenACC, Properties...>
+ : public PolicyTraits<Properties...> {
+ public:
+ //! Tag this class as a kokkos execution policy
+ using execution_policy = TeamPolicyInternal;
+
+ using traits = PolicyTraits<Properties...>;
+
+ //----------------------------------------
+
+ // FIXME_OPENACC: update team_size_max() APIs with realistic
+ // implementations.
+ template <class FunctorType>
+ static int team_size_max(const FunctorType&, const ParallelForTag&) {
+ return default_team_size_max;
+ }
+
+ template <class FunctorType>
+ static int team_size_max(const FunctorType&, const ParallelReduceTag&) {
+ return default_team_size_max;
+ }
+
+ template <class FunctorType, class ReducerType>
+ static int team_size_max(const FunctorType&, const ReducerType&,
+ const ParallelReduceTag&) {
+ return default_team_size_max;
+ }
+
+ // FIXME_OPENACC: update team_size_recommended() APIs with realistic
+ // implementations.
+ template <class FunctorType>
+ static int team_size_recommended(const FunctorType&, const ParallelForTag&) {
+ return default_team_size;
+ }
+
+ template <class FunctorType>
+ static int team_size_recommended(const FunctorType&,
+ const ParallelReduceTag&) {
+ return default_team_size;
+ }
+
+ template <class FunctorType, class ReducerType>
+ static int team_size_recommended(const FunctorType&, const ReducerType&,
+ const ParallelReduceTag&) {
+ return default_team_size;
+ }
+
+ //----------------------------------------
+
+ private:
+ int m_league_size;
+ int m_team_size;
+ int m_vector_length;
+ int m_team_alloc;
+ int m_team_iter;
+ std::array<size_t, 2> m_team_scratch_size;
+ std::array<size_t, 2> m_thread_scratch_size;
+ bool m_tune_team_size;
+ bool m_tune_vector_length;
+ constexpr static int default_team_size_max =
+ OpenACCTeamMember::DEFAULT_TEAM_SIZE_MAX;
+ constexpr static int default_team_size =
+ OpenACCTeamMember::DEFAULT_TEAM_SIZE_REC;
+ int m_chunk_size;
+
+ void init(const int league_size_request, const int team_size_request,
+ const int vector_length_request) {
+ m_league_size = league_size_request;
+ m_team_size = team_size_request;
+ m_vector_length = vector_length_request;
+ set_auto_chunk_size();
+ }
+
+ template <typename ExecSpace, typename... OtherProperties>
+ friend class TeamPolicyInternal;
+
+ public:
+ bool impl_auto_team_size() const { return m_tune_team_size; }
+ bool impl_auto_vector_length() const { return m_tune_vector_length; }
+ void impl_set_team_size(const int size) { m_team_size = size; }
+ void impl_set_vector_length(const int length) {
+ m_vector_length = length;
+ }
+ int impl_vector_length() const { return m_vector_length; }
+ int team_size() const { return m_team_size; }
+ int league_size() const { return m_league_size; }
+ size_t scratch_size(const int& level, int team_size_ = -1) const {
+ if (team_size_ < 0) team_size_ = m_team_size;
+ return m_team_scratch_size[level] +
+ team_size_ * m_thread_scratch_size[level];
+ }
+
+ Kokkos::Experimental::OpenACC space() const {
+ return Kokkos::Experimental::OpenACC();
+ }
+
+ template <class... OtherProperties>
+ TeamPolicyInternal(const TeamPolicyInternal<OtherProperties...>& p)
+ : m_league_size(p.m_league_size),
+ m_team_size(p.m_team_size),
+ m_vector_length(p.m_vector_length),
+ m_team_alloc(p.m_team_alloc),
+ m_team_iter(p.m_team_iter),
+ m_team_scratch_size(p.m_team_scratch_size),
+ m_thread_scratch_size(p.m_thread_scratch_size),
+ m_tune_team_size(p.m_tune_team_size),
+ m_tune_vector_length(p.m_tune_vector_length),
+ m_chunk_size(p.m_chunk_size) {}
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request, int team_size_request,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, vector_length_request);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size / vector_length_request,
+ vector_length_request);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size, 1);
+ }
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, 1);
+ }
+
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, vector_length_request);
+ }
+
+ TeamPolicyInternal(int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size / vector_length_request,
+ vector_length_request);
+ }
+
+ TeamPolicyInternal(int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size, 1);
+ }
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, 1);
+ }
+ static int vector_length_max() {
+ return 32; /* TODO: this is bad. Need logic that is compiler and backend
+ aware */
+ }
+ int team_alloc() const { return m_team_alloc; }
+ int team_iter() const { return m_team_iter; }
+
+ int chunk_size() const { return m_chunk_size; }
+
+ /** \brief set chunk_size to a discrete value*/
+ TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
+ m_chunk_size = chunk_size_;
+ return *this;
+ }
+
+ /** \brief set per team scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(const int& level,
+ const PerTeamValue& per_team) {
+ m_team_scratch_size[level] = per_team.value;
+ return *this;
+ }
+
+ /** \brief set per thread scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(const int& level,
+ const PerThreadValue& per_thread) {
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ /** \brief set per thread and per team scratch size for a specific level of
+ * the scratch hierarchy */
+ TeamPolicyInternal& set_scratch_size(const int& level,
+ const PerTeamValue& per_team,
+ const PerThreadValue& per_thread) {
+ m_team_scratch_size[level] = per_team.value;
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ private:
+ /** \brief finalize chunk_size if it was set to AUTO*/
+ void set_auto_chunk_size() {
+ int concurrency = 2048 * default_team_size;
+
+ if (m_chunk_size > 0) {
+ if (!Impl::is_integral_power_of_two(m_chunk_size))
+ Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+ }
+
+ int new_chunk_size = 1;
+ while (new_chunk_size * 100 * concurrency < m_league_size)
+ new_chunk_size *= 2;
+ if (new_chunk_size < default_team_size) {
+ new_chunk_size = 1;
+ while ((new_chunk_size * 40 * concurrency < m_league_size) &&
+ (new_chunk_size < default_team_size))
+ new_chunk_size *= 2;
+ }
+ m_chunk_size = new_chunk_size;
+ }
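+ // E.g., with default_team_size = 128 the concurrency estimate is
+ // 2048*128 = 262144, so new_chunk_size only grows beyond 1 once
+ // m_league_size exceeds 40*262144 = 10485760.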
+
+ public:
+ using member_type = Impl::OpenACCTeamMember;
+};
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, OpenACCTeamMember> {
+ using index_type = iType;
+ const iType start;
+ const iType end;
+ const OpenACCTeamMember& team;
+
+ TeamThreadRangeBoundariesStruct(const OpenACCTeamMember& thread_, iType count)
+ : start(0), end(count), team(thread_) {}
+ TeamThreadRangeBoundariesStruct(const OpenACCTeamMember& thread_,
+ iType begin_, iType end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, OpenACCTeamMember> {
+ using index_type = iType;
+ const index_type start;
+ const index_type end;
+ const OpenACCTeamMember& team;
+
+ ThreadVectorRangeBoundariesStruct(const OpenACCTeamMember& thread_,
+ index_type count)
+ : start(0), end(count), team(thread_) {}
+ ThreadVectorRangeBoundariesStruct(const OpenACCTeamMember& thread_,
+ index_type begin_, index_type end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, OpenACCTeamMember> {
+ using index_type = iType;
+ const index_type start;
+ const index_type end;
+ const OpenACCTeamMember& team;
+
+ TeamVectorRangeBoundariesStruct(const OpenACCTeamMember& thread_,
+ index_type count)
+ : start(0), end(count), team(thread_) {}
+ TeamVectorRangeBoundariesStruct(const OpenACCTeamMember& thread_,
+ index_type begin_, index_type end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>
+ TeamThreadRange(const Impl::OpenACCTeamMember& thread, const iType& count) {
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+ typename std::common_type<iType1, iType2>::type, Impl::OpenACCTeamMember>
+TeamThreadRange(const Impl::OpenACCTeamMember& thread, const iType1& begin,
+ const iType2& end) {
+ using iType = typename std::common_type<iType1, iType2>::type;
+ return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>(
+ thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>
+ ThreadVectorRange(const Impl::OpenACCTeamMember& thread,
+ const iType& count) {
+ return Impl::ThreadVectorRangeBoundariesStruct<iType,
+ Impl::OpenACCTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+ typename std::common_type<iType1, iType2>::type, Impl::OpenACCTeamMember>
+ThreadVectorRange(const Impl::OpenACCTeamMember& thread,
+ const iType1& arg_begin, const iType2& arg_end) {
+ using iType = typename std::common_type<iType1, iType2>::type;
+ return Impl::ThreadVectorRangeBoundariesStruct<iType,
+ Impl::OpenACCTeamMember>(
+ thread, iType(arg_begin), iType(arg_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+ Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>
+ TeamVectorRange(const Impl::OpenACCTeamMember& thread, const iType& count) {
+ return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>(
+ thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+ typename std::common_type<iType1, iType2>::type, Impl::OpenACCTeamMember>
+TeamVectorRange(const Impl::OpenACCTeamMember& thread, const iType1& arg_begin,
+ const iType2& arg_end) {
+ using iType = typename std::common_type<iType1, iType2>::type;
+ return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::OpenACCTeamMember>(
+ thread, iType(arg_begin), iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::OpenACCTeamMember> PerTeam(
+ const Impl::OpenACCTeamMember& thread) {
+ return Impl::ThreadSingleStruct<Impl::OpenACCTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::OpenACCTeamMember> PerThread(
+ const Impl::OpenACCTeamMember& thread) {
+ return Impl::VectorSingleStruct<Impl::OpenACCTeamMember>(thread);
+}
+} // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::OpenACCTeamMember>&
+ /*single_struct*/,
+ const FunctorType& lambda) {
+ lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::OpenACCTeamMember>& single_struct,
+ const FunctorType& lambda) {
+ if (single_struct.team_member.team_rank() == 0) lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::OpenACCTeamMember>&
+ /*single_struct*/,
+ const FunctorType& lambda, ValueType& val) {
+ lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::OpenACCTeamMember>& single_struct,
+ const FunctorType& lambda, ValueType& val) {
+ if (single_struct.team_member.team_rank() == 0) {
+ lambda(val);
+ }
+ single_struct.team_member.team_broadcast(val, 0);
+}
+} // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_OPENACC_TEAM_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_TRAITS_HPP
+#define KOKKOS_OPENACC_TRAITS_HPP
+
+#include <openacc.h>
+
+namespace Kokkos::Experimental::Impl {
+
+struct OpenACC_Traits {
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ static constexpr acc_device_t dev_type = acc_device_nvidia;
+ static constexpr bool may_fallback_to_host = false;
+#elif defined(KOKKOS_ARCH_AMD_GPU)
+ static constexpr acc_device_t dev_type = acc_device_radeon;
+ static constexpr bool may_fallback_to_host = false;
+#elif defined(KOKKOS_ENABLE_OPENACC_FORCE_HOST_AS_DEVICE)
+ static constexpr acc_device_t dev_type = acc_device_host;
+ static constexpr bool may_fallback_to_host = true;
+#else
+ static constexpr acc_device_t dev_type = acc_device_default;
+ static constexpr bool may_fallback_to_host = true;
+#endif
+};
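+// Backend initialization can then pick the device generically, e.g. via
+// acc_set_device_type(OpenACC_Traits::dev_type), and only consider a host
+// fallback when may_fallback_to_host is true.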
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <iostream>
+
+#include <OpenMP/Kokkos_OpenMP.hpp>
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+namespace Kokkos {
+
+OpenMP::OpenMP()
+ : m_space_instance(&Impl::OpenMPInternal::singleton(),
+ [](Impl::OpenMPInternal *) {}) {
+ Impl::OpenMPInternal::singleton().verify_is_initialized(
+ "OpenMP instance constructor");
+}
+
+OpenMP::OpenMP(int pool_size)
+ : m_space_instance(new Impl::OpenMPInternal(pool_size),
+ [](Impl::OpenMPInternal *ptr) {
+ ptr->finalize();
+ delete ptr;
+ }) {
+ Impl::OpenMPInternal::singleton().verify_is_initialized(
+ "OpenMP instance constructor");
+}
+
+int OpenMP::impl_get_current_max_threads() noexcept {
+ return Impl::OpenMPInternal::get_current_max_threads();
+}
+
+void OpenMP::impl_initialize(InitializationSettings const &settings) {
+ Impl::OpenMPInternal::singleton().initialize(
+ settings.has_num_threads() ? settings.get_num_threads() : -1);
+}
+
+void OpenMP::impl_finalize() { Impl::OpenMPInternal::singleton().finalize(); }
+
+void OpenMP::print_configuration(std::ostream &os, bool /*verbose*/) const {
+ os << "Host Parallel Execution Space:\n";
+ os << " KOKKOS_ENABLE_OPENMP: yes\n";
+
+ os << "\nOpenMP Runtime Configuration:\n";
+
+ m_space_instance->print_configuration(os);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int OpenMP::concurrency(OpenMP const &instance) {
+ return instance.impl_thread_pool_size();
+}
+#else
+int OpenMP::concurrency() const { return impl_thread_pool_size(); }
+#endif
+
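+// A global fence simply acquires and releases every instance's mutex; this
+// relies on parallel dispatch holding the instance mutex while a kernel runs,
+// so taking each lock implies all previously submitted work has completed.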
+void OpenMP::impl_static_fence(std::string const &name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+ []() {
+ std::lock_guard<std::mutex> lock_all_instances(
+ Impl::OpenMPInternal::all_instances_mutex);
+ for (auto *instance_ptr : Impl::OpenMPInternal::all_instances) {
+ std::lock_guard<std::mutex> lock_instance(
+ instance_ptr->m_instance_mutex);
+ }
+ });
+}
+
+void OpenMP::fence(const std::string &name) const {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
+ name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
+ [this]() {
+ auto *internal_instance = this->impl_internal_space_instance();
+ std::lock_guard<std::mutex> lock(internal_instance->m_instance_mutex);
+ });
+}
+
+bool OpenMP::impl_is_initialized() noexcept {
+ return Impl::OpenMPInternal::singleton().is_initialized();
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+KOKKOS_DEPRECATED bool OpenMP::in_parallel(OpenMP const &exec_space) noexcept {
+ return exec_space.impl_internal_space_instance()->m_level < omp_get_level();
+}
+#endif
+
+int OpenMP::impl_thread_pool_size() const noexcept {
+ return (impl_internal_space_instance()->get_level() < omp_get_level())
+ ? omp_get_num_threads()
+ : impl_internal_space_instance()->m_pool_size;
+}
+
+int OpenMP::impl_max_hardware_threads() noexcept {
+ return Impl::OpenMPInternal::max_hardware_threads();
+}
+
+namespace Impl {
+
+int g_openmp_space_factory_initialized =
+ initialize_space_factory<OpenMP>("050_OpenMP");
+
+} // namespace Impl
+
+} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_OPENMP_HPP
#define KOKKOS_OPENMP_HPP
#include <Kokkos_Core_fwd.hpp>
-#include <cstddef>
-#include <iosfwd>
#include <Kokkos_HostSpace.hpp>
-
-#ifdef KOKKOS_ENABLE_HBWSPACE
-#include <Kokkos_HBWSpace.hpp>
-#endif
-
#include <Kokkos_ScratchSpace.hpp>
#include <Kokkos_Parallel.hpp>
-#include <Kokkos_TaskScheduler.hpp>
#include <Kokkos_Layout.hpp>
#include <impl/Kokkos_HostSharedPtr.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
#include <impl/Kokkos_InitializationSettings.hpp>
+#include <omp.h>
+
+#include <cstddef>
+#include <iosfwd>
#include <vector>
/*--------------------------------------------------------------------------*/
namespace Impl {
class OpenMPInternal;
-}
+} // namespace Impl
/// \class OpenMP
/// \brief Kokkos device for multicore processors in the host memory space.
//! Tag this class as a kokkos execution space
using execution_space = OpenMP;
- using memory_space =
-#ifdef KOKKOS_ENABLE_HBWSPACE
- Experimental::HBWSpace;
-#else
- HostSpace;
-#endif
+ using memory_space = HostSpace;
//! This execution space preferred device_type
using device_type = Kokkos::Device<execution_space, memory_space>;
OpenMP();
+ explicit OpenMP(int pool_size);
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "OpenMP execution space should be constructed explicitly.")
+ OpenMP(int pool_size)
+ : OpenMP(pool_size) {}
+#endif
+
/// \brief Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/// \brief is the instance running a parallel algorithm
- inline static bool in_parallel(OpenMP const& = OpenMP()) noexcept;
+ KOKKOS_DEPRECATED static bool in_parallel(OpenMP const& = OpenMP()) noexcept;
+#endif
/// \brief Wait until all dispatched functors complete on the given instance
///
void fence(std::string const& name =
"Kokkos::OpenMP::fence: Unnamed Instance Fence") const;
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
/// \brief Does the given instance return immediately after launching
/// a parallel algorithm
///
/// This always returns false on OpenMP
- inline static bool is_asynchronous(OpenMP const& = OpenMP()) noexcept;
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- /// \brief Partition the default instance into new instances without creating
- /// new masters
- ///
- /// This is a no-op on OpenMP since the default instance cannot be partitioned
- /// without promoting other threads to 'master'
- static std::vector<OpenMP> partition(...);
-
- /// Non-default instances should be ref-counted so that when the last
- /// is destroyed the instance resources are released
- ///
- /// This is a no-op on OpenMP since a non default instance cannot be created
- static OpenMP create_instance(...);
-
- /// \brief Partition the default instance and call 'f' on each new 'master'
- /// thread
- ///
- /// Func is a functor with the following signiture
- /// void( int partition_id, int num_partitions )
- template <typename F>
- KOKKOS_DEPRECATED static void partition_master(
- F const& f, int requested_num_partitions = 0,
- int requested_partition_size = 0);
+ KOKKOS_DEPRECATED inline static bool is_asynchronous(
+ OpenMP const& = OpenMP()) noexcept {
+ return false;
+ }
#endif
- // use UniqueToken
- static int concurrency();
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ static int concurrency(OpenMP const& = OpenMP());
+#else
+ int concurrency() const;
+#endif
static void impl_initialize(InitializationSettings const&);
/// \brief Free any resources being consumed by the default execution space
static void impl_finalize();
- inline static int impl_thread_pool_size() noexcept;
+ int impl_thread_pool_size() const noexcept;
- /** \brief The rank of the executing thread in this thread pool */
- KOKKOS_INLINE_FUNCTION
- static int impl_thread_pool_rank() noexcept;
+ int impl_thread_pool_size(int depth) const;
- inline static int impl_thread_pool_size(int depth);
+ /** \brief The rank of the executing thread in this thread pool */
+ inline static int impl_thread_pool_rank() noexcept;
// use UniqueToken
- inline static int impl_max_hardware_threads() noexcept;
+ static int impl_max_hardware_threads() noexcept;
// use UniqueToken
KOKKOS_INLINE_FUNCTION
static int impl_get_current_max_threads() noexcept;
Impl::OpenMPInternal* impl_internal_space_instance() const {
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- return m_space_instance;
-#else
return m_space_instance.get();
-#endif
}
static constexpr const char* name() noexcept { return "OpenMP"; }
uint32_t impl_instance_id() const noexcept { return 1; }
private:
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- Impl::OpenMPInternal* m_space_instance;
-#else
+ friend bool operator==(OpenMP const& lhs, OpenMP const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(OpenMP const& lhs, OpenMP const& rhs) {
+ return !(lhs == rhs);
+ }
Kokkos::Impl::HostSharedPtr<Impl::OpenMPInternal> m_space_instance;
-#endif
};
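+// On the host this returns the calling thread's OpenMP thread number; in
+// device code the query is meaningless, so the KOKKOS_IF_ON_DEVICE branch
+// returns -1.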
+inline int OpenMP::impl_thread_pool_rank() noexcept {
+ KOKKOS_IF_ON_HOST((return omp_get_thread_num();))
+
+ KOKKOS_IF_ON_DEVICE((return -1;))
+}
+
+inline int OpenMP::impl_thread_pool_size(int depth) const {
+ return depth < 2 ? impl_thread_pool_size() : 1;
+}
+
+KOKKOS_INLINE_FUNCTION
+int OpenMP::impl_hardware_thread_id() noexcept {
+ KOKKOS_IF_ON_HOST((return omp_get_thread_num();))
+
+ KOKKOS_IF_ON_DEVICE((return -1;))
+}
+
namespace Tools {
namespace Experimental {
template <>
#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
#include <OpenMP/Kokkos_OpenMP_Team.hpp>
-#include <OpenMP/Kokkos_OpenMP_Parallel.hpp>
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
#include <OpenMP/Kokkos_OpenMP_Task.hpp>
+#endif
#include <KokkosExp_MDRangePolicy.hpp>
/*--------------------------------------------------------------------------*/
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <cstdlib>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <thread>
+
+namespace {
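+// Process-wide maximum number of OpenMP threads, captured once during
+// initialization and reset to 1 by finalize().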
+int g_openmp_hardware_max_threads = 1;
+}
+
+namespace Kokkos {
+namespace Impl {
+
+std::vector<OpenMPInternal *> OpenMPInternal::all_instances;
+std::mutex OpenMPInternal::all_instances_mutex;
+
+int OpenMPInternal::max_hardware_threads() noexcept {
+ return g_openmp_hardware_max_threads;
+}
+
+void OpenMPInternal::clear_thread_data() {
+ const size_t member_bytes =
+ sizeof(int64_t) *
+ HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
+
+ const int old_alloc_bytes =
+ m_pool[0] ? (member_bytes + m_pool[0]->scratch_bytes()) : 0;
+
+ OpenMP::memory_space space;
+
+#pragma omp parallel num_threads(m_pool_size)
+ {
+ const int rank = omp_get_thread_num();
+
+ if (nullptr != m_pool[rank]) {
+ m_pool[rank]->disband_pool();
+
+ space.deallocate(m_pool[rank], old_alloc_bytes);
+
+ m_pool[rank] = nullptr;
+ }
+ }
+ /* END #pragma omp parallel */
+}
+
+void OpenMPInternal::resize_thread_data(size_t pool_reduce_bytes,
+ size_t team_reduce_bytes,
+ size_t team_shared_bytes,
+ size_t thread_local_bytes) {
+ const size_t member_bytes =
+ sizeof(int64_t) *
+ HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
+
+ HostThreadTeamData *root = m_pool[0];
+
+ const size_t old_pool_reduce = root ? root->pool_reduce_bytes() : 0;
+ const size_t old_team_reduce = root ? root->team_reduce_bytes() : 0;
+ const size_t old_team_shared = root ? root->team_shared_bytes() : 0;
+ const size_t old_thread_local = root ? root->thread_local_bytes() : 0;
+ const size_t old_alloc_bytes =
+ root ? (member_bytes + root->scratch_bytes()) : 0;
+
+  // Allocate if any of the old allocations is too small:
+
+ const bool allocate = (old_pool_reduce < pool_reduce_bytes) ||
+ (old_team_reduce < team_reduce_bytes) ||
+ (old_team_shared < team_shared_bytes) ||
+ (old_thread_local < thread_local_bytes);
+
+ if (allocate) {
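+    // Grow-only: carry forward any old size that exceeds the request so that
+    // existing scratch areas are never shrunk.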
+ if (pool_reduce_bytes < old_pool_reduce) {
+ pool_reduce_bytes = old_pool_reduce;
+ }
+ if (team_reduce_bytes < old_team_reduce) {
+ team_reduce_bytes = old_team_reduce;
+ }
+ if (team_shared_bytes < old_team_shared) {
+ team_shared_bytes = old_team_shared;
+ }
+ if (thread_local_bytes < old_thread_local) {
+ thread_local_bytes = old_thread_local;
+ }
+
+ const size_t alloc_bytes =
+ member_bytes +
+ HostThreadTeamData::scratch_size(pool_reduce_bytes, team_reduce_bytes,
+ team_shared_bytes, thread_local_bytes);
+
+ OpenMP::memory_space space;
+
+ memory_fence();
+
+ for (int rank = 0; rank < m_pool_size; ++rank) {
+ if (nullptr != m_pool[rank]) {
+ m_pool[rank]->disband_pool();
+
+        // Use impl_deallocate so as not to fence here.
+ space.impl_deallocate("[unlabeled]", m_pool[rank], old_alloc_bytes);
+ }
+
+ void *ptr = space.allocate("Kokkos::OpenMP::scratch_mem", alloc_bytes);
+
+ m_pool[rank] = new (ptr) HostThreadTeamData();
+
+ m_pool[rank]->scratch_assign(((char *)ptr) + member_bytes, alloc_bytes,
+ pool_reduce_bytes, team_reduce_bytes,
+ team_shared_bytes, thread_local_bytes);
+ }
+
+ HostThreadTeamData::organize_pool(m_pool, m_pool_size);
+ }
+}
+
+OpenMPInternal &OpenMPInternal::singleton() {
+ static OpenMPInternal *self = nullptr;
+ if (self == nullptr) {
+ self = new OpenMPInternal(get_current_max_threads());
+ }
+
+ return *self;
+}
+
+int OpenMPInternal::get_current_max_threads() noexcept {
+  // Using omp_get_max_threads() is problematic in conjunction with hwloc on
+  // Intel: an initial call into the OpenMP runtime before any parallel region
+  // sets a process mask for a single core. On entering the first parallel
+  // region the runtime then binds threads to other cores and makes the
+  // process mask the aggregate of the thread masks. The intent seems to be to
+  // make serial code run fast when compiled with OpenMP enabled, even if no
+  // parallel regions are actually used.
+
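+  // Instead, count the threads the runtime actually launches: enter a
+  // parallel region and atomically increment a shared counter once per
+  // thread.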
+ int count = 0;
+#pragma omp parallel
+ {
+#pragma omp atomic
+ ++count;
+ }
+ return count;
+}
+
+void OpenMPInternal::initialize(int thread_count) {
+ if (m_initialized) {
+ Kokkos::abort(
+        "Calling OpenMP::initialize on an already initialized instance is "
+        "illegal\n");
+ }
+
+ if (omp_in_parallel()) {
+ std::string msg("Kokkos::OpenMP::initialize ERROR : in parallel");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+
+ {
+ if (Kokkos::show_warnings() && !std::getenv("OMP_PROC_BIND")) {
+ std::cerr
+ << R"WARNING(Kokkos::OpenMP::initialize WARNING: OMP_PROC_BIND environment variable not set
+ In general, for best performance with OpenMP 4.0 or better set OMP_PROC_BIND=spread and OMP_PLACES=threads
+ For best performance with OpenMP 3.1 set OMP_PROC_BIND=true
+ For unit testing set OMP_PROC_BIND=false
+)WARNING" << std::endl;
+
+ if (mpi_detected()) {
+ std::cerr
+ << R"WARNING(MPI detected: For OpenMP binding to work as intended, MPI ranks must be bound to exclusive CPU sets.
+)WARNING" << std::endl;
+ }
+ }
+
+    // Before any other call into the OpenMP runtime, query the maximum number
+    // of threads and save the value for re-initialization unit testing.
+
+ g_openmp_hardware_max_threads = get_current_max_threads();
+
+ int process_num_threads = g_openmp_hardware_max_threads;
+
+ if (Kokkos::hwloc::available()) {
+ process_num_threads = Kokkos::hwloc::get_available_numa_count() *
+ Kokkos::hwloc::get_available_cores_per_numa() *
+ Kokkos::hwloc::get_available_threads_per_core();
+ }
+
+    // if thread_count  < 0, use g_openmp_hardware_max_threads;
+    // if thread_count == 0, set g_openmp_hardware_max_threads to
+    //   process_num_threads;
+    // if thread_count  > 0, set g_openmp_hardware_max_threads to thread_count
+ if (thread_count < 0) {
+ thread_count = g_openmp_hardware_max_threads;
+ } else if (thread_count == 0) {
+ if (g_openmp_hardware_max_threads != process_num_threads) {
+ g_openmp_hardware_max_threads = process_num_threads;
+ omp_set_num_threads(g_openmp_hardware_max_threads);
+ }
+ } else {
+ if (Kokkos::show_warnings() && thread_count > process_num_threads) {
+ std::cerr << "Kokkos::OpenMP::initialize WARNING: You are likely "
+ "oversubscribing your CPU cores.\n"
+ << " process threads available : " << std::setw(3)
+ << process_num_threads
+                  << ", requested threads : " << std::setw(3) << thread_count
+ << std::endl;
+ }
+ g_openmp_hardware_max_threads = thread_count;
+ omp_set_num_threads(g_openmp_hardware_max_threads);
+ }
+
+// Set up thread-local allocation tracking.
+#pragma omp parallel num_threads(g_openmp_hardware_max_threads)
+ { Impl::SharedAllocationRecord<void, void>::tracking_enable(); }
+
+ auto &instance = OpenMPInternal::singleton();
+ instance.m_pool_size = g_openmp_hardware_max_threads;
+
+ // New, unified host thread team data:
+ {
+ size_t pool_reduce_bytes = 32 * thread_count;
+ size_t team_reduce_bytes = 32 * thread_count;
+ size_t team_shared_bytes = 1024 * thread_count;
+ size_t thread_local_bytes = 1024;
+
+ instance.resize_thread_data(pool_reduce_bytes, team_reduce_bytes,
+ team_shared_bytes, thread_local_bytes);
+ }
+ }
+
+ // Check for over-subscription
+ auto const reported_ranks = mpi_ranks_per_node();
+ auto const mpi_local_size = reported_ranks < 0 ? 1 : reported_ranks;
+ int const procs_per_node = std::thread::hardware_concurrency();
+ if (Kokkos::show_warnings() &&
+ (mpi_local_size * long(thread_count) > procs_per_node)) {
+ std::cerr << "Kokkos::OpenMP::initialize WARNING: You are likely "
+ "oversubscribing your CPU cores."
+ << std::endl;
+ std::cerr << " Detected: "
+ << procs_per_node << " cores per node." << std::endl;
+ std::cerr << " Detected: "
+ << mpi_local_size << " MPI_ranks per node." << std::endl;
+ std::cerr << " Requested: "
+ << thread_count << " threads per process." << std::endl;
+ }
+
+ m_initialized = true;
+}
+
+void OpenMPInternal::finalize() {
+ if (omp_in_parallel()) {
+ std::string msg("Kokkos::OpenMP::finalize ERROR ");
+ if (this != &singleton()) msg.append(": not initialized");
+ if (omp_in_parallel()) msg.append(": in parallel");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+
+ if (this == &singleton()) {
+ auto const &instance = singleton();
+    // Silence an unused-variable warning (nthreads is only referenced in the
+    // pragma below).
+ const int nthreads = instance.m_pool_size <= g_openmp_hardware_max_threads
+ ? g_openmp_hardware_max_threads
+ : instance.m_pool_size;
+ (void)nthreads;
+
+#pragma omp parallel num_threads(nthreads)
+ { Impl::SharedAllocationRecord<void, void>::tracking_disable(); }
+
+ // allow main thread to track
+ Impl::SharedAllocationRecord<void, void>::tracking_enable();
+
+ g_openmp_hardware_max_threads = 1;
+ }
+
+ m_initialized = false;
+
+ // guard erasing from all_instances
+ {
+ std::scoped_lock lock(all_instances_mutex);
+
+ auto it = std::find(all_instances.begin(), all_instances.end(), this);
+ if (it == all_instances.end())
+ Kokkos::abort(
+ "Execution space instance to be removed couldn't be found!");
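+    // Swap-and-pop: the order of all_instances is irrelevant, so overwrite
+    // the erased entry with the last element instead of shifting the vector.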
+ *it = all_instances.back();
+ all_instances.pop_back();
+ }
+}
+
+void OpenMPInternal::print_configuration(std::ostream &s) const {
+ s << "Kokkos::OpenMP";
+
+ if (m_initialized) {
+ const int numa_count = 1;
+ const int core_per_numa = g_openmp_hardware_max_threads;
+ const int thread_per_core = 1;
+
+ s << " thread_pool_topology[ " << numa_count << " x " << core_per_numa
+ << " x " << thread_per_core << " ]" << std::endl;
+ } else {
+ s << " not initialized" << std::endl;
+ }
+}
+
+bool OpenMPInternal::verify_is_initialized(const char *const label) const {
+ if (!m_initialized) {
+ std::cerr << "Kokkos::OpenMP " << label
+ << " : ERROR OpenMP is not initialized" << std::endl;
+ }
+ return m_initialized;
+}
+} // namespace Impl
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_INSTANCE_HPP
+#define KOKKOS_OPENMP_INSTANCE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if !defined(_OPENMP) && !defined(__CUDA_ARCH__) && \
+ !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__)
+#error \
+ "You enabled Kokkos OpenMP support without enabling OpenMP in the compiler!"
+#endif
+
+#include <OpenMP/Kokkos_OpenMP.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+
+#include <Kokkos_Atomic.hpp>
+
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+
+#include <omp.h>
+
+#include <mutex>
+#include <numeric>
+#include <type_traits>
+#include <vector>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+class OpenMPInternal;
+
+struct OpenMPTraits {
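+  // Fixed upper bound on the thread-pool size; OpenMPInternal::m_pool below
+  // is a statically sized array of this length.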
+ static constexpr int MAX_THREAD_COUNT = 512;
+};
+
+class OpenMPInternal {
+ private:
+ OpenMPInternal(int arg_pool_size)
+ : m_pool_size{arg_pool_size}, m_level{omp_get_level()}, m_pool() {
+ // guard pushing to all_instances
+ {
+ std::scoped_lock lock(all_instances_mutex);
+ all_instances.push_back(this);
+ }
+ }
+
+ ~OpenMPInternal() { clear_thread_data(); }
+
+ static int get_current_max_threads() noexcept;
+
+ bool m_initialized = false;
+
+ int m_pool_size;
+ int m_level;
+
+ HostThreadTeamData* m_pool[OpenMPTraits::MAX_THREAD_COUNT];
+
+ public:
+ friend class Kokkos::OpenMP;
+
+ static OpenMPInternal& singleton();
+
+  void initialize(int thread_count);
+
+ void finalize();
+
+ void clear_thread_data();
+
+ static int max_hardware_threads() noexcept;
+
+ int thread_pool_size() const { return m_pool_size; }
+
+ void resize_thread_data(size_t pool_reduce_bytes, size_t team_reduce_bytes,
+ size_t team_shared_bytes, size_t thread_local_bytes);
+
+ HostThreadTeamData* get_thread_data() const noexcept {
+ return m_pool[m_level == omp_get_level() ? 0 : omp_get_thread_num()];
+ }
+
+ HostThreadTeamData* get_thread_data(int i) const noexcept {
+ return m_pool[i];
+ }
+
+ int get_level() const { return m_level; }
+
+ bool is_initialized() const { return m_initialized; }
+
+ bool verify_is_initialized(const char* const label) const;
+
+ void print_configuration(std::ostream& s) const;
+
+ std::mutex m_instance_mutex;
+
+ static std::vector<OpenMPInternal*> all_instances;
+ static std::mutex all_instances_mutex;
+};
+
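+// Used by the OpenMP Parallel* implementations: a kernel must run serially on
+// the calling thread when the caller is already nested more deeply than the
+// level at which the instance was created, unless nested parallelism is
+// enabled and the caller is still at the outermost parallel level.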
+inline bool execute_in_serial(OpenMP const& space = OpenMP()) {
+// The default value returned by `omp_get_max_active_levels` with gcc version
+// lower than 11.1.0 is 2147483647 instead of 1.
+#if (!defined(KOKKOS_COMPILER_GNU) || KOKKOS_COMPILER_GNU >= 1110) && \
+ _OPENMP >= 201511
+ bool is_nested = omp_get_max_active_levels() > 1;
+#else
+ bool is_nested = static_cast<bool>(omp_get_nested());
+#endif
+ return (space.impl_internal_space_instance()->get_level() < omp_get_level() &&
+ !(is_nested && (omp_get_level() == 1)));
+}
+
+} // namespace Impl
+
+namespace Experimental {
+namespace Impl {
+// Partitioning an Execution Space: expects the space and integer arguments
+// specifying relative weights
+template <typename T>
+inline std::vector<OpenMP> create_OpenMP_instances(
+ OpenMP const& main_instance, std::vector<T> const& weights) {
+ static_assert(
+ std::is_arithmetic<T>::value,
+ "Kokkos Error: partitioning arguments must be integers or floats");
+ if (weights.size() == 0) {
+ Kokkos::abort("Kokkos::abort: Partition weights vector is empty.");
+ }
+ std::vector<OpenMP> instances(weights.size());
+ double total_weight = std::accumulate(weights.begin(), weights.end(), 0.);
+ int const main_pool_size =
+ main_instance.impl_internal_space_instance()->thread_pool_size();
+
+ int resources_left = main_pool_size;
+ for (unsigned int i = 0; i < weights.size() - 1; ++i) {
+ int instance_pool_size = (weights[i] / total_weight) * main_pool_size;
+ if (instance_pool_size == 0) {
+ Kokkos::abort("Kokkos::abort: Instance has no resource allocated to it");
+ }
+ instances[i] = OpenMP(instance_pool_size);
+ resources_left -= instance_pool_size;
+ }
+  // The last instance gets all remaining resources.
+ if (resources_left <= 0) {
+ Kokkos::abort(
+        "Kokkos::abort: Partition does not have enough resources left to "
+        "create the last instance.");
+ }
+ instances[weights.size() - 1] = OpenMP(resources_left);
+
+ return instances;
+}
+} // namespace Impl
+
+template <typename... Args>
+std::vector<OpenMP> partition_space(OpenMP const& main_instance, Args... args) {
+ // Unpack the arguments and create the weight vector. Note that if not all of
+ // the types are the same, you will get a narrowing warning.
+ std::vector<std::common_type_t<Args...>> const weights = {args...};
+ return Impl::create_OpenMP_instances(main_instance, weights);
+}
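+
+// A minimal usage sketch (illustrative only; `n` and `work` are hypothetical
+// user-provided quantities): partition the default OpenMP instance with a
+// 2:1 thread split and dispatch work on each sub-instance.
+//
+//   auto instances =
+//       Kokkos::Experimental::partition_space(Kokkos::OpenMP(), 2, 1);
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::OpenMP>(instances[0], 0, n), work);
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::OpenMP>(instances[1], 0, n), work);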
+
+template <typename T>
+std::vector<OpenMP> partition_space(OpenMP const& main_instance,
+ std::vector<T> const& weights) {
+ return Impl::create_OpenMP_instances(main_instance, weights);
+}
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_MDRANGEPOLICY_HPP_
+#define KOKKOS_OPENMP_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, OpenMP, ThreadAndVector>
+ : HostBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_PARALLEL_FOR_HPP
+#define KOKKOS_OPENMP_PARALLEL_FOR_HPP
+
+#include <omp.h>
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED _Pragma("ivdep")
+#endif
+
+#ifndef KOKKOS_COMPILER_NVHPC
+#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE , m_policy.chunk_size()
+#else
+#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
+#endif
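+
+// The chunk-size argument is dropped for NVHPC, presumably to work around
+// that compiler's handling of a runtime chunk-size expression in the
+// schedule() clause.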
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::OpenMP> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ OpenMPInternal* m_instance;
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ inline static void exec_range(const FunctorType& functor, const Member ibeg,
+ const Member iend) {
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (auto iwork = ibeg; iwork < iend; ++iwork) {
+ exec_work(functor, iwork);
+ }
+ }
+
+ template <class Enable = WorkTag>
+ inline static std::enable_if_t<std::is_void<WorkTag>::value &&
+ std::is_same<Enable, WorkTag>::value>
+ exec_work(const FunctorType& functor, const Member iwork) {
+ functor(iwork);
+ }
+
+ template <class Enable = WorkTag>
+ inline static std::enable_if_t<!std::is_void<WorkTag>::value &&
+ std::is_same<Enable, WorkTag>::value>
+ exec_work(const FunctorType& functor, const Member iwork) {
+ functor(WorkTag{}, iwork);
+ }
+
+ template <class Policy>
+ std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value>
+ execute_parallel() const {
+    // Work around a bug in NVHPC 21.9/CUDA 11.4 where a loop with zero
+    // iterations would still be entered.
+ if (m_policy.begin() >= m_policy.end()) return;
+#pragma omp parallel for schedule(dynamic KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+ num_threads(m_instance->thread_pool_size())
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+ exec_work(m_functor, iwork);
+ }
+ }
+
+ template <class Policy>
+ std::enable_if_t<!std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value>
+ execute_parallel() const {
+// Specifying a chunk size with the GCC compiler leads to a performance
+// regression with the static schedule.
+#ifdef KOKKOS_COMPILER_GNU
+#pragma omp parallel for schedule(static) \
+ num_threads(m_instance->thread_pool_size())
+#else
+#pragma omp parallel for schedule(static KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+ num_threads(m_instance->thread_pool_size())
+#endif
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+ exec_work(m_functor, iwork);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+ if (execute_in_serial(m_policy.space())) {
+ exec_range(m_functor, m_policy.begin(), m_policy.end());
+ return;
+ }
+
+#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
+ execute_parallel<Policy>();
+#else
+ constexpr bool is_dynamic =
+ std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value;
+#pragma omp parallel num_threads(m_instance->thread_pool_size())
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ data.set_work_partition(m_policy.end() - m_policy.begin(),
+ m_policy.chunk_size());
+
+ if (is_dynamic) {
+ // Make sure work partition is set before stealing
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ std::pair<int64_t, int64_t> range(0, 0);
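+      // For dynamic schedules, get_work_stealing_chunk() returns a chunk with
+      // range.first < 0 once all work (including stolen chunks) is consumed,
+      // which terminates the loop below.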
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ exec_range(m_functor, range.first + m_policy.begin(),
+ range.second + m_policy.begin());
+
+ } while (is_dynamic && 0 <= range.first);
+ }
+#endif
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+ : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ }
+};
+
+// MDRangePolicy impl
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::OpenMP> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using WorkTag = typename MDRangePolicy::work_tag;
+
+ using Member = typename Policy::member_type;
+
+ using index_type = typename Policy::index_type;
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+ OpenMPInternal* m_instance;
+ const iterate_type m_iter;
+
+ inline void exec_range(const Member ibeg, const Member iend) const {
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ m_iter(iwork);
+ }
+ }
+
+ template <class Policy>
+ typename std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value>
+ execute_parallel() const {
+#pragma omp parallel for schedule(dynamic, 1) \
+ num_threads(m_instance->thread_pool_size())
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (index_type iwork = 0; iwork < m_iter.m_rp.m_num_tiles; ++iwork) {
+ m_iter(iwork);
+ }
+ }
+
+ template <class Policy>
+ typename std::enable_if<!std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value>::type
+ execute_parallel() const {
+#pragma omp parallel for schedule(static, 1) \
+ num_threads(m_instance->thread_pool_size())
+ KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+ for (index_type iwork = 0; iwork < m_iter.m_rp.m_num_tiles; ++iwork) {
+ m_iter(iwork);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+#ifndef KOKKOS_COMPILER_INTEL
+ if (execute_in_serial(m_iter.m_rp.space())) {
+ exec_range(0, m_iter.m_rp.m_num_tiles);
+ return;
+ }
+#endif
+
+#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
+ execute_parallel<Policy>();
+#else
+ constexpr bool is_dynamic =
+ std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value;
+
+#pragma omp parallel num_threads(m_instance->thread_pool_size())
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ data.set_work_partition(m_iter.m_rp.m_num_tiles, 1);
+
+ if (is_dynamic) {
+ // Make sure work partition is set before stealing
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ std::pair<int64_t, int64_t> range(0, 0);
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ exec_range(range.first, range.second);
+
+ } while (is_dynamic && 0 <= range.first);
+ }
+ // END #pragma omp parallel
+#endif
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, MDRangePolicy arg_policy)
+ : m_instance(nullptr), m_iter(arg_policy, arg_functor) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ }
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ /**
+ * 1024 here is just our guess for a reasonable max tile size,
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::OpenMP> {
+ private:
+ enum { TEAM_REDUCE_SIZE = 512 };
+
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
+ using WorkTag = typename Policy::work_tag;
+ using SchedTag = typename Policy::schedule_type::type;
+ using Member = typename Policy::member_type;
+
+ OpenMPInternal* m_instance;
+ const FunctorType m_functor;
+ const Policy m_policy;
+ const size_t m_shmem_size;
+
+ template <class TagType>
+ inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
+ const FunctorType& functor, HostThreadTeamData& data,
+ const int league_rank_begin, const int league_rank_end,
+ const int league_size) {
+ for (int r = league_rank_begin; r < league_rank_end;) {
+ functor(Member(data, r, league_size));
+
+ if (++r < league_rank_end) {
+ // Don't allow team members to lap one another
+ // so that they don't overwrite shared memory.
+ if (data.team_rendezvous()) {
+ data.team_rendezvous_release();
+ }
+ }
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
+ const FunctorType& functor, HostThreadTeamData& data,
+ const int league_rank_begin, const int league_rank_end,
+ const int league_size) {
+ const TagType t{};
+
+ for (int r = league_rank_begin; r < league_rank_end;) {
+ functor(t, Member(data, r, league_size));
+
+ if (++r < league_rank_end) {
+ // Don't allow team members to lap one another
+ // so that they don't overwrite shared memory.
+ if (data.team_rendezvous()) {
+ data.team_rendezvous_release();
+ }
+ }
+ }
+ }
+
+ public:
+ inline void execute() const {
+ enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
+
+ const size_t pool_reduce_size = 0; // Never shrinks
+ const size_t team_reduce_size = TEAM_REDUCE_SIZE * m_policy.team_size();
+ const size_t team_shared_size = m_shmem_size;
+ const size_t thread_local_size = 0; // Never shrinks
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+ m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
+ team_shared_size, thread_local_size);
+
+ if (execute_in_serial(m_policy.space())) {
+ ParallelFor::template exec_team<WorkTag>(
+ m_functor, *(m_instance->get_thread_data()), 0,
+ m_policy.league_size(), m_policy.league_size());
+
+ return;
+ }
+
+#pragma omp parallel num_threads(m_instance->thread_pool_size())
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ const int active = data.organize_team(m_policy.team_size());
+
+ if (active) {
+ data.set_work_partition(
+ m_policy.league_size(),
+ (0 < m_policy.chunk_size() ? m_policy.chunk_size()
+ : m_policy.team_iter()));
+ }
+
+ if (is_dynamic) {
+ // Must synchronize to make sure each team has set its
+ // partition before beginning the work stealing loop.
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ if (active) {
+ std::pair<int64_t, int64_t> range(0, 0);
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ ParallelFor::template exec_team<WorkTag>(m_functor, data, range.first,
+ range.second,
+ m_policy.league_size());
+
+ } while (is_dynamic && 0 <= range.first);
+ }
+
+ data.disband_team();
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_instance(nullptr),
+ m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor, arg_policy.team_size())) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#undef KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
+
+#endif /* KOKKOS_OPENMP_PARALLEL_FOR_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_PARALLEL_REDUCE_HPP
+#define KOKKOS_OPENMP_PARALLEL_REDUCE_HPP
+
+#include <omp.h>
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::OpenMP> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ OpenMPInternal* m_instance;
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update) {
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(iwork, update);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update) {
+ const TagType t{};
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(t, iwork, update);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ if (m_policy.end() <= m_policy.begin()) {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ return;
+ }
+ enum {
+ is_dynamic = std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value
+ };
+
+ const size_t pool_reduce_bytes = reducer.value_size();
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+ if (execute_in_serial(m_policy.space())) {
+ const pointer_type ptr =
+ m_result_ptr
+ ? m_result_ptr
+ : pointer_type(
+ m_instance->get_thread_data(0)->pool_reduce_local());
+
+ reference_type update = reducer.init(ptr);
+
+ ParallelReduce::template exec_range<WorkTag>(
+ m_functor_reducer.get_functor(), m_policy.begin(), m_policy.end(),
+ update);
+
+ reducer.final(ptr);
+
+ return;
+ }
+ const int pool_size = m_instance->thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ data.set_work_partition(m_policy.end() - m_policy.begin(),
+ m_policy.chunk_size());
+
+ if (is_dynamic) {
+ // Make sure work partition is set before stealing
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ reference_type update = reducer.init(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+ std::pair<int64_t, int64_t> range(0, 0);
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ ParallelReduce::template exec_range<WorkTag>(
+ m_functor_reducer.get_functor(), range.first + m_policy.begin(),
+ range.second + m_policy.begin(), update);
+
+ } while (is_dynamic && 0 <= range.first);
+ }
+
+ // Reduction:
+
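+    // Thread 0's pool_reduce_local() buffer acts as the accumulator: the
+    // contributions of all other threads are join()ed into it before final()
+    // is applied.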
+ const pointer_type ptr =
+ pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+ for (int i = 1; i < pool_size; ++i) {
+ reducer.join(ptr,
+ reinterpret_cast<pointer_type>(
+ m_instance->get_thread_data(i)->pool_reduce_local()));
+ }
+
+ reducer.final(ptr);
+
+ if (m_result_ptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = ptr[j];
+ }
+ }
+ }
+
+ //----------------------------------------
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ Policy arg_policy, const ViewType& arg_view)
+ : m_instance(nullptr),
+ m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_view.data()) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::OpenMP reduce result must be a View accessible from "
+ "HostSpace");
+ }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+// MDRangePolicy impl
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, Kokkos::OpenMP> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename MDRangePolicy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, CombinedFunctorReducerType, WorkTag, reference_type>;
+
+ OpenMPInternal* m_instance;
+ const iterate_type m_iter;
+ const pointer_type m_result_ptr;
+
+ inline void exec_range(const Member ibeg, const Member iend,
+ reference_type update) const {
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ m_iter(iwork, update);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ const ReducerType& reducer = m_iter.m_func.get_reducer();
+ const size_t pool_reduce_bytes = reducer.value_size();
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+#ifndef KOKKOS_COMPILER_INTEL
+ if (execute_in_serial(m_iter.m_rp.space())) {
+ const pointer_type ptr =
+ m_result_ptr
+ ? m_result_ptr
+ : pointer_type(
+ m_instance->get_thread_data(0)->pool_reduce_local());
+
+ reference_type update = reducer.init(ptr);
+
+ ParallelReduce::exec_range(0, m_iter.m_rp.m_num_tiles, update);
+
+ reducer.final(ptr);
+
+ return;
+ }
+#endif
+
+ enum {
+ is_dynamic = std::is_same<typename Policy::schedule_type::type,
+ Kokkos::Dynamic>::value
+ };
+
+ const int pool_size = m_instance->thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ data.set_work_partition(m_iter.m_rp.m_num_tiles, 1);
+
+ if (is_dynamic) {
+ // Make sure work partition is set before stealing
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ reference_type update = reducer.init(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+ std::pair<int64_t, int64_t> range(0, 0);
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ ParallelReduce::exec_range(range.first, range.second, update);
+
+ } while (is_dynamic && 0 <= range.first);
+ }
+ // END #pragma omp parallel
+
+ // Reduction:
+
+ const pointer_type ptr =
+ pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+ for (int i = 1; i < pool_size; ++i) {
+ reducer.join(ptr,
+ reinterpret_cast<pointer_type>(
+ m_instance->get_thread_data(i)->pool_reduce_local()));
+ }
+
+ reducer.final(ptr);
+
+ if (m_result_ptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = ptr[j];
+ }
+ }
+ }
+
+ //----------------------------------------
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ MDRangePolicy arg_policy, const ViewType& arg_view)
+ : m_instance(nullptr),
+ m_iter(arg_policy, arg_functor_reducer),
+ m_result_ptr(arg_view.data()) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::OpenMP reduce result must be a View accessible from "
+ "HostSpace");
+ }
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ /**
+ * 1024 here is just our guess for a reasonable max tile size,
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>, Kokkos::OpenMP> {
+ private:
+ enum { TEAM_REDUCE_SIZE = 512 };
+
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename Policy::work_tag;
+ using SchedTag = typename Policy::schedule_type::type;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ OpenMPInternal* m_instance;
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const int m_shmem_size;
+
+ template <class TagType>
+ inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
+ const FunctorType& functor, HostThreadTeamData& data,
+ reference_type& update, const int league_rank_begin,
+ const int league_rank_end, const int league_size) {
+ for (int r = league_rank_begin; r < league_rank_end;) {
+ functor(Member(data, r, league_size), update);
+
+ if (++r < league_rank_end) {
+ // Don't allow team members to lap one another
+ // so that they don't overwrite shared memory.
+ if (data.team_rendezvous()) {
+ data.team_rendezvous_release();
+ }
+ }
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
+ const FunctorType& functor, HostThreadTeamData& data,
+ reference_type& update, const int league_rank_begin,
+ const int league_rank_end, const int league_size) {
+ const TagType t{};
+
+ for (int r = league_rank_begin; r < league_rank_end;) {
+ functor(t, Member(data, r, league_size), update);
+
+ if (++r < league_rank_end) {
+ // Don't allow team members to lap one another
+ // so that they don't overwrite shared memory.
+ if (data.team_rendezvous()) {
+ data.team_rendezvous_release();
+ }
+ }
+ }
+ }
+
+ public:
+ inline void execute() const {
+ enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
+
+ const ReducerType& reducer = m_functor_reducer.get_reducer();
+
+ if (m_policy.league_size() == 0 || m_policy.team_size() == 0) {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ return;
+ }
+
+ const size_t pool_reduce_size = reducer.value_size();
+
+ const size_t team_reduce_size = TEAM_REDUCE_SIZE * m_policy.team_size();
+ const size_t team_shared_size = m_shmem_size + m_policy.scratch_size(1);
+ const size_t thread_local_size = 0; // Never shrinks
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+ m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
+ team_shared_size, thread_local_size);
+
+ if (execute_in_serial(m_policy.space())) {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+ pointer_type ptr =
+ m_result_ptr ? m_result_ptr : pointer_type(data.pool_reduce_local());
+ reference_type update = reducer.init(ptr);
+ const int league_rank_begin = 0;
+ const int league_rank_end = m_policy.league_size();
+ ParallelReduce::template exec_team<WorkTag>(
+ m_functor_reducer.get_functor(), data, update, league_rank_begin,
+ league_rank_end, m_policy.league_size());
+
+ reducer.final(ptr);
+
+ return;
+ }
+
+ const int pool_size = m_instance->thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+ const int active = data.organize_team(m_policy.team_size());
+
+ if (active) {
+ data.set_work_partition(
+ m_policy.league_size(),
+ (0 < m_policy.chunk_size() ? m_policy.chunk_size()
+ : m_policy.team_iter()));
+ }
+
+ if (is_dynamic) {
+ // Must synchronize to make sure each team has set its
+ // partition before beginning the work stealing loop.
+ if (data.pool_rendezvous()) data.pool_rendezvous_release();
+ }
+
+ if (active) {
+ reference_type update = reducer.init(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+ std::pair<int64_t, int64_t> range(0, 0);
+
+ do {
+ range = is_dynamic ? data.get_work_stealing_chunk()
+ : data.get_work_partition();
+
+ ParallelReduce::template exec_team<WorkTag>(
+ m_functor_reducer.get_functor(), data, update, range.first,
+ range.second, m_policy.league_size());
+
+ } while (is_dynamic && 0 <= range.first);
+ } else {
+ reducer.init(reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+ }
+
+ data.disband_team();
+
+ // This thread has updated 'pool_reduce_local()' with its
+ // contributions to the reduction. The parallel region is
+ // about to terminate and the master thread will load and
+ // reduce each 'pool_reduce_local()' contribution.
+ // Must 'memory_fence()' to guarantee that storing the update to
+ // 'pool_reduce_local()' will complete before this thread
+ // exits the parallel region.
+
+ memory_fence();
+ }
+
+ // Reduction:
+
+ const pointer_type ptr =
+ pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+ for (int i = 1; i < pool_size; ++i) {
+ reducer.join(ptr,
+ reinterpret_cast<pointer_type>(
+ m_instance->get_thread_data(i)->pool_reduce_local()));
+ }
+
+ reducer.final(ptr);
+
+ if (m_result_ptr) {
+ const int n = reducer.value_count();
+
+ for (int j = 0; j < n; ++j) {
+ m_result_ptr[j] = ptr[j];
+ }
+ }
+ }
+
+ //----------------------------------------
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_instance(nullptr),
+ m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_shmem_size(
+ arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor_reducer.get_functor(), arg_policy.team_size())) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::OpenMP reduce result must be a View accessible from "
+ "HostSpace");
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* KOKKOS_OPENMP_PARALLEL_REDUCE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_PARALLEL_SCAN_HPP
+#define KOKKOS_OPENMP_PARALLEL_SCAN_HPP
+
+#include <omp.h>
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::OpenMP> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ using Analysis =
+ FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType, void>;
+
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+
+ OpenMPInternal* m_instance;
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update, const bool final) {
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(iwork, update, final);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update, const bool final) {
+ const TagType t{};
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(t, iwork, update, final);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ const int value_count = Analysis::value_count(m_functor);
+ const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+ if (execute_in_serial(m_policy.space())) {
+ typename Analysis::Reducer final_reducer(m_functor);
+
+ reference_type update = final_reducer.init(
+ pointer_type(m_instance->get_thread_data(0)->pool_reduce_local()));
+
+ ParallelScan::template exec_range<WorkTag>(m_functor, m_policy.begin(),
+ m_policy.end(), update, true);
+
+ return;
+ }
+
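+    // Two-pass scan: the first exec_range pass (final == false) accumulates
+    // each thread's partial total; the pool rendezvous below converts the
+    // per-thread totals into exclusive prefix offsets; the second pass
+    // (final == true) then re-runs the functor starting from each offset.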
+#pragma omp parallel num_threads(m_instance->thread_pool_size())
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+ typename Analysis::Reducer final_reducer(m_functor);
+
+ const WorkRange range(m_policy, omp_get_thread_num(),
+ omp_get_num_threads());
+
+ reference_type update_sum = final_reducer.init(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+ ParallelScan::template exec_range<WorkTag>(
+ m_functor, range.begin(), range.end(), update_sum, false);
+
+ if (data.pool_rendezvous()) {
+ pointer_type ptr_prev = nullptr;
+
+ const int n = omp_get_num_threads();
+
+ for (int i = 0; i < n; ++i) {
+ pointer_type ptr =
+ (pointer_type)data.pool_member(i)->pool_reduce_local();
+
+ if (i) {
+ for (int j = 0; j < value_count; ++j) {
+ ptr[j + value_count] = ptr_prev[j + value_count];
+ }
+ final_reducer.join(ptr + value_count, ptr_prev);
+ } else {
+ final_reducer.init(ptr + value_count);
+ }
+
+ ptr_prev = ptr;
+ }
+
+ data.pool_rendezvous_release();
+ }
+
+ reference_type update_base = final_reducer.reference(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
+ value_count);
+
+ ParallelScan::template exec_range<WorkTag>(
+ m_functor, range.begin(), range.end(), update_base, true);
+ }
+ }
+
+ //----------------------------------------
+
+ inline ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+ ReturnType, Kokkos::OpenMP> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ using Analysis = FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ReturnType>;
+
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ using value_type = typename Analysis::value_type;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+
+ OpenMPInternal* m_instance;
+ const FunctorType m_functor;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update, const bool final) {
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(iwork, update, final);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+ const FunctorType& functor, const Member ibeg, const Member iend,
+ reference_type update, const bool final) {
+ const TagType t{};
+ for (Member iwork = ibeg; iwork < iend; ++iwork) {
+ functor(t, iwork, update, final);
+ }
+ }
+
+ public:
+ inline void execute() const {
+ const int value_count = Analysis::value_count(m_functor);
+ const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(m_instance->m_instance_mutex);
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+ if (execute_in_serial(m_policy.space())) {
+ typename Analysis::Reducer final_reducer(m_functor);
+
+ reference_type update = final_reducer.init(
+ pointer_type(m_instance->get_thread_data(0)->pool_reduce_local()));
+
+ this->template exec_range<WorkTag>(m_functor, m_policy.begin(),
+ m_policy.end(), update, true);
+
+ *m_result_ptr = update;
+
+ return;
+ }
+
+#pragma omp parallel num_threads(m_instance->thread_pool_size())
+ {
+ HostThreadTeamData& data = *(m_instance->get_thread_data());
+ typename Analysis::Reducer final_reducer(m_functor);
+
+ const WorkRange range(m_policy, omp_get_thread_num(),
+ omp_get_num_threads());
+ reference_type update_sum = final_reducer.init(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+ ParallelScanWithTotal::template exec_range<WorkTag>(
+ m_functor, range.begin(), range.end(), update_sum, false);
+
+ if (data.pool_rendezvous()) {
+ pointer_type ptr_prev = nullptr;
+
+ const int n = omp_get_num_threads();
+
+ for (int i = 0; i < n; ++i) {
+ pointer_type ptr =
+ (pointer_type)data.pool_member(i)->pool_reduce_local();
+
+ if (i) {
+ for (int j = 0; j < value_count; ++j) {
+ ptr[j + value_count] = ptr_prev[j + value_count];
+ }
+ final_reducer.join(ptr + value_count, ptr_prev);
+ } else {
+ final_reducer.init(ptr + value_count);
+ }
+
+ ptr_prev = ptr;
+ }
+
+ data.pool_rendezvous_release();
+ }
+
+ reference_type update_base = final_reducer.reference(
+ reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
+ value_count);
+
+ ParallelScanWithTotal::template exec_range<WorkTag>(
+ m_functor, range.begin(), range.end(), update_base, true);
+
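+      // The highest-ranked thread owns the tail of the range, so its final
+      // update value is the grand total.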
+ if (omp_get_thread_num() == omp_get_num_threads() - 1) {
+ *m_result_ptr = update_base;
+ }
+ }
+ }
+
+ //----------------------------------------
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : m_instance(nullptr),
+ m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::OpenMP parallel_scan result must be host-accessible!");
+ m_instance = arg_policy.space().impl_internal_space_instance();
+ }
+
+ //----------------------------------------
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* KOKKOS_OPENMP_PARALLEL_REDUCE_SCAN_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <OpenMP/Kokkos_OpenMP_Task.hpp>
+#include <cassert>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::OpenMP, typename Kokkos::OpenMP::memory_space>;
+
+HostThreadTeamData& HostThreadTeamDataSingleton::singleton() {
+ static HostThreadTeamDataSingleton s;
+ return s;
+}
+
+HostThreadTeamDataSingleton::HostThreadTeamDataSingleton()
+ : HostThreadTeamData() {
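+  // Allocate one scratch block sized for the pool-reduce, team-reduce,
+  // team-shared, and thread-local partitions, then carve it up in place
+  // below.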
+ Kokkos::OpenMP::memory_space space;
+ const size_t num_pool_reduce_bytes = 32;
+ const size_t num_team_reduce_bytes = 32;
+ const size_t num_team_shared_bytes = 1024;
+ const size_t num_thread_local_bytes = 1024;
+ const size_t alloc_bytes = HostThreadTeamData::scratch_size(
+ num_pool_reduce_bytes, num_team_reduce_bytes, num_team_shared_bytes,
+ num_thread_local_bytes);
+
+ void* ptr = space.allocate("Kokkos::Impl::HostThreadTeamData", alloc_bytes);
+
+ HostThreadTeamData::scratch_assign(
+ ptr, alloc_bytes, num_pool_reduce_bytes, num_team_reduce_bytes,
+ num_team_shared_bytes, num_thread_local_bytes);
+}
+
+HostThreadTeamDataSingleton::~HostThreadTeamDataSingleton() {
+ Kokkos::OpenMP::memory_space space;
+ space.deallocate(HostThreadTeamData::scratch_buffer(),
+ static_cast<size_t>(HostThreadTeamData::scratch_bytes()));
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+#else
+void KOKKOS_CORE_SRC_OPENMP_KOKKOS_OPENMP_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_OPENMP ) && defined( \
+ KOKKOS_ENABLE_TASKDAG ) */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_OPENMP_TASK_HPP
#define KOKKOS_IMPL_OPENMP_TASK_HPP
#include <Kokkos_Macros.hpp>
#if defined(KOKKOS_ENABLE_OPENMP) && defined(KOKKOS_ENABLE_TASKDAG)
+#include <Kokkos_Atomic.hpp>
#include <Kokkos_TaskScheduler_fwd.hpp>
#include <impl/Kokkos_HostThreadTeam.hpp>
-#include <Kokkos_OpenMP.hpp>
+#include <OpenMP/Kokkos_OpenMP.hpp>
+
+#include <impl/Kokkos_TaskTeamMember.hpp>
#include <type_traits>
#include <cassert>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
execution_space().impl_internal_space_instance();
const int pool_size = get_max_team_count(scheduler.get_execution_space());
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(instance->m_instance_mutex);
+
// TODO @tasking @new_feature DSH allow team sizes other than 1
const int team_size = 1; // Threads per core
instance->resize_thread_data(0, /* global reduce buffer */
using task_base_type = typename scheduler_type::task_base;
using queue_type = typename scheduler_type::queue_type;
- if (1 == OpenMP::impl_thread_pool_size()) {
+ execution_space exec;
+ if (1 == exec.impl_thread_pool_size()) {
task_base_type* const end = (task_base_type*)task_base_type::EndTag;
HostThreadTeamData& team_data_single =
Impl::OpenMPInternal* instance =
execution_space().impl_internal_space_instance();
- const int pool_size = OpenMP::impl_thread_pool_size();
+ const int pool_size = instance->thread_pool_size();
+
+ // Serialize kernels on the same execution space instance
+ std::lock_guard<std::mutex> lock(instance->m_instance_mutex);
const int team_size = 1; // Threads per core
instance->resize_thread_data(0 /* global reduce buffer */
0 /* thread local buffer */
);
assert(pool_size % team_size == 0);
+
auto& queue = scheduler.queue();
queue.initialize_team_queues(pool_size / team_size);
// If 0 == m_ready_count then set task = 0
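+    // Read m_ready_count with acquire semantics instead of the former racy
+    // volatile read.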
- if (*((volatile int*)&team_queue.m_ready_count) > 0) {
+ if (desul::atomic_load(&team_queue.m_ready_count,
+ desul::MemoryOrderAcquire(),
+ desul::MemoryScopeDevice()) > 0) {
task = end;
// Attempt to acquire a task
// Loop by priority and then type
} // namespace Impl
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_OPENMP_TEAM_HPP
#define KOKKOS_OPENMP_TEAM_HPP
using traits = PolicyTraits<Properties...>;
- const typename traits::execution_space& space() const {
- static typename traits::execution_space m_space;
- return m_space;
- }
+ const typename traits::execution_space& space() const { return m_space; }
template <class ExecSpace, class... OtherProperties>
friend class TeamPolicyInternal;
m_chunk_size = p.m_chunk_size;
m_tune_team = p.m_tune_team;
m_tune_vector = p.m_tune_vector;
+ m_space = p.m_space;
}
//----------------------------------------
template <class FunctorType>
int team_size_max(const FunctorType&, const ParallelForTag&) const {
- int pool_size = traits::execution_space::impl_thread_pool_size(1);
+ int pool_size = m_space.impl_thread_pool_size(1);
int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
return pool_size < max_host_team_size ? pool_size : max_host_team_size;
}
template <class FunctorType>
int team_size_max(const FunctorType&, const ParallelReduceTag&) const {
- int pool_size = traits::execution_space::impl_thread_pool_size(1);
+ int pool_size = m_space.impl_thread_pool_size(1);
int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
return pool_size < max_host_team_size ? pool_size : max_host_team_size;
}
}
template <class FunctorType>
int team_size_recommended(const FunctorType&, const ParallelForTag&) const {
- return traits::execution_space::impl_thread_pool_size(2);
+ return m_space.impl_thread_pool_size(2);
}
template <class FunctorType>
int team_size_recommended(const FunctorType&,
const ParallelReduceTag&) const {
- return traits::execution_space::impl_thread_pool_size(2);
+ return m_space.impl_thread_pool_size(2);
}
template <class FunctorType, class ReducerType>
inline int team_size_recommended(const FunctorType& f, const ReducerType&,
bool m_tune_team;
bool m_tune_vector;
+ typename traits::execution_space m_space;
+
inline void init(const int league_size_request, const int team_size_request) {
- const int pool_size = traits::execution_space::impl_thread_pool_size(0);
- const int team_grain = traits::execution_space::impl_thread_pool_size(2);
+ const int pool_size = m_space.impl_thread_pool_size(0);
+ const int team_grain = m_space.impl_thread_pool_size(2);
const int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
const int team_max =
((pool_size < max_host_team_size) ? pool_size : max_host_team_size);
// Round team size up to a multiple of 'team_grain'
const int team_size_grain =
team_grain * ((m_team_size + team_grain - 1) / team_grain);
+
+  // More helpful than "floating point exception occurred"
+ if (0 == team_size_grain) {
+ Kokkos::abort("Kokkos::abort: Requested Team Size rounded up to 0!");
+ }
const int team_count = pool_size / team_size_grain;
// Constraint : pool_size = m_team_alloc * team_count
m_team_alloc = pool_size / team_count;
- // Maxumum number of iterations each team will take:
+ // Maximum number of iterations each team will take:
m_team_iter = (m_league_size + team_count - 1) / team_count;
set_auto_chunk_size();
}
/** \brief Specify league size, request team size */
- TeamPolicyInternal(const typename traits::execution_space&,
+ TeamPolicyInternal(const typename traits::execution_space& space,
int league_size_request, int team_size_request,
int /* vector_length_request */ = 1)
: m_team_scratch_size{0, 0},
m_thread_scratch_size{0, 0},
m_chunk_size(0),
m_tune_team(false),
- m_tune_vector(false) {
+ m_tune_vector(false),
+ m_space(space) {
init(league_size_request, team_size_request);
}
- TeamPolicyInternal(const typename traits::execution_space&,
+ TeamPolicyInternal(const typename traits::execution_space& space,
int league_size_request,
const Kokkos::AUTO_t& /* team_size_request */
,
m_thread_scratch_size{0, 0},
m_chunk_size(0),
m_tune_team(true),
- m_tune_vector(false) {
- init(league_size_request,
- traits::execution_space::impl_thread_pool_size(2));
+ m_tune_vector(false),
+ m_space(space) {
+ init(league_size_request, m_space.impl_thread_pool_size(2));
}
- TeamPolicyInternal(const typename traits::execution_space&,
+ TeamPolicyInternal(const typename traits::execution_space& space,
int league_size_request,
const Kokkos::AUTO_t& /* team_size_request */
,
m_thread_scratch_size{0, 0},
m_chunk_size(0),
m_tune_team(true),
- m_tune_vector(true) {
- init(league_size_request,
- traits::execution_space::impl_thread_pool_size(2));
+ m_tune_vector(true),
+ m_space(space) {
+ init(league_size_request, m_space.impl_thread_pool_size(2));
}
- TeamPolicyInternal(const typename traits::execution_space&,
+ TeamPolicyInternal(const typename traits::execution_space& space,
int league_size_request, const int team_size_request,
const Kokkos::AUTO_t& /* vector_length_request */)
: m_team_scratch_size{0, 0},
m_thread_scratch_size{0, 0},
m_chunk_size(0),
m_tune_team(false),
- m_tune_vector(true) {
+ m_tune_vector(true),
+ m_space(space) {
init(league_size_request, team_size_request);
}
m_chunk_size(0),
m_tune_team(true),
m_tune_vector(false) {
- init(league_size_request,
- traits::execution_space::impl_thread_pool_size(2));
+ init(league_size_request, m_space.impl_thread_pool_size(2));
}
TeamPolicyInternal(int league_size_request,
m_chunk_size(0),
m_tune_team(true),
m_tune_vector(true) {
- init(league_size_request,
- traits::execution_space::impl_thread_pool_size(2));
+ init(league_size_request, m_space.impl_thread_pool_size(2));
}
TeamPolicyInternal(int league_size_request, int team_size_request,
private:
/** \brief finalize chunk_size if it was set to AUTO*/
inline void set_auto_chunk_size() {
- int concurrency =
- traits::execution_space::impl_thread_pool_size(0) / m_team_alloc;
+ int concurrency = m_space.impl_thread_pool_size(0) / m_team_alloc;
if (concurrency == 0) concurrency = 1;
if (m_chunk_size > 0) {
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_UNIQUE_TOKEN_HPP
+#define KOKKOS_OPENMP_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_UniqueToken.hpp>
+
+namespace Kokkos::Experimental {
+template <>
+class UniqueToken<OpenMP, UniqueTokenScope::Instance> {
+ public:
+ using execution_space = OpenMP;
+ using size_type = int;
+
+ private:
+ using buffer_type = Kokkos::View<uint32_t*, Kokkos::HostSpace>;
+ execution_space m_exec;
+ size_type m_count;
+ buffer_type m_buffer_view;
+ uint32_t volatile* m_buffer;
+
+ public:
+  /// \brief create an object sized for the concurrency of the given instance
+ ///
+ /// This object should not be shared between instances
+ UniqueToken(execution_space const& exec = execution_space()) noexcept
+ : m_exec(exec),
+ m_count(m_exec.impl_thread_pool_size()),
+ m_buffer_view(buffer_type()),
+ m_buffer(nullptr) {}
+
+ UniqueToken(size_type max_size,
+ execution_space const& exec = execution_space())
+ : m_exec(exec),
+ m_count(max_size),
+ m_buffer_view("UniqueToken::m_buffer_view",
+ ::Kokkos::Impl::concurrent_bitset::buffer_bound(m_count)),
+ m_buffer(m_buffer_view.data()) {}
+
+ /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+ KOKKOS_INLINE_FUNCTION
+ int size() const noexcept {
+ KOKKOS_IF_ON_HOST((return m_count;))
+
+ KOKKOS_IF_ON_DEVICE((return 0;))
+ }
+
+ /// \brief acquire value such that 0 <= value < size()
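+  /// When the token count covers the whole thread pool, the calling thread's
+  /// pool rank is already unique and is returned directly; otherwise a slot
+  /// is taken from the shared concurrent bitset.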
+ KOKKOS_INLINE_FUNCTION
+ int acquire() const noexcept {
+ KOKKOS_IF_ON_HOST(
+ (if (m_count >= m_exec.impl_thread_pool_size()) return m_exec
+ .impl_thread_pool_rank();
+ const ::Kokkos::pair<int, int> result =
+ ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
+ m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
+
+ if (result.first < 0) {
+ ::Kokkos::abort(
+ "UniqueToken<OpenMP> failure to acquire tokens, no tokens "
+ "available");
+ }
+
+ return result.first;))
+
+ KOKKOS_IF_ON_DEVICE((return 0;))
+ }
+
+  /// \brief release a value obtained from acquire()
+ KOKKOS_INLINE_FUNCTION
+ void release(int i) const noexcept {
+ KOKKOS_IF_ON_HOST((if (m_count < m_exec.impl_thread_pool_size()) {
+ ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+ }))
+
+ KOKKOS_IF_ON_DEVICE(((void)i;))
+ }
+};
+
+template <>
+class UniqueToken<OpenMP, UniqueTokenScope::Global> {
+ public:
+ using execution_space = OpenMP;
+ using size_type = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+ ///
+ /// This object should not be shared between instances
+ UniqueToken(execution_space const& = execution_space()) noexcept {}
+
+ /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+ KOKKOS_INLINE_FUNCTION
+ int size() const noexcept {
+ KOKKOS_IF_ON_HOST(
+ (return Kokkos::Impl::OpenMPInternal::max_hardware_threads();))
+
+ KOKKOS_IF_ON_DEVICE((return 0;))
+ }
+
+ /// \brief acquire value such that 0 <= value < size()
+ // FIXME this is wrong when using nested parallelism. In that case multiple
+ // threads have the same thread ID.
+ KOKKOS_INLINE_FUNCTION
+ int acquire() const noexcept {
+ KOKKOS_IF_ON_HOST((return omp_get_thread_num();))
+
+ KOKKOS_IF_ON_DEVICE((return 0;))
+ }
+
+  /// \brief release a value obtained from acquire()
+ KOKKOS_INLINE_FUNCTION
+ void release(int) const noexcept {}
+};
+} // namespace Kokkos::Experimental
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
+#define KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
+
+#include <OpenMP/Kokkos_OpenMP.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+ Kokkos::OpenMP> {
+ private:
+ using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+ Policy m_policy;
+ FunctorType m_functor;
+
+ template <class TagType>
+ std::enable_if_t<std::is_void<TagType>::value> exec_one(
+ const std::int32_t w) const noexcept {
+ m_functor(w);
+ }
+
+ template <class TagType>
+ std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+ const std::int32_t w) const noexcept {
+ const TagType t{};
+ m_functor(t, w);
+ }
+
+ public:
+ inline void execute() {
+ // We need to introduce pool_size to work around NVHPC 22.5 ICE
+ // We need to use [[maybe_unused]] to work around an unused-variable warning
+ // from HIP
+ OpenMP exec;
+ [[maybe_unused]] int pool_size = exec.impl_thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+ {
+ // Spin until COMPLETED_TOKEN.
+ // END_TOKEN indicates no work is currently available.
+
+ for (std::int32_t w = Policy::END_TOKEN;
+ Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+ if (Policy::END_TOKEN != w) {
+ exec_one<typename Policy::work_tag>(w);
+ m_policy.completed_work(w);
+ }
+ }
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* #define KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_OPENMPTARGET_HPP
#define KOKKOS_OPENMPTARGET_HPP
#include <cstddef>
#include <iosfwd>
-#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTargetSpace.hpp>
#include <Kokkos_ScratchSpace.hpp>
#include <Kokkos_Parallel.hpp>
-#include <Kokkos_TaskScheduler.hpp>
#include <Kokkos_Layout.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
#include <impl/Kokkos_InitializationSettings.hpp>
using scratch_memory_space = ScratchMemorySpace<OpenMPTarget>;
- inline static bool in_parallel() { return omp_in_parallel(); }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED inline static bool in_parallel() {
+ return omp_in_parallel();
+ }
+#endif
static void fence(const std::string& name =
"Kokkos::OpenMPTarget::fence: Unnamed Instance Fence");
static void impl_static_fence(const std::string& name);
/** \brief Return the maximum amount of concurrency. */
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int concurrency();
+#else
+ int concurrency() const;
+#endif
//! Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
uint32_t impl_instance_id() const noexcept;
private:
+ friend bool operator==(OpenMPTarget const& lhs, OpenMPTarget const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(OpenMPTarget const& lhs, OpenMPTarget const& rhs) {
+ return !(lhs == rhs);
+ }
Impl::OpenMPTargetInternal* m_space_instance;
};
} // namespace Experimental
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp>
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel_MDRange.hpp>
-#include <OpenMPTarget/Kokkos_OpenMPTarget_Task.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelFor_MDRange.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelReduce_MDRange.hpp>
/*--------------------------------------------------------------------------*/
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <algorithm>
+#include <omp.h>
+
+/*--------------------------------------------------------------------------*/
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <memory.h>
+
+#include <iostream>
+#include <sstream>
+#include <cstring>
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTargetSpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <Kokkos_Atomic.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+/* Default allocation mechanism */
+OpenMPTargetSpace::OpenMPTargetSpace() {}
+
+void* OpenMPTargetSpace::impl_allocate(
+
+ const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ static_assert(sizeof(void*) == sizeof(uintptr_t),
+ "Error sizeof(void*) != sizeof(uintptr_t)");
+
+ void* ptr = omp_target_alloc(arg_alloc_size, omp_get_default_device());
+
+ if (!ptr) {
+ Kokkos::Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+
+ return ptr;
+}
+
+void* OpenMPTargetSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void* OpenMPTargetSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void OpenMPTargetSpace::impl_deallocate(
+ const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+ if (arg_alloc_ptr) {
+ omp_target_free(arg_alloc_ptr, omp_get_default_device());
+ }
+}
+
+void OpenMPTargetSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void OpenMPTargetSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const
+
+{
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::Experimental::OpenMPTargetSpace);
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_OPENMPTARGETSPACE_HPP
+#define KOKKOS_OPENMPTARGETSPACE_HPP
+
+#include <cstring>
+#include <string>
+#include <iosfwd>
+#include <typeinfo>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_DeepCopy.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <omp.h>
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------
+
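+// Host and device memory are mutually inaccessible (accessible == false in
+// both directions below) but can be moved with deep_copy.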
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+ Kokkos::Experimental::OpenMPTargetSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+ Kokkos::HostSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+
+/// \class OpenMPTargetSpace
+/// \brief Memory management for OpenMP target device memory.
+///
+/// OpenMPTargetSpace is a memory space that governs allocations on the
+/// default OpenMP target device. This memory is not directly accessible
+/// from the host.
+class OpenMPTargetSpace {
+ public:
+ //! Tag this class as a kokkos memory space
+ using memory_space = OpenMPTargetSpace;
+ using size_type = unsigned;
+
+ /// \typedef execution_space
+ /// \brief Default execution space for this memory space.
+ ///
+ /// Every memory space has a default execution space. This is
+ /// useful for things like initializing a View (which happens in
+ /// parallel using the View's default execution space).
+ using execution_space = Kokkos::Experimental::OpenMPTarget;
+
+ //! This memory space preferred device_type
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+
+ /*--------------------------------*/
+
+ /**\brief Default memory space instance */
+ OpenMPTargetSpace();
+ OpenMPTargetSpace(OpenMPTargetSpace&& rhs) = default;
+ OpenMPTargetSpace(const OpenMPTargetSpace& rhs) = default;
+ OpenMPTargetSpace& operator=(OpenMPTargetSpace&&) = default;
+ OpenMPTargetSpace& operator=(const OpenMPTargetSpace&) = default;
+ ~OpenMPTargetSpace() = default;
+
+ /**\brief Allocate untracked memory in the space */
+ // FIXME_OPENMPTARGET Use execution space instance
+ void* allocate(const OpenMPTarget&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ // FIXME_OPENMPTARGET Use execution space instance
+ void* allocate(const OpenMPTarget&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ /**\brief Deallocate untracked memory in the space */
+ void deallocate(void* const arg_alloc_ptr,
+ const std::size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ static constexpr const char* name() { return "OpenMPTargetSpace"; }
+
+ private:
+ void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+ void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0,
+ const Kokkos::Tools::SpaceHandle =
+ Kokkos::Tools::make_space_handle(name())) const;
+};
+} // namespace Experimental
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION(
+ Kokkos::Experimental::OpenMPTargetSpace);
+
+#endif
+#endif /* #define KOKKOS_OPENMPTARGETSPACE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_ABORT_HPP
+#define KOKKOS_OPENMPTARGET_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+#include <cstdio>   // fprintf, stderr
+#include <cstdlib>  // std::abort
+
+namespace Kokkos {
+namespace Impl {
+
+KOKKOS_INLINE_FUNCTION void OpenMPTarget_abort(char const *msg) {
+ fprintf(stderr, "%s.\n", msg);
+ std::abort();
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_OPENMPTARGET_DEEP_COPY_HPP
+#define KOKKOS_OPENMPTARGET_DEEP_COPY_HPP
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// TODO: implement all possible deep_copies
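+// The three specializations below cover device-to-device, host-to-device,
+// and device-to-host copies; residency is encoded by passing
+// omp_get_default_device() or omp_get_initial_device() to omp_target_memcpy.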
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace,
+ Kokkos::Experimental::OpenMPTargetSpace, ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+    // Guard against zero-sized copies: in Release and RelWithDebInfo builds
+    // omp_target_memcpy can error out when n == 0. It returns zero on
+    // success.
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_default_device(),
+ omp_get_default_device()));
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, OpenMPTargetSpace>: fence "
+ "before "
+ "copy");
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_default_device(),
+ omp_get_default_device()));
+ }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace, HostSpace,
+ ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_default_device(),
+ omp_get_initial_device()));
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, HostSpace>: fence before "
+ "copy");
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_default_device(),
+ omp_get_initial_device()));
+ }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace,
+ ExecutionSpace> {
+ DeepCopy(void* dst, const void* src, size_t n) {
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_initial_device(),
+ omp_get_default_device()));
+ }
+ DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+ exec.fence(
+ "Kokkos::Impl::DeepCopy<HostSpace, OpenMPTargetSpace>: fence before "
+ "copy");
+ if (n > 0)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ dst, const_cast<void*>(src), n, 0, 0, omp_get_initial_device(),
+ omp_get_default_device()));
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // KOKKOS_OPENMPTARGET_DEEP_COPY_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_ERROR_HPP
+#define KOKKOS_OPENMPTARGET_ERROR_HPP
+
+#include <impl/Kokkos_Error.hpp>
+#include <sstream>
+
+namespace Kokkos {
+namespace Impl {
+
+inline void ompt_internal_safe_call(int e, const char* name,
+ const char* file = nullptr,
+ const int line = 0) {
+ if (e != 0) {
+ std::ostringstream out;
+ out << name << " return value of " << e << " indicates failure";
+ if (file) {
+ out << " " << file << ":" << line;
+ }
+ throw_runtime_exception(out.str());
+ }
+}
+
+#define KOKKOS_IMPL_OMPT_SAFE_CALL(call) \
+ Kokkos::Impl::ompt_internal_safe_call(call, #call, __FILE__, __LINE__)
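+
+// Usage: wrap OpenMP runtime calls that return an error code, e.g.
+//   KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(...));
+// so a nonzero return becomes a runtime exception with file and line info.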
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_FUNCTOR_ADAPTER_HPP
+#define KOKKOS_OPENMPTARGET_FUNCTOR_ADAPTER_HPP
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Macros.hpp>
+#include <type_traits>
+
+namespace Kokkos::Experimental::Impl {
+
+template <class Functor, class Policy>
+class FunctorAdapter {
+ Functor m_functor;
+ using WorkTag = typename Policy::work_tag;
+
+ public:
+ FunctorAdapter() = default;
+ FunctorAdapter(Functor const &functor) : m_functor(functor) {}
+
+ Functor get_functor() const { return m_functor; }
+
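+  // Invoke the wrapped functor, prepending a default-constructed WorkTag
+  // when the policy carries one; untagged functors are called unchanged.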
+ template <class... Args>
+ KOKKOS_FUNCTION void operator()(Args &&...args) const {
+ if constexpr (std::is_void_v<WorkTag>) {
+ m_functor(static_cast<Args &&>(args)...);
+ } else {
+ m_functor(WorkTag(), static_cast<Args &&>(args)...);
+ }
+ }
+};
+
+} // namespace Kokkos::Experimental::Impl
+
+#endif // KOKKOS_OPENMPTARGET_FUNCTOR_ADAPTER_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#endif
#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
// constructor. undef'ed at the end
#define KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
-#include <Kokkos_OpenMPTarget.hpp>
+#include <Kokkos_Core.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget.hpp>
#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
#include <OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp>
#include <impl/Kokkos_ExecSpaceManager.hpp>
[&]() {});
}
}
-int OpenMPTargetInternal::concurrency() { return 128000; }
+int OpenMPTargetInternal::concurrency() const {
+ int max_threads = 2048 * 80;
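+  // Rough upper bound on in-flight threads: resident threads per SM times
+  // the SM count; the default corresponds to 2048 threads on each of 80 SMs.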
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ int max_threads_sm = 2048;
+#if defined(KOKKOS_ARCH_AMPERE86)
+ max_threads = max_threads_sm * 84;
+#elif defined(KOKKOS_ARCH_AMPERE80)
+ max_threads = max_threads_sm * 108;
+#elif defined(KOKKOS_ARCH_VOLTA72)
+ max_threads = max_threads_sm * 84;
+#elif defined(KOKKOS_ARCH_VOLTA70)
+ max_threads = max_threads_sm * 80;
+#elif defined(KOKKOS_ARCH_PASCAL60) || defined(KOKKOS_ARCH_PASCAL61)
+ max_threads = max_threads_sm * 60;
+#endif
+#elif defined(KOKKOS_ARCH_INTEL_GPU)
+#pragma omp target map(max_threads)
+ { max_threads = omp_get_num_procs(); }
+
+ // Multiply the number of processors with the SIMD length.
+ max_threads *= 32;
+#endif
+
+ return max_threads;
+}
const char* OpenMPTargetInternal::name() { return "OpenMPTarget"; }
void OpenMPTargetInternal::print_configuration(std::ostream& os,
bool /*verbose*/) const {
// FIXME_OPENMPTARGET
os << "Using OpenMPTarget\n";
+#if defined(KOKKOS_IMPL_OPENMPTARGET_HIERARCHICAL_INTEL_GPU)
+  os << "Defined KOKKOS_IMPL_OPENMPTARGET_HIERARCHICAL_INTEL_GPU: Workaround "
+        "for hierarchical parallelism for Intel GPUs.";
+#endif
}
void OpenMPTargetInternal::impl_finalize() {
m_is_initialized = false;
- Kokkos::Impl::OpenMPTargetExec space;
- if (space.m_lock_array != nullptr) space.clear_lock_array();
- if (space.m_uniquetoken_ptr != nullptr)
+ if (m_uniquetoken_ptr != nullptr)
Kokkos::kokkos_free<Kokkos::Experimental::OpenMPTargetSpace>(
- space.m_uniquetoken_ptr);
+ m_uniquetoken_ptr);
}
+
void OpenMPTargetInternal::impl_initialize() {
m_is_initialized = true;
// FIXME_OPENMPTARGET: Only fix the number of teams for NVIDIA architectures
// from Pascal and upwards.
-#if defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
- defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
-#if defined(KOKKOS_COMPILER_CLANG) && (KOKKOS_COMPILER_CLANG >= 1300)
+  // FIXME_OPENMPTARGET: the Cray compiler has not yet implemented
+  // omp_set_num_teams.
+#if !defined(KOKKOS_COMPILER_CRAY_LLVM)
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU) && defined(KOKKOS_COMPILER_CLANG) && \
+ (KOKKOS_COMPILER_CLANG >= 1300)
omp_set_num_teams(512);
#endif
#endif
return &self;
}
-} // Namespace Impl
+void OpenMPTargetInternal::verify_is_process(const char* const label) {
+  // Fails if the current task is inside a parallel region while not running
+  // on the host (initial) device.
+ if (omp_in_parallel() && (!omp_is_initial_device())) {
+ std::string msg(label);
+ msg.append(" ERROR: in parallel or on device");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+}
+
+void OpenMPTargetInternal::verify_initialized(const char* const label) {
+ if (0 == Kokkos::Experimental::OpenMPTarget().impl_is_initialized()) {
+ std::string msg(label);
+ msg.append(" ERROR: not initialized");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+}
+
+void OpenMPTargetInternal::clear_scratch() {
+ Kokkos::Experimental::OpenMPTargetSpace space;
+ space.deallocate(m_scratch_ptr, m_scratch_size);
+ m_scratch_ptr = nullptr;
+ m_scratch_size = 0;
+}
+
+void* OpenMPTargetInternal::get_scratch_ptr() { return m_scratch_ptr; }
+
+void OpenMPTargetInternal::resize_scratch(int64_t team_size,
+ int64_t shmem_size_L0,
+ int64_t shmem_size_L1,
+ int64_t league_size) {
+ Kokkos::Experimental::OpenMPTargetSpace space;
+  // With clang 17 and higher, level-0 scratch is provided by the LLVM
+  // OpenMP extension `ompx_dyn_cgroup_mem`.
+#if defined(KOKKOS_IMPL_OPENMPTARGET_LLVM_EXTENSIONS)
+ shmem_size_L0 = 0;
+#endif
+ const int64_t shmem_size =
+ shmem_size_L0 + shmem_size_L1; // L0 + L1 scratch memory per team.
+ const int64_t padding = shmem_size * 10 / 100; // Padding per team.
+
+ // Maximum active teams possible.
+ // The number should not exceed the maximum in-flight teams possible or the
+ // league_size.
+ int max_active_teams =
+ std::min(OpenMPTargetInternal::concurrency() / team_size, league_size);
+
+ // max_active_teams is the number of active teams on the given hardware.
+  // We request twice max_active_teams so the compiler is free to pick a
+  // suitable team count at or below that bound.
+  // FIXME_OPENMPTARGET: the Cray compiler has not yet implemented
+  // omp_set_num_teams.
+#if !defined(KOKKOS_COMPILER_CRAY_LLVM)
+ omp_set_num_teams(max_active_teams * 2);
+#endif
+
+  // The total amount of scratch memory allocated is dependent
+ // on the maximum number of in-flight teams possible.
+ int64_t total_size =
+ (shmem_size +
+ ::Kokkos::Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE + padding) *
+ max_active_teams * 2;
+
+ if (total_size > m_scratch_size) {
+ space.deallocate(m_scratch_ptr, m_scratch_size);
+ m_scratch_size = total_size;
+ m_scratch_ptr = space.allocate(total_size);
+ }
+}
+
+} // namespace Impl
OpenMPTarget::OpenMPTarget()
: m_space_instance(Impl::OpenMPTargetInternal::impl_singleton()) {}
return m_space_instance->impl_get_instance_id();
}
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
int OpenMPTarget::concurrency() {
return Impl::OpenMPTargetInternal::impl_singleton()->concurrency();
}
+#else
+int OpenMPTarget::concurrency() const {
+ return m_space_instance->concurrency();
+}
+#endif
void OpenMPTarget::fence(const std::string& name) {
Impl::OpenMPTargetInternal::impl_singleton()->fence(name);
name, Kokkos::Experimental::Impl::openmp_fence_is_static::yes);
}
-void OpenMPTarget::impl_initialize(InitializationSettings const&) {
+void OpenMPTarget::impl_initialize(InitializationSettings const& settings) {
+ using Kokkos::Impl::get_visible_devices;
+ std::vector<int> const& visible_devices = get_visible_devices();
+ using Kokkos::Impl::get_gpu;
+ const int device_num = get_gpu(settings).value_or(visible_devices[0]);
+ omp_set_default_device(device_num);
+
Impl::OpenMPTargetInternal::impl_singleton()->impl_initialize();
}
void OpenMPTarget::impl_finalize() {
UniqueToken<Kokkos::Experimental::OpenMPTarget,
Kokkos::Experimental::UniqueTokenScope::Global>::
- UniqueToken(Kokkos::Experimental::OpenMPTarget const&) {
+ UniqueToken(Kokkos::Experimental::OpenMPTarget const& space) {
#ifdef KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
- uint32_t* ptr = Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr;
+ uint32_t* ptr = space.impl_internal_space_instance()->m_uniquetoken_ptr;
int count = Kokkos::Experimental::OpenMPTarget().concurrency();
if (ptr == nullptr) {
int size = count * sizeof(uint32_t);
Kokkos::kokkos_malloc<Kokkos::Experimental::OpenMPTargetSpace>(
"Kokkos::OpenMPTarget::m_uniquetoken_ptr", size));
std::vector<uint32_t> h_buf(count, 0);
- OMPT_SAFE_CALL(omp_target_memcpy(ptr, h_buf.data(), size, 0, 0,
- omp_get_default_device(),
- omp_get_initial_device()));
+ if (0 < size)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(ptr, h_buf.data(), size, 0,
+ 0, omp_get_default_device(),
+ omp_get_initial_device()));
- Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr = ptr;
+ space.impl_internal_space_instance()->m_uniquetoken_ptr = ptr;
}
#else
// FIXME_OPENMPTARGET - two non-working implementations for filling `ptr`:
// Version 1 - Creating a target region and filling the
// pointer Error - CUDA error: named symbol not found
#pragma omp target teams distribute parallel for is_device_ptr(ptr) \
- map(to \
- : size)
+ map(to : size)
for (int i = 0; i < count; ++i) ptr[i] = 0;
// Version 2 : Allocating a view on the device and filling it with a scalar
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_INSTANCE_HPP
+#define KOKKOS_OPENMPTARGET_INSTANCE_HPP
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+enum class openmp_fence_is_static { yes, no };
+
+class OpenMPTargetInternal {
+ private:
+ OpenMPTargetInternal() = default;
+ OpenMPTargetInternal(const OpenMPTargetInternal&) = delete;
+ OpenMPTargetInternal& operator=(const OpenMPTargetInternal&) = delete;
+
+ public:
+ void fence(openmp_fence_is_static is_static = openmp_fence_is_static::no);
+ void fence(const std::string& name,
+ openmp_fence_is_static is_static = openmp_fence_is_static::no);
+
+ /** \brief Return the maximum amount of concurrency. */
+ int concurrency() const;
+
+ //! Print configuration information to the given output stream.
+ void print_configuration(std::ostream& os, bool verbose) const;
+
+ static const char* name();
+
+ //! Free any resources being consumed by the device.
+ void impl_finalize();
+
+ //! Has been initialized
+ int impl_is_initialized();
+ uint32_t impl_get_instance_id() const noexcept;
+  //! Initialize, telling the OpenMP runtime which device to use.
+ void impl_initialize();
+
+ static OpenMPTargetInternal* impl_singleton();
+
+ static void verify_is_process(const char* const);
+ static void verify_initialized(const char* const);
+
+ void* get_scratch_ptr();
+ void clear_scratch();
+  void resize_scratch(int64_t team_size, int64_t shmem_size_L0,
+                      int64_t shmem_size_L1, int64_t league_size);
+
+ void* m_scratch_ptr = nullptr;
+ std::mutex m_mutex_scratch_ptr;
+ int64_t m_scratch_size = 0;
+ uint32_t* m_uniquetoken_ptr = nullptr;
+
+ private:
+ bool m_is_initialized = false;
+ uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
+ Kokkos::Experimental::OpenMPTarget>(reinterpret_cast<uintptr_t>(this));
+};
+} // namespace Impl
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif // KOKKOS_OPENMPTARGET_INSTANCE_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_MDRANGEPOLICY_HPP_
+#define KOKKOS_OPENMPTARGET_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+using OpenMPTargetIterateLeft = std::integral_constant<Iterate, Iterate::Left>;
+using OpenMPTargetIterateRight =
+ std::integral_constant<Iterate, Iterate::Right>;
+
+template <typename Rank,
+ ::Kokkos::Impl::TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Kokkos::Experimental::OpenMPTarget,
+ ThreadAndVector>
+ : AcceleratorBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_MACROS_HPP
+#define KOKKOS_OPENMPTARGET_MACROS_HPP
+
+// Intel GPU architectures prefer the classical hierarchical parallelism
+// expressed through plain OpenMP constructs.
+#if defined(KOKKOS_ARCH_INTEL_GPU)
+#define KOKKOS_IMPL_OPENMPTARGET_HIERARCHICAL_INTEL_GPU
+#endif
+
+// Define a macro when compiling with the llvm compiler, version 17 or newer,
+// for NVIDIA or AMD GPUs. It is useful in cases where non-standard llvm
+// OpenMP extensions can be used.
+#if defined(KOKKOS_COMPILER_CLANG) && (KOKKOS_COMPILER_CLANG >= 1700) && \
+ (defined(KOKKOS_ARCH_AMD_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU))
+#define KOKKOS_IMPL_OPENMPTARGET_LLVM_EXTENSIONS
+#endif
+
+#define KOKKOS_IMPL_OPENMPTARGET_PRAGMA_HELPER(x) _Pragma(#x)
+#define KOKKOS_IMPL_OMPTARGET_PRAGMA(x) \
+ KOKKOS_IMPL_OPENMPTARGET_PRAGMA_HELPER(omp target x)
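+
+// Expansion sketch: KOKKOS_IMPL_OMPTARGET_PRAGMA(teams num_teams(n)) becomes
+// _Pragma("omp target teams num_teams(n)"); the helper stringizes its
+// argument and prepends "omp target".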
+
+// Use scratch memory extensions to request dynamic shared memory for the
+// right compiler/architecture combination.
+#ifdef KOKKOS_IMPL_OPENMPTARGET_LLVM_EXTENSIONS
+#define KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(N) ompx_dyn_cgroup_mem(N)
+#else
+#define KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(N)
+#endif
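+
+// Usage sketch: combined with the pragma helper above, a team launch that
+// requests dynamic team scratch can be written once for both configurations:
+//
+//   KOKKOS_IMPL_OMPTARGET_PRAGMA(
+//       teams num_teams(n) KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(shmem_bytes))
+//
+// With llvm extensions this appends ompx_dyn_cgroup_mem(shmem_bytes); without
+// them the clause vanishes and the scratch must live in device HBM instead.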
+
+#endif // KOKKOS_OPENMPTARGET_MACROS_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <impl/Kokkos_Traits.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include "Kokkos_OpenMPTarget_Abort.hpp"
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Macros.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+class OpenMPTargetExecTeamMember {
+ public:
+ static constexpr int TEAM_REDUCE_SIZE = 512;
+
+ using execution_space = Kokkos::Experimental::OpenMPTarget;
+ using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = OpenMPTargetExecTeamMember;
+
+ scratch_memory_space m_team_shared;
+ size_t m_team_scratch_size[2];
+ int m_team_rank;
+ int m_team_size;
+ int m_league_rank;
+ int m_league_size;
+ int m_vector_length;
+ int m_vector_lane;
+ int m_shmem_block_index;
+ void* m_glb_scratch;
+ void* m_reduce_scratch;
+
+ public:
+ KOKKOS_INLINE_FUNCTION
+ const execution_space::scratch_memory_space& team_shmem() const {
+ return m_team_shared.set_team_thread_mode(0, 1, 0);
+ }
+
+  // Parameters of the set_team_thread_mode routine:
+  //   first  - scratch level.
+  //   second - size multiplier for advancing the scratch pointer after a
+  //            request has been serviced.
+  //   third  - offset multiplier from the current scratch pointer when
+  //            returning a pointer for a request.
+ KOKKOS_INLINE_FUNCTION
+ const execution_space::scratch_memory_space& team_scratch(int level) const {
+ return m_team_shared.set_team_thread_mode(level, 1, 0);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const execution_space::scratch_memory_space& thread_scratch(int level) const {
+ return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+ }
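+
+  // Illustrative sketch (not part of the interface): within a team lambda,
+  // level-0 team scratch is shared by the whole team while thread scratch is
+  // partitioned per thread:
+  //
+  //   double* team_buf = static_cast<double*>(
+  //       member.team_scratch(0).get_shmem(n * sizeof(double)));
+  //   double* my_buf = static_cast<double*>(
+  //       member.thread_scratch(0).get_shmem(m * sizeof(double)));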
+
+ KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+ KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
+ KOKKOS_INLINE_FUNCTION int team_rank() const { return m_team_rank; }
+ KOKKOS_INLINE_FUNCTION int team_size() const { return m_team_size; }
+ KOKKOS_INLINE_FUNCTION void* impl_reduce_scratch() const {
+ return m_reduce_scratch;
+ }
+
+ KOKKOS_INLINE_FUNCTION void team_barrier() const {
+#pragma omp barrier
+ }
+
+ template <class ValueType>
+ KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& value,
+ int thread_id) const {
+ // Make sure there is enough scratch space:
+ using type = std::conditional_t<(sizeof(ValueType) < TEAM_REDUCE_SIZE),
+ ValueType, void>;
+ type* team_scratch =
+ reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
+ TEAM_REDUCE_SIZE * omp_get_team_num());
+#pragma omp barrier
+ if (team_rank() == thread_id) *team_scratch = value;
+#pragma omp barrier
+ value = *team_scratch;
+ }
+
+ template <class Closure, class ValueType>
+ KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure& f, ValueType& value,
+ const int& thread_id) const {
+ f(value);
+ team_broadcast(value, thread_id);
+ }
+
+ template <typename ReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ team_reduce(ReducerType const& reducer) const noexcept {
+ team_reduce(reducer, reducer.reference());
+ }
+
+ // FIXME_OPENMPTARGET this function currently ignores the reducer passed.
+ template <typename ReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ team_reduce(ReducerType const&,
+ typename ReducerType::value_type& value) const noexcept {
+#pragma omp barrier
+
+ using value_type = typename ReducerType::value_type;
+ // const JoinLambdaAdapter<value_type, JoinOp> op(op_in);
+
+ // Make sure there is enough scratch space:
+ using type = std::conditional_t<(sizeof(value_type) < TEAM_REDUCE_SIZE),
+ value_type, void>;
+
+ const int n_values = TEAM_REDUCE_SIZE / sizeof(value_type);
+ type* team_scratch =
+ reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
+ TEAM_REDUCE_SIZE * omp_get_team_num());
+ for (int i = m_team_rank; i < n_values; i += m_team_size) {
+ team_scratch[i] = value_type();
+ }
+
+#pragma omp barrier
+
+ for (int k = 0; k < m_team_size; k += n_values) {
+ if ((k <= m_team_rank) && (k + n_values > m_team_rank))
+ team_scratch[m_team_rank % n_values] += value;
+#pragma omp barrier
+ }
+
+ for (int d = 1; d < n_values; d *= 2) {
+ if ((m_team_rank + d < n_values) && (m_team_rank % (2 * d) == 0)) {
+ team_scratch[m_team_rank] += team_scratch[m_team_rank + d];
+ }
+#pragma omp barrier
+ }
+ value = team_scratch[0];
+ }
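+
+  // Worked example of the reduction above (a sketch, not normative): with
+  // value_type = double, n_values = 512 / 8 = 64 scratch slots per team.
+  // Ranks first zero the slots; then, in rounds of 64 ranks, rank r adds its
+  // value into slot r % 64; finally a binary tree combines the slots so that
+  // slot 0 holds the team-wide total.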
+
+ /** \brief Intra-team exclusive prefix sum with team_rank() ordering
+ * with intra-team non-deterministic ordering accumulation.
+ *
+ * The global inter-team accumulation value will, at the end of the
+ * league's parallel execution, be the scan's total.
+ * Parallel execution ordering of the league's teams is non-deterministic.
+ * As such the base value for each team's scan operation is similarly
+ * non-deterministic.
+ */
+ template <typename ArgType>
+ KOKKOS_INLINE_FUNCTION ArgType
+ team_scan(const ArgType& /*value*/, ArgType* const /*global_accum*/) const {
+ // FIXME_OPENMPTARGET
+ /* // Make sure there is enough scratch space:
+ using type =
+ std::conditional_t<(sizeof(ArgType) < TEAM_REDUCE_SIZE), ArgType, void>;
+
+ volatile type * const work_value = ((type*) m_exec.scratch_thread());
+
+ *work_value = value ;
+
+ memory_fence();
+
+ if ( team_fan_in() ) {
+ // The last thread to synchronize returns true, all other threads wait
+ for team_fan_out()
+ // m_team_base[0] == highest ranking team member
+ // m_team_base[ m_team_size - 1 ] == lowest ranking team member
+ //
+ // 1) copy from lower to higher rank, initialize lowest rank to zero
+ // 2) prefix sum from lowest to highest rank, skipping lowest rank
+
+ type accum = 0 ;
+
+ if ( global_accum ) {
+ for ( int i = m_team_size ; i-- ; ) {
+ type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
+ )->scratch_thread()); accum += val ;
+ }
+ accum = atomic_fetch_add( global_accum , accum );
+ }
+
+ for ( int i = m_team_size ; i-- ; ) {
+ type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
+ )->scratch_thread()); const type offset = accum ; accum += val ; val =
+ offset ;
+ }
+
+ memory_fence();
+ }
+
+ team_fan_out();
+
+ return *work_value ;*/
+ return ArgType();
+ }
+
+ /** \brief Intra-team exclusive prefix sum with team_rank() ordering.
+ *
+ * The highest rank thread can compute the reduction total as
+ * reduction_total = dev.team_scan( value ) + value ;
+ */
+ template <typename Type>
+ KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
+ return this->template team_scan<Type>(value, 0);
+ }
+
+ //----------------------------------------
+ // Private for the driver
+
+ private:
+ using space = execution_space::scratch_memory_space;
+
+ public:
+  // FIXME_OPENMPTARGET - 512 (16*32) bytes at the beginning of the scratch
+  // space for each league are reserved for reduction. It should actually be
+  // based on the ValueType of the reduction variable.
+  inline OpenMPTargetExecTeamMember(
+      const int league_rank, const int league_size, const int team_size,
+      const int vector_length, void* const glb_scratch,
+      const int shmem_block_index, const size_t shmem_size_L0,
+      const size_t shmem_size_L1)
+ : m_team_scratch_size{shmem_size_L0, shmem_size_L1},
+ m_team_rank(0),
+ m_team_size(team_size),
+ m_league_rank(league_rank),
+ m_league_size(league_size),
+ m_vector_length(vector_length),
+ m_shmem_block_index(shmem_block_index),
+ m_glb_scratch(glb_scratch) {
+ const int omp_tid = omp_get_thread_num();
+
+ // The scratch memory allocated is a sum of TEAM_REDUCE_SIZE, L0 shmem size
+ // and L1 shmem size. TEAM_REDUCE_SIZE = 512 bytes saved per team for
+ // hierarchical reduction. There is an additional 10% of the requested
+ // scratch memory allocated per team as padding. Hence the product with 0.1.
+ //
+ // Use llvm extensions for dynamic shared memory with compilers/architecture
+ // combinations where it is supported.
+ //
+ // Size allocated in HBM will now change based on whether we use llvm
+ // extensions.
+#if defined(KOKKOS_IMPL_OPENMPTARGET_LLVM_EXTENSIONS)
+ const int total_shmem = shmem_size_L1 + shmem_size_L1 * 0.1;
+#else
+ const int total_shmem =
+ shmem_size_L0 + shmem_size_L1 + (shmem_size_L0 + shmem_size_L1) * 0.1;
+#endif
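+
+    // Worked example (sketch): without llvm extensions, shmem_size_L0 = 1024
+    // and shmem_size_L1 = 4096 give total_shmem = 5120 + 512 (10% padding)
+    // = 5632 bytes, so team k's buffer starts at k * (5632 + 512) bytes into
+    // glb_scratch, with the first 512 bytes reserved for reductions.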
+
+ // Per team offset for buffer in HBM.
+ const int reduce_offset =
+ m_shmem_block_index * (total_shmem + TEAM_REDUCE_SIZE);
+
+#if defined(KOKKOS_IMPL_OPENMPTARGET_LLVM_EXTENSIONS)
+ const int l1_offset = reduce_offset + TEAM_REDUCE_SIZE;
+ char* l0_scratch =
+ static_cast<char*>(llvm_omp_target_dynamic_shared_alloc());
+ m_team_shared = scratch_memory_space(
+ l0_scratch, shmem_size_L0, static_cast<char*>(glb_scratch) + l1_offset,
+ shmem_size_L1);
+#else
+ const int l0_offset = reduce_offset + TEAM_REDUCE_SIZE;
+ const int l1_offset = l0_offset + shmem_size_L0;
+ m_team_shared = scratch_memory_space(
+ (static_cast<char*>(glb_scratch) + l0_offset), shmem_size_L0,
+ static_cast<char*>(glb_scratch) + l1_offset, shmem_size_L1);
+#endif
+ m_reduce_scratch = static_cast<char*>(glb_scratch) + reduce_offset;
+ m_league_rank = league_rank;
+ m_team_rank = omp_tid;
+ m_vector_lane = 0;
+ }
+
+ static inline int team_reduce_size() { return TEAM_REDUCE_SIZE; }
+};
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget, Properties...>
+ : public PolicyTraits<Properties...> {
+ public:
+ //! Tag this class as a kokkos execution policy
+ using execution_policy = TeamPolicyInternal;
+
+ using traits = PolicyTraits<Properties...>;
+
+ //----------------------------------------
+
+ template <class FunctorType>
+ inline static int team_size_max(const FunctorType&, const ParallelForTag&) {
+ return 256;
+ }
+
+ template <class FunctorType>
+ inline static int team_size_max(const FunctorType&,
+ const ParallelReduceTag&) {
+ return 256;
+ }
+
+ template <class FunctorType, class ReducerType>
+ inline static int team_size_max(const FunctorType&, const ReducerType&,
+ const ParallelReduceTag&) {
+ return 256;
+ }
+
+ template <class FunctorType>
+ inline static int team_size_recommended(const FunctorType&,
+ const ParallelForTag&) {
+ return 128;
+ }
+
+ template <class FunctorType>
+ inline static int team_size_recommended(const FunctorType&,
+ const ParallelReduceTag&) {
+ return 128;
+ }
+
+ template <class FunctorType, class ReducerType>
+ inline static int team_size_recommended(const FunctorType&,
+ const ReducerType&,
+ const ParallelReduceTag&) {
+ return 128;
+ }
+
+ //----------------------------------------
+
+ private:
+ int m_league_size;
+ int m_team_size;
+ int m_vector_length;
+ int m_team_alloc;
+ int m_team_iter;
+ std::array<size_t, 2> m_team_scratch_size;
+ std::array<size_t, 2> m_thread_scratch_size;
+ bool m_tune_team_size;
+ bool m_tune_vector_length;
+  static constexpr size_t default_team_size = 256;
+ int m_chunk_size;
+
+ inline void init(const int league_size_request, const int team_size_request,
+ const int vector_length_request) {
+ m_league_size = league_size_request;
+
+    // The minimum team size for the OpenMPTarget backend is 32.
+    if (team_size_request < 32) {
+      Kokkos::Impl::OpenMPTarget_abort(
+          "OpenMPTarget backend requires a minimum of 32 threads per team.\n");
+    } else {
+      m_team_size = team_size_request;
+    }
+
+ m_vector_length = vector_length_request;
+ set_auto_chunk_size();
+ }
+
+ template <typename ExecSpace, typename... OtherProperties>
+ friend class TeamPolicyInternal;
+
+ public:
+ // FIXME_OPENMPTARGET : Currently this routine is a copy of the Cuda
+ // implementation, but this has to be tailored to be architecture specific.
+ inline static int scratch_size_max(int level) {
+ return (
+ level == 0 ? 1024 * 40 : // 48kB is the max for CUDA, but we need some
+ // for team_member.reduce etc.
+ 20 * 1024 *
+ 1024); // arbitrarily setting this to 20MB, for a Volta V100
+ // that would give us about 3.2GB for 2 teams per SM
+ }
+ inline bool impl_auto_team_size() const { return m_tune_team_size; }
+ inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+ inline void impl_set_team_size(const size_t size) { m_team_size = size; }
+  inline void impl_set_vector_length(const size_t length) {
+    // Assign the vector length itself; assigning to the bool tuning flag
+    // here was almost certainly a typo (compare impl_set_team_size above).
+    m_vector_length = length;
+  }
+ inline int impl_vector_length() const { return m_vector_length; }
+ inline int team_size() const { return m_team_size; }
+ inline int league_size() const { return m_league_size; }
+ inline size_t scratch_size(const int& level, int team_size_ = -1) const {
+ if (team_size_ < 0) team_size_ = m_team_size;
+ return m_team_scratch_size[level] +
+ team_size_ * m_thread_scratch_size[level];
+ }
+
+ inline Kokkos::Experimental::OpenMPTarget space() const {
+ return Kokkos::Experimental::OpenMPTarget();
+ }
+
+ template <class... OtherProperties>
+ TeamPolicyInternal(const TeamPolicyInternal<OtherProperties...>& p)
+ : m_league_size(p.m_league_size),
+ m_team_size(p.m_team_size),
+ m_vector_length(p.m_vector_length),
+ m_team_alloc(p.m_team_alloc),
+ m_team_iter(p.m_team_iter),
+ m_team_scratch_size(p.m_team_scratch_size),
+ m_thread_scratch_size(p.m_thread_scratch_size),
+ m_tune_team_size(p.m_tune_team_size),
+ m_tune_vector_length(p.m_tune_vector_length),
+ m_chunk_size(p.m_chunk_size) {}
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request, int team_size_request,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, vector_length_request);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size / vector_length_request,
+ vector_length_request);
+ }
+
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size, 1);
+ }
+ TeamPolicyInternal(const typename traits::execution_space&,
+ int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, 1);
+ }
+
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, vector_length_request);
+ }
+
+ TeamPolicyInternal(int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ int vector_length_request = 1)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(false),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size / vector_length_request,
+ vector_length_request);
+ }
+
+ TeamPolicyInternal(int league_size_request,
+ const Kokkos::AUTO_t& /* team_size_request */
+ ,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(true),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, default_team_size, 1);
+ }
+ TeamPolicyInternal(int league_size_request, int team_size_request,
+ const Kokkos::AUTO_t& /* vector_length_request */)
+ : m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_tune_team_size(false),
+ m_tune_vector_length(true),
+ m_chunk_size(0) {
+ init(league_size_request, team_size_request, 1);
+ }
+ inline static size_t vector_length_max() {
+ return 32; /* TODO: this is bad. Need logic that is compiler and backend
+ aware */
+ }
+ inline int team_alloc() const { return m_team_alloc; }
+ inline int team_iter() const { return m_team_iter; }
+
+ inline int chunk_size() const { return m_chunk_size; }
+
+ /** \brief set chunk_size to a discrete value*/
+ inline TeamPolicyInternal& set_chunk_size(
+ typename traits::index_type chunk_size_) {
+ m_chunk_size = chunk_size_;
+ return *this;
+ }
+
+ /** \brief set per team scratch size for a specific level of the scratch
+ * hierarchy */
+ inline TeamPolicyInternal& set_scratch_size(const int& level,
+ const PerTeamValue& per_team) {
+ m_team_scratch_size[level] = per_team.value;
+ return *this;
+ }
+
+ /** \brief set per thread scratch size for a specific level of the scratch
+ * hierarchy */
+ inline TeamPolicyInternal& set_scratch_size(
+ const int& level, const PerThreadValue& per_thread) {
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ /** \brief set per thread and per team scratch size for a specific level of
+ * the scratch hierarchy */
+ inline TeamPolicyInternal& set_scratch_size(
+ const int& level, const PerTeamValue& per_team,
+ const PerThreadValue& per_thread) {
+ m_team_scratch_size[level] = per_team.value;
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ private:
+ /** \brief finalize chunk_size if it was set to AUTO*/
+ inline void set_auto_chunk_size() {
+    // FIXME_OPENMPTARGET: hard-coded device concurrency estimate; this should
+    // be queried from the execution space instance instead.
+    int concurrency = 2048 * 128;
+
+    if (concurrency == 0) concurrency = 1;
+
+ if (m_chunk_size > 0) {
+ if (!Impl::is_integral_power_of_two(m_chunk_size))
+ Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+ }
+
+ int new_chunk_size = 1;
+ while (new_chunk_size * 100 * concurrency < m_league_size)
+ new_chunk_size *= 2;
+ if (new_chunk_size < 128) {
+ new_chunk_size = 1;
+ while ((new_chunk_size * 40 * concurrency < m_league_size) &&
+ (new_chunk_size < 128))
+ new_chunk_size *= 2;
+ }
+ m_chunk_size = new_chunk_size;
+ }
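+
+  // Worked example (sketch): with the hard-coded concurrency of 2048 * 128 =
+  // 262144, the first loop grows the chunk only once league_size exceeds
+  // 100 * 262144, so for typical league sizes both loops exit immediately and
+  // m_chunk_size stays at 1.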
+
+ public:
+ using member_type = Impl::OpenMPTargetExecTeamMember;
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>
+TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType& count) {
+ return Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType1& begin, const iType2& end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(begin),
+ iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>
+ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType& count) {
+ return Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType1& arg_begin, const iType2& arg_end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
+ iType(arg_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>
+TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType& count) {
+ return Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+ std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+ const iType1& arg_begin, const iType2& arg_end) {
+ using iType = std::common_type_t<iType1, iType2>;
+ return Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
+ iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember> PerTeam(
+ const Impl::OpenMPTargetExecTeamMember& thread) {
+ return Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember> PerThread(
+ const Impl::OpenMPTargetExecTeamMember& thread) {
+ return Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
+}
+} // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+ /*single_struct*/,
+ const FunctorType& lambda) {
+ lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+ single_struct,
+ const FunctorType& lambda) {
+ if (single_struct.team_member.team_rank() == 0) lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+ /*single_struct*/,
+ const FunctorType& lambda, ValueType& val) {
+ lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+ const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+ single_struct,
+ const FunctorType& lambda, ValueType& val) {
+ if (single_struct.team_member.team_rank() == 0) {
+ lambda(val);
+ }
+ single_struct.team_member.team_broadcast(val, 0);
+}
+} // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+ using index_type = iType;
+ const iType start;
+ const iType end;
+ const OpenMPTargetExecTeamMember& team;
+
+ TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ iType count)
+ : start(0), end(count), team(thread_) {}
+ TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ iType begin_, iType end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+ using index_type = iType;
+ const index_type start;
+ const index_type end;
+ const OpenMPTargetExecTeamMember& team;
+
+ ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ index_type count)
+ : start(0), end(count), team(thread_) {}
+ ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ index_type begin_, index_type end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+ using index_type = iType;
+ const index_type start;
+ const index_type end;
+ const OpenMPTargetExecTeamMember& team;
+
+ TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ index_type count)
+ : start(0), end(count), team(thread_) {}
+ TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+ index_type begin_, index_type end_)
+ : start(begin_), end(end_), team(thread_) {}
+};
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+#endif /* KOKKOS_OPENMPTARGET_PARALLEL_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELFOR_MDRANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELFOR_MDRANGE_HPP
+
+#include <omp.h>
+#include <Kokkos_Parallel.hpp>
+#include "Kokkos_OpenMPTarget_MDRangePolicy.hpp"
+#include "Kokkos_OpenMPTarget_Instance.hpp"
+#include "Kokkos_OpenMPTarget_FunctorAdapter.hpp"
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using Member = typename Policy::member_type;
+ using Index = typename Policy::index_type;
+
+ using FunctorAdapter =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy>;
+ const FunctorAdapter m_functor;
+
+ const Policy m_policy;
+
+ public:
+ inline void execute() const {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+
+ Policy policy = m_policy;
+
+ static_assert(1 < Policy::rank && Policy::rank < 7);
+ static_assert(Policy::inner_direction == Iterate::Left ||
+ Policy::inner_direction == Iterate::Right);
+
+ execute_tile<Policy::rank>(
+ m_functor, policy,
+ std::integral_constant<Iterate, Policy::inner_direction>());
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 2> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor)
+ for (auto i0 = begin_0; i0 < end_0; ++i0)
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ functor(i0, i1);
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 3> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 4> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 5> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 6> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+ const Index begin_5 = policy.m_lower[5];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+ const Index end_5 = policy.m_upper[5];
+
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+            for (auto i5 = begin_5; i5 < end_5; ++i5) {
+              functor(i0, i1, i2, i3, i4, i5);
+            }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 2> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor)
+ for (auto i1 = begin_1; i1 < end_1; ++i1)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1);
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 3> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor)
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2);
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 4> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor)
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3);
+ }
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 5> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor)
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, i4);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ template <int Rank>
+ inline std::enable_if_t<Rank == 6> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+ const Index begin_5 = policy.m_lower[5];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+ const Index end_5 = policy.m_upper[5];
+
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor)
+ for (auto i5 = begin_5; i5 < end_5; ++i5) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i0 = begin_0; i0 < end_0; ++i0) {
+            functor(i0, i1, i2, i3, i4, i5);
+          }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+  // TODO DZP: based on a conversation with Christian, we're using 256 as a
+  // heuristic here. We need something better once we can query these kinds of
+  // properties.
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ return 256;
+ }
+};
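+
+// Usage sketch (illustrative): this specialization is what a user-level call
+// like the following dispatches to on the OpenMPTarget backend:
+//
+//   Kokkos::MDRangePolicy<Kokkos::Rank<2>> policy({0, 0}, {N0, N1});
+//   Kokkos::parallel_for("init", policy,
+//                        KOKKOS_LAMBDA(const int i, const int j) {
+//                          a(i, j) = 0.0;
+//                        });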
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* KOKKOS_OPENMPTARGET_PARALLELFOR_MDRANGE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_FOR_RANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_FOR_RANGE_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include "Kokkos_OpenMPTarget_Instance.hpp"
+#include "Kokkos_OpenMPTarget_FunctorAdapter.hpp"
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using Member = typename Policy::member_type;
+
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy> m_functor;
+ const Policy m_policy;
+
+ public:
+ void execute() const { execute_impl(); }
+
+ void execute_impl() const {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ const auto begin = m_policy.begin();
+ const auto end = m_policy.end();
+
+ if (end <= begin) return;
+
+ auto const a_functor(m_functor);
+
+#pragma omp target teams distribute parallel for map(to : a_functor)
+ for (auto i = begin; i < end; ++i) {
+ a_functor(i);
+ }
+ }
+
+ ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
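+
+// Conceptually (a sketch, not the literal lowering), a parallel_for over
+// RangePolicy(0, N) with functor f reduces to the single construct used in
+// execute_impl above:
+//
+//   #pragma omp target teams distribute parallel for map(to : f)
+//   for (auto i = 0; i < N; ++i) f(i);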
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_FOR_TEAM_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_FOR_TEAM_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Macros.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_FunctorAdapter.hpp>
+
+namespace Kokkos {
+
+/** \brief Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda) {
+#pragma omp for nowait schedule(static, 1)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
+
+/** \brief Intra-thread vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda) {
+#pragma omp simd
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
+
+/** \brief Intra-team vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+ const Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda) {
+#pragma omp for simd nowait schedule(static, 1)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
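+
+/* Usage sketch of the nested range policies above (illustrative; "team" is a
+ * member_type handle and "a" a rank-2 view):
+ *
+ *   Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n), [&](const int i) {
+ *     Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, m),
+ *                          [&](const int j) { a(i, j) = 0.0; });
+ *   });
+ */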
+
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
+ Properties...>;
+ using Member = typename Policy::member_type;
+
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy> m_functor;
+
+ const Policy m_policy;
+ const size_t m_shmem_size;
+
+ public:
+ void execute() const {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ execute_impl();
+ }
+
+ private:
+ void execute_impl() const {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget parallel_for");
+ const auto league_size = m_policy.league_size();
+ const auto team_size = m_policy.team_size();
+ const auto vector_length = m_policy.impl_vector_length();
+
+ const size_t shmem_size_L0 = m_policy.scratch_size(0, team_size);
+ const size_t shmem_size_L1 = m_policy.scratch_size(1, team_size);
+ m_policy.space().impl_internal_space_instance()->resize_scratch(
+ team_size, shmem_size_L0, shmem_size_L1, league_size);
+
+ void* scratch_ptr =
+ m_policy.space().impl_internal_space_instance()->get_scratch_ptr();
+ auto const a_functor(m_functor);
+
+    // FIXME_OPENMPTARGET - If the team_size is not a multiple of 32, the
+    // scratch implementation does not work in Release or RelWithDebInfo
+    // builds but does work in Debug builds.
+
+ // Maximum active teams possible.
+    // FIXME_OPENMPTARGET: the Cray compiler has not yet implemented
+    // omp_get_max_teams.
+#if !defined(KOKKOS_COMPILER_CRAY_LLVM)
+ int max_active_teams = omp_get_max_teams();
+#else
+ int max_active_teams =
+ std::min(m_policy.space().concurrency() / team_size, league_size);
+#endif
+
+    // FIXME_OPENMPTARGET: Although the maximum number of teams is set via
+    // omp_set_num_teams in the resize_scratch routine, that call is not
+    // respected. Hence we need the `num_teams` clause to restrict the number
+    // of teams generated to max_active_teams. Hopefully we can drop the
+    // num_teams clause in the future and let the compiler pick the right
+    // number of teams. This issue does not apply to Intel architectures.
+
+    // If no teams can be active, do not launch the kernel.
+ if (max_active_teams <= 0) return;
+
+// We perform our own scheduling of teams to avoid splitting the code between
+// teams-distribute and parallel constructs; this gave a 2x performance boost
+// in test cases with the clang compiler. atomic_compare_exchange can be
+// avoided since the standard guarantees that the number of teams specified in
+// the `num_teams` clause is always less than or equal to the maximum number of
+// concurrently running teams.
+#if !defined(KOKKOS_IMPL_OPENMPTARGET_HIERARCHICAL_INTEL_GPU)
+ KOKKOS_IMPL_OMPTARGET_PRAGMA(
+ teams thread_limit(team_size) firstprivate(a_functor)
+ num_teams(max_active_teams) is_device_ptr(scratch_ptr)
+ KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(shmem_size_L0))
+#pragma omp parallel
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ const int blockIdx = omp_get_team_num();
+ const int gridDim = omp_get_num_teams();
+
+      // Iterate through the teams in strides of the grid size until
+      // league_size is covered, assigning the league_id accordingly. The
+      // check above guarantees that the compiler respected the `num_teams`
+      // clause.
+ for (int league_id = blockIdx; league_id < league_size;
+ league_id += gridDim) {
+ typename Policy::member_type team(league_id, league_size, team_size,
+ vector_length, scratch_ptr, blockIdx,
+ shmem_size_L0, shmem_size_L1);
+ a_functor(team);
+ }
+ }
+#else
+#pragma omp target teams distribute firstprivate(a_functor) \
+ is_device_ptr(scratch_ptr) num_teams(max_active_teams) \
+ thread_limit(team_size)
+ for (int i = 0; i < league_size; i++) {
+#pragma omp parallel
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ typename Policy::member_type team(i, league_size, team_size,
+ vector_length, scratch_ptr, i,
+ shmem_size_L0, shmem_size_L1);
+ a_functor(team);
+ }
+ }
+#endif
+ }
+
+ public:
+ ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor, arg_policy.team_size())) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELREDUCE_MDRANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELREDUCE_MDRANGE_HPP
+
+#include <omp.h>
+#include <Kokkos_Parallel.hpp>
+#include "Kokkos_OpenMPTarget_MDRangePolicy.hpp"
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel_Common.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Member = typename Policy::member_type;
+ using Index = typename Policy::index_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ static constexpr bool UseReducer =
+ !std::is_same_v<FunctorType, typename ReducerType::functor_type>;
+
+ const pointer_type m_result_ptr;
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+
+ using ParReduceCopy = ParallelReduceCopy<pointer_type>;
+
+ bool m_result_ptr_on_device;
+
+ using FunctorAdapter =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy>;
+
+ public:
+ inline void execute() const {
+ // Only let one ParallelReduce instance at a time use the scratch memory.
+ std::scoped_lock<std::mutex> scratch_memory_lock(
+ m_policy.space().impl_internal_space_instance()->m_mutex_scratch_ptr);
+
+ auto const functor = FunctorAdapter(m_functor_reducer.get_functor());
+ execute_tile<Policy::rank, typename ReducerType::value_type>(
+ functor, m_policy, m_result_ptr,
+ std::integral_constant<Iterate, Policy::inner_direction>());
+ }
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ Policy arg_policy, const ViewType& arg_result_view)
+ : m_result_ptr(arg_result_view.data()),
+ m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+ typename ViewType::memory_space>::accessible) {}
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 2> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+
+ ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives from their companion
+    // loops, which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
+ reduction(custom : result)
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, result);
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
+ reduction(+ : result)
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, result);
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
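+
+  // Note on the pragma above (mechanism sketch): `omp declare reduction`
+  // registers a user-defined reduction named `custom` whose combiner calls
+  // OpenMPTargetReducerWrapper<ReducerType>::join and whose private copies
+  // are initialized via OpenMPTargetReducerWrapper<ReducerType>::init, so
+  // reducers such as Kokkos::Min<T> combine correctly instead of using `+`.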
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 3> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+
+ ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives from their companion
+    // loops, which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction( \
+ custom \
+:ValueType : OpenMPTargetReducerWrapper< \
+ typename ReducerType::functor_type>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper< \
+ typename ReducerType::functor_type>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
+ reduction(custom : result)
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, result);
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
+ reduction(+ : result)
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, result);
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 4> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+
+ ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives from their companion
+    // loops, which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
+ reduction(custom : result)
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, result);
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
+ reduction(+ : result)
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, result);
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 5> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+
+ ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives from their companion
+    // loops, which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
+ reduction(custom : result)
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, i4, result);
+ }
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
+ reduction(+ : result)
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, i4, result);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 6> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateLeft) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+ const Index begin_5 = policy.m_lower[5];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+ const Index end_5 = policy.m_upper[5];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
+ reduction(custom : result)
+ for (auto i5 = begin_5; i5 < end_5; ++i5) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, i4, i5, result);
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
+ reduction(+ : result)
+ for (auto i5 = begin_5; i5 < end_5; ++i5) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ functor(i0, i1, i2, i3, i4, i5, result);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 2> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
+ reduction(custom : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ functor(i0, i1, result);
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
+ reduction(+ : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ functor(i0, i1, result);
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 3> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
+ reduction(custom : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ functor(i0, i1, i2, result);
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
+ reduction(+ : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ functor(i0, i1, i2, result);
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 4> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
+ reduction(custom : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ functor(i0, i1, i2, i3, result);
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
+ reduction(+ : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ functor(i0, i1, i2, i3, result);
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 5> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
+ reduction(custom : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ functor(i0, i1, i2, i3, i4, result);
+ }
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
+ reduction(+ : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ functor(i0, i1, i2, i3, i4, result);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <int Rank, class ValueType>
+ inline std::enable_if_t<Rank == 6> execute_tile(
+ const FunctorAdapter& functor, const Policy& policy, pointer_type ptr,
+ OpenMPTargetIterateRight) const {
+ const Index begin_0 = policy.m_lower[0];
+ const Index begin_1 = policy.m_lower[1];
+ const Index begin_2 = policy.m_lower[2];
+ const Index begin_3 = policy.m_lower[3];
+ const Index begin_4 = policy.m_lower[4];
+ const Index begin_5 = policy.m_lower[5];
+
+ const Index end_0 = policy.m_upper[0];
+ const Index end_1 = policy.m_upper[1];
+ const Index end_2 = policy.m_upper[2];
+ const Index end_3 = policy.m_upper[3];
+ const Index end_4 = policy.m_upper[4];
+ const Index end_5 = policy.m_upper[5];
+
+ ValueType result = ValueType();
+
+ // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+ // loops which leads to code duplication for different reduction types.
+ if constexpr (UseReducer) {
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
+ reduction(custom : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i5 = begin_5; i5 < end_5; ++i5) {
+ functor(i0, i1, i2, i3, i4, i5, result);
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
+ reduction(+ : result)
+ for (auto i0 = begin_0; i0 < end_0; ++i0) {
+ for (auto i1 = begin_1; i1 < end_1; ++i1) {
+ for (auto i2 = begin_2; i2 < end_2; ++i2) {
+ for (auto i3 = begin_3; i3 < end_3; ++i3) {
+ for (auto i4 = begin_4; i4 < end_4; ++i4) {
+ for (auto i5 = begin_5; i5 < end_5; ++i5) {
+ functor(i0, i1, i2, i3, i4, i5, result);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ParReduceCopy::memcpy_result(ptr, &result, sizeof(ValueType),
+ m_result_ptr_on_device);
+ }
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ return 256;
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#endif /* KOKKOS_OPENMPTARGET_PARALLELREDUCE_MDRANGE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELREDUCE_RANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELREDUCE_RANGE_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel_Common.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ static constexpr bool FunctorHasJoin = Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, Policy, FunctorType,
+ typename ReducerType::value_type>::Reducer::has_join_member_function();
+ static constexpr bool UseReducer =
+ !std::is_same_v<FunctorType, typename ReducerType::functor_type>;
+ static constexpr bool IsArray = std::is_pointer_v<reference_type>;
+
+ using ParReduceSpecialize =
+ ParallelReduceSpecialize<FunctorType, Policy,
+ typename ReducerType::functor_type, pointer_type,
+ typename ReducerType::value_type>;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ bool m_result_ptr_on_device;
+ const int m_result_ptr_num_elems;
+
+ public:
+ void execute() const {
+ // Only let one ParallelReduce instance at a time use the scratch memory.
+ std::scoped_lock<std::mutex> scratch_memory_lock(
+ m_policy.space().impl_internal_space_instance()->m_mutex_scratch_ptr);
+
+ auto const functor =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy>(
+ m_functor_reducer.get_functor());
+
+ if constexpr (FunctorHasJoin) {
+      // Enter this branch if the functor provides an init/join interface.
+ ParReduceSpecialize::execute_init_join(functor, m_policy, m_result_ptr,
+ m_result_ptr_on_device);
+ } else if constexpr (UseReducer) {
+      // Enter this branch if the reduction uses a reducer type.
+ ParReduceSpecialize::execute_reducer(functor, m_policy, m_result_ptr,
+ m_result_ptr_on_device);
+ } else if constexpr (IsArray) {
+      // Enter this branch if the reduction is over an array; the routine is
+      // templated on the array size (see the sketch after this function).
+ if (m_result_ptr_num_elems <= 2) {
+ ParReduceSpecialize::template execute_array<2>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 4) {
+ ParReduceSpecialize::template execute_array<4>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 8) {
+ ParReduceSpecialize::template execute_array<8>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 16) {
+ ParReduceSpecialize::template execute_array<16>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 32) {
+ ParReduceSpecialize::template execute_array<32>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else {
+ Kokkos::abort("array reduction length must be <= 32");
+ }
+ } else {
+      // This branch handles the basic scalar reduction.
+ ParReduceSpecialize::template execute_array<1>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ }
+ }
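+
+  // A sketch of what reaches the array branch above (assumption for
+  // illustration, not part of the original source): a functor whose
+  // reference_type is a pointer, i.e. one declaring something like
+  //
+  //   using value_type = double[];
+  //   size_t value_count = 3;  // runtime length, must be <= 32 here
+  //   KOKKOS_FUNCTION void operator()(const int i, double sums[]) const;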
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result_view)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()),
+ m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+ typename ViewType::memory_space>::accessible),
+ m_result_ptr_num_elems(arg_result_view.size()) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELREDUCE_TEAM_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELREDUCE_TEAM_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel_Common.hpp>
+
+// FIXME_OPENMPTARGET - This macro implements a workaround for hierarchical
+// reducers: it avoids the code path we wanted to write, which does not work.
+// It is undef'ed at the end of this file.
+// Intel compilers prefer the non-workaround version.
+#ifndef KOKKOS_ARCH_INTEL_GPU
+#define KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#endif
+
+namespace Kokkos {
+
+/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a summation of val is performed and put into result.
+ */
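+//
+// A minimal usage sketch (assuming a team handle `team`, an extent `N`, and
+// a device View `x`; an illustration, not part of the implementation):
+//
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamThreadRange(team, N),
+//       [=](const int i, double& val) { val += x(i); }, sum);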
+
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ ValueType* TeamThread_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+ TeamThread_scratch[0] = ValueType();
+#pragma omp barrier
+
+ if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp for reduction(+ : TeamThread_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ TeamThread_scratch[0] += tmp;
+ }
+ } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp for reduction(custom : TeamThread_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ TeamThread_scratch[0] += tmp;
+ }
+ }
+
+ result = TeamThread_scratch[0];
+}
+
+#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
+// The version we actually wanted to write crashes for reasons unknown; we
+// should retry it with every new compiler.
+// This is that variant:
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ReducerType result) {
+ using ValueType = typename ReducerType::value_type;
+
+#pragma omp declare reduction(custominner \
+:ValueType : Impl::OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, \
+ omp_in)) \
+ initializer(Impl::OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ ValueType* TeamThread_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+ Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamThread_scratch[0]);
+#pragma omp barrier
+
+#pragma omp for reduction(custominner : TeamThread_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, TeamThread_scratch[0]);
+ }
+ result.reference() = TeamThread_scratch[0];
+}
+#else
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ReducerType result) {
+ using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ ValueType* TeamThread_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp declare reduction(omp_red_teamthread_reducer \
+:ValueType : Impl::OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, \
+ omp_in)) \
+ initializer(Impl::OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp barrier
+ ValueType tmp;
+ result.init(tmp);
+ TeamThread_scratch[0] = tmp;
+#pragma omp barrier
+
+ iType team_size = iType(omp_get_num_threads());
+#pragma omp for reduction( \
+ omp_red_teamthread_reducer : TeamThread_scratch[ : 1]) \
+ schedule(static, 1)
+ for (iType t = 0; t < team_size; t++) {
+ ValueType tmp2;
+ result.init(tmp2);
+
+ for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
+ i += team_size) {
+ lambda(i, tmp2);
+ }
+
+    // FIXME_OPENMPTARGET: Join should work but doesn't. Every thread gets a
+ // private TeamThread_scratch[0] and at the end of the for-loop the `join`
+ // operation is performed by OpenMP itself and hence the simple assignment
+ // works.
+ // result.join(TeamThread_scratch[0], tmp2);
+ TeamThread_scratch[0] = tmp2;
+ }
+
+ result.reference() = TeamThread_scratch[0];
+}
+#endif // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+
+/** \brief Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a reduction of val is performed using JoinType(ValueType& val, const
+ * ValueType& update) and put into init_result. The input value of init_result
+ * is used as initializer for temporary variables of ValueType. Therefore the
+ * input value should be the neutral element with respect to the join operation
+ * (e.g. '0 for +-' or '1 for *').
+ */
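+//
+// A minimal usage sketch (assuming `team`, `N`, and a View `x`; the initial
+// value must be the neutral element of the join, here 1 for a product):
+//
+//   double prod = 1.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamThreadRange(team, N),
+//       [=](const int i, double& val) { val *= x(i); },
+//       [](double& dst, const double& src) { dst *= src; }, prod);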
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+ ValueType* TeamThread_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ // FIXME_OPENMPTARGET: Still need to figure out how to get value_count here.
+ const int value_count = 1;
+
+#pragma omp barrier
+  // Each thread initializes and accumulates into its own scratch slot so
+  // that the tree reduction below can combine the per-thread partials.
+  TeamThread_scratch[omp_get_thread_num() * value_count] = init_result;
+#pragma omp barrier
+
+#pragma omp for
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    lambda(i, TeamThread_scratch[omp_get_thread_num() * value_count]);
+  }
+
+ // Reduce all partial results within a team.
+ const int team_size = omp_get_num_threads();
+ int tree_neighbor_offset = 1;
+ do {
+#pragma omp for
+ for (int i = 0; i < team_size - tree_neighbor_offset;
+ i += 2 * tree_neighbor_offset) {
+ const int neighbor = i + tree_neighbor_offset;
+      join(TeamThread_scratch[i * value_count],
+           TeamThread_scratch[neighbor * value_count]);
+ }
+ tree_neighbor_offset *= 2;
+ } while (tree_neighbor_offset < team_size);
+ init_result = TeamThread_scratch[0];
+}
+
+/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a summation of val is performed and put into result.
+ */
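+//
+// A minimal usage sketch (assuming `team`, `N`, and a View `x`):
+//
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::ThreadVectorRange(team, N),
+//       [=](const int i, double& val) { val += x(i); }, sum);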
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+ ValueType vector_reduce = ValueType();
+
+ if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp simd reduction(+ : vector_reduce)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ vector_reduce += tmp;
+ }
+ } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp simd reduction(custom : vector_reduce)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, vector_reduce);
+ }
+ }
+
+ result = vector_reduce;
+}
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ReducerType const& result) {
+ using ValueType = typename ReducerType::value_type;
+
+#pragma omp declare reduction(custom \
+:ValueType : Impl::OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, \
+ omp_in)) \
+ initializer(Impl::OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+ ValueType vector_reduce;
+ Impl::OpenMPTargetReducerWrapper<ReducerType>::init(vector_reduce);
+
+#pragma omp simd reduction(custom : vector_reduce)
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, vector_reduce);
+ }
+
+ result.reference() = vector_reduce;
+}
+
+/** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a reduction of val is performed using JoinType(ValueType& val, const
+ * ValueType& update) and put into init_result. The input value of init_result
+ * is used as initializer for temporary variables of ValueType. Therefore the
+ * input value should be the neutral element with respect to the join operation
+ * (e.g. '0 for +-' or '1 for *').
+ */
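+//
+// A minimal usage sketch (assuming `team`, `N`, and a View `x`; the initial
+// value must be the neutral element of the join, here the lowest double):
+//
+//   double mx = -DBL_MAX;
+//   Kokkos::parallel_reduce(
+//       Kokkos::ThreadVectorRange(team, N),
+//       [=](const int i, double& val) { val = Kokkos::max(val, x(i)); },
+//       [](double& dst, const double& src) { dst = Kokkos::max(dst, src); },
+//       mx);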
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+ ValueType result = init_result;
+
+ // FIXME_OPENMPTARGET think about omp simd
+ // join does not work with omp reduction clause
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ join(result, tmp);
+ }
+
+ init_result = result;
+}
+
+/** \brief Intra-team vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling team
+ * and a summation of val is performed and put into result.
+ */
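+//
+// A minimal usage sketch (assuming `team`, `N`, and a View `x`):
+//
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamVectorRange(team, N),
+//       [=](const int i, double& val) { val += x(i); }, sum);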
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ const Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ValueType& result) {
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ ValueType* TeamVector_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+ TeamVector_scratch[0] = ValueType();
+#pragma omp barrier
+
+ if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp for simd reduction(+ : TeamVector_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ TeamVector_scratch[0] += tmp;
+ }
+ } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp for simd reduction(custom : TeamVector_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ ValueType tmp = ValueType();
+ lambda(i, tmp);
+ TeamVector_scratch[0] += tmp;
+ }
+ }
+
+ result = TeamVector_scratch[0];
+}
+
+#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ReducerType const& result) {
+ using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+#pragma omp declare reduction(custom \
+:ValueType : Impl::OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, \
+ omp_in)) \
+ initializer(Impl::OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+ ValueType* TeamVector_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+ Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamVector_scratch[0]);
+#pragma omp barrier
+
+#pragma omp for simd reduction(custom : TeamVector_scratch[ : 1])
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+ lambda(i, TeamVector_scratch[0]);
+ }
+
+ result.reference() = TeamVector_scratch[0];
+}
+#else
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const Lambda& lambda, ReducerType const& result) {
+ using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For reductions we allocate 16 bytes per
+  // element in the scratch space, hence 16*32 = 512.
+ static_assert(sizeof(ValueType) <=
+ Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+ ValueType* TeamVector_scratch =
+ static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp declare reduction(omp_red_teamthread_reducer \
+:ValueType : Impl::OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, \
+ omp_in)) \
+ initializer(Impl::OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp barrier
+ ValueType tmp;
+ result.init(tmp);
+ TeamVector_scratch[0] = tmp;
+#pragma omp barrier
+
+ iType team_size = iType(omp_get_num_threads());
+#pragma omp for simd reduction( \
+ omp_red_teamthread_reducer : TeamVector_scratch[ : 1]) \
+ schedule(static, 1)
+ for (iType t = 0; t < team_size; t++) {
+ ValueType tmp2;
+ result.init(tmp2);
+
+ for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
+ i += team_size) {
+ lambda(i, tmp2);
+ }
+ TeamVector_scratch[0] = tmp2;
+ }
+
+ result.reference() = TeamVector_scratch[0];
+}
+#endif // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ private:
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
+ Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+ using value_type = typename ReducerType::value_type;
+
+ bool m_result_ptr_on_device;
+ const int m_result_ptr_num_elems;
+
+ static constexpr bool FunctorHasJoin = Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, Policy, FunctorType,
+ typename ReducerType::value_type>::Reducer::has_join_member_function();
+ static constexpr bool UseReducer =
+ !std::is_same_v<FunctorType, typename ReducerType::functor_type>;
+ static constexpr bool IsArray = std::is_pointer_v<reference_type>;
+
+ using ParReduceSpecialize =
+ ParallelReduceSpecialize<FunctorType, Policy,
+ typename ReducerType::functor_type, pointer_type,
+ typename ReducerType::value_type>;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const size_t m_shmem_size;
+
+ public:
+ void execute() const {
+ // Only let one ParallelReduce instance at a time use the scratch memory.
+ std::scoped_lock<std::mutex> scratch_memory_lock(
+ m_policy.space().impl_internal_space_instance()->m_mutex_scratch_ptr);
+ auto const functor =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy>(
+ m_functor_reducer.get_functor());
+ if constexpr (FunctorHasJoin) {
+ ParReduceSpecialize::execute_init_join(functor, m_policy, m_result_ptr,
+ m_result_ptr_on_device);
+ } else if constexpr (UseReducer) {
+ ParReduceSpecialize::execute_reducer(functor, m_policy, m_result_ptr,
+ m_result_ptr_on_device);
+ } else if constexpr (IsArray) {
+ if (m_result_ptr_num_elems <= 2) {
+ ParReduceSpecialize::template execute_array<2>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 4) {
+ ParReduceSpecialize::template execute_array<4>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 8) {
+ ParReduceSpecialize::template execute_array<8>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 16) {
+ ParReduceSpecialize::template execute_array<16>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else if (m_result_ptr_num_elems <= 32) {
+ ParReduceSpecialize::template execute_array<32>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ } else {
+ Kokkos::abort("array reduction length must be <= 32");
+ }
+ } else {
+ ParReduceSpecialize::template execute_array<1>(
+ functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_result_ptr_on_device(
+ MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+ typename ViewType::memory_space>::accessible),
+ m_result_ptr_num_elems(arg_result.size()),
+ m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_shmem_size(
+ arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor_reducer.get_functor(), arg_policy.team_size())) {}
+};
+
+} // namespace Impl
+
+#ifdef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#undef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#endif
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELSCAN_RANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELSCAN_RANGE_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_FunctorAdapter.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ protected:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ using Member = typename Policy::member_type;
+ using idx_type = typename Policy::index_type;
+
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ Policy, FunctorType, void>;
+
+ using value_type = typename Analysis::value_type;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
+ const Policy m_policy;
+
+ value_type* m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+
+ using FunctorAdapter =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, Policy>;
+
+ public:
+ void impl_execute(
+ Kokkos::View<value_type**, Kokkos::LayoutRight,
+ Kokkos::Experimental::OpenMPTargetSpace>
+ element_values,
+ Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+ chunk_values,
+ Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count)
+ const {
+ const idx_type N = m_policy.end() - m_policy.begin();
+ const idx_type chunk_size = 128;
+ const idx_type n_chunks = (N + chunk_size - 1) / chunk_size;
+ idx_type nteams = n_chunks > 512 ? 512 : n_chunks;
+ idx_type team_size = 128;
+
+ auto a_functor_reducer = m_functor_reducer;
+ auto a_functor = FunctorAdapter(m_functor_reducer.get_functor());
+
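+    // The scan runs in two kernels over fixed-size chunks: in the first
+    // kernel each team scans its own chunk and records the chunk total, and
+    // the last team to finish scans the chunk totals; the second kernel adds
+    // the preceding chunk total as an offset while applying the final values.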
+#pragma omp target teams distribute map(to : a_functor_reducer, a_functor) \
+ num_teams(nteams)
+ for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
+ const typename Analysis::Reducer& reducer =
+ a_functor_reducer.get_reducer();
+#pragma omp parallel num_threads(team_size)
+ {
+ const idx_type local_offset = team_id * chunk_size;
+
+#pragma omp for
+ for (idx_type i = 0; i < chunk_size; ++i) {
+ const idx_type idx = local_offset + i;
+ value_type val;
+ reducer.init(&val);
+ if (idx < N) a_functor(idx, val, false);
+
+ element_values(team_id, i) = val;
+ }
+#pragma omp barrier
+ if (omp_get_thread_num() == 0) {
+ value_type sum;
+ reducer.init(&sum);
+ for (idx_type i = 0; i < chunk_size; ++i) {
+ reducer.join(&sum, &element_values(team_id, i));
+ element_values(team_id, i) = sum;
+ }
+ chunk_values(team_id) = sum;
+ }
+#pragma omp barrier
+ if (omp_get_thread_num() == 0) {
+ if (Kokkos::atomic_fetch_add(&count(), 1) == n_chunks - 1) {
+ value_type sum;
+ reducer.init(&sum);
+ for (idx_type i = 0; i < n_chunks; ++i) {
+ reducer.join(&sum, &chunk_values(i));
+ chunk_values(i) = sum;
+ }
+ }
+ }
+ }
+ }
+
+#pragma omp target teams distribute map(to : a_functor_reducer, a_functor) \
+ num_teams(nteams) thread_limit(team_size)
+ for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
+ const typename Analysis::Reducer& reducer =
+ a_functor_reducer.get_reducer();
+#pragma omp parallel num_threads(team_size)
+ {
+ const idx_type local_offset = team_id * chunk_size;
+ value_type offset_value;
+ if (team_id > 0)
+ offset_value = chunk_values(team_id - 1);
+ else
+ reducer.init(&offset_value);
+
+#pragma omp for
+ for (idx_type i = 0; i < chunk_size; ++i) {
+ const idx_type idx = local_offset + i;
+ value_type local_offset_value;
+ if (i > 0) {
+ local_offset_value = element_values(team_id, i - 1);
+          // FIXME_OPENMPTARGET We seem to access memory illegally on AMD GPUs
+#if defined(KOKKOS_ARCH_AMD_GPU) && !defined(KOKKOS_ARCH_AMD_GFX1030) && \
+ !defined(KOKKOS_ARCH_AMD_GFX1100) && !defined(KOKKOS_ARCH_AMD_GFX1103)
+ if constexpr (Analysis::Reducer::has_join_member_function()) {
+ a_functor.get_functor().join(local_offset_value, offset_value);
+ } else
+ local_offset_value += offset_value;
+#else
+ reducer.join(&local_offset_value, &offset_value);
+#endif
+ } else
+ local_offset_value = offset_value;
+ if (idx < N) a_functor(idx, local_offset_value, true);
+
+ if (idx == N - 1 && m_result_ptr_device_accessible)
+ *m_result_ptr = local_offset_value;
+ }
+ }
+ }
+ }
+
+ void execute() const {
+    Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+ const idx_type N = m_policy.end() - m_policy.begin();
+ const idx_type chunk_size = 128;
+ const idx_type n_chunks = (N + chunk_size - 1) / chunk_size;
+
+    // Only let one ParallelScan instance at a time use the scratch memory.
+ std::scoped_lock<std::mutex> scratch_memory_lock(
+ m_policy.space().impl_internal_space_instance()->m_mutex_scratch_ptr);
+
+ // This could be scratch memory per team
+ Kokkos::View<value_type**, Kokkos::LayoutRight,
+ Kokkos::Experimental::OpenMPTargetSpace>
+ element_values("element_values", n_chunks, chunk_size);
+ Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+ chunk_values("chunk_values", n_chunks);
+ Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
+ "Count");
+
+ impl_execute(element_values, chunk_values, count);
+ }
+
+ //----------------------------------------
+
+ ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy,
+ pointer_type arg_result_ptr = nullptr,
+ bool arg_result_ptr_device_accessible = false)
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_ptr),
+ m_result_ptr_device_accessible(arg_result_ptr_device_accessible) {}
+
+ //----------------------------------------
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+ ReturnType, Kokkos::Experimental::OpenMPTarget>
+ : public ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget> {
+ using base_t = ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Experimental::OpenMPTarget>;
+ using value_type = typename base_t::value_type;
+
+ public:
+ void execute() const {
+    Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+ const int64_t N = base_t::m_policy.end() - base_t::m_policy.begin();
+ const int chunk_size = 128;
+ const int64_t n_chunks = (N + chunk_size - 1) / chunk_size;
+
+ if (N > 0) {
+      // Only let one ParallelScan instance at a time use the scratch memory.
+ std::scoped_lock<std::mutex> scratch_memory_lock(
+ base_t::m_policy.space()
+ .impl_internal_space_instance()
+ ->m_mutex_scratch_ptr);
+
+ // This could be scratch memory per team
+ Kokkos::View<value_type**, Kokkos::LayoutRight,
+ Kokkos::Experimental::OpenMPTargetSpace>
+ element_values("element_values", n_chunks, chunk_size);
+ Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+ chunk_values("chunk_values", n_chunks);
+ Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
+ "Count");
+
+ base_t::impl_execute(element_values, chunk_values, count);
+
+ if (!base_t::m_result_ptr_device_accessible) {
+ const int size = base_t::m_functor_reducer.get_reducer().value_size();
+ DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace,
+ Kokkos::Experimental::OpenMPTarget>(
+ base_t::m_policy.space(), base_t::m_result_ptr,
+ chunk_values.data() + (n_chunks - 1), size);
+ }
+ } else if (!base_t::m_result_ptr_device_accessible) {
+ base_t::m_functor_reducer.get_reducer().init(base_t::m_result_ptr);
+ }
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const typename base_t::Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : base_t(arg_functor, arg_policy, arg_result_view.data(),
+ MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+ typename ViewType::memory_space>::accessible) {
+ }
+};
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLELSCAN_TEAM_HPP
+#define KOKKOS_OPENMPTARGET_PARALLELSCAN_TEAM_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp>
+
+// FIXME_OPENMPTARGET - This macro implements a workaround for the
+// hierarchical scan: it avoids the code path we wanted to write, which does
+// not work. It is undef'ed at the end of this file.
+#ifndef KOKKOS_ARCH_INTEL_GPU
+#define KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#endif
+
+namespace Kokkos {
+
+// This is largely the same code as in HIP and CUDA except for the member name
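+// A minimal usage sketch of the overload below (assuming a team handle
+// `team`, an extent `N`, and Views `x` and `y`; illustration only):
+//
+//   double total = 0.;
+//   Kokkos::parallel_scan(
+//       Kokkos::TeamThreadRange(team, N),
+//       [=](const int i, double& partial, const bool final) {
+//         partial += x(i);
+//         if (final) y(i) = partial;
+//       },
+//       total);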
+template <typename iType, class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_bounds,
+ const FunctorType& lambda, ValueType& return_val) {
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Experimental::OpenMPTarget>,
+ FunctorType, void>;
+ using analysis_value_type = typename Analysis::value_type;
+ static_assert(std::is_same_v<analysis_value_type, ValueType>,
+ "Non-matching value types of functor and return type");
+
+ const auto start = loop_bounds.start;
+ const auto end = loop_bounds.end;
+  // Note that the team handle is called .member in the CUDA specialization
+  // of TeamThreadRangeBoundariesStruct.
+ auto& member = loop_bounds.team;
+ const auto team_rank = member.team_rank();
+
+#if defined(KOKKOS_IMPL_TEAM_SCAN_WORKAROUND)
+ ValueType scan_val = {};
+
+ if (team_rank == 0) {
+ for (iType i = start; i < end; ++i) {
+ lambda(i, scan_val, true);
+ }
+ }
+ member.team_broadcast(scan_val, 0);
+ return_val = scan_val;
+
+#pragma omp barrier
+#else
+ const auto team_size = member.team_size();
+ const auto nchunk = (end - start + team_size - 1) / team_size;
+ ValueType accum = {};
+ // each team has to process one or
+ // more chunks of the prefix scan
+ for (iType i = 0; i < nchunk; ++i) {
+ auto ii = start + i * team_size + team_rank;
+ // local accumulation for this chunk
+ ValueType local_accum = {};
+ // user updates value with prefix value
+ if (ii < loop_bounds.end) lambda(ii, local_accum, false);
+ // perform team scan
+ local_accum = member.team_scan(local_accum);
+    // add this block's accum to the total accumulation
+ auto val = accum + local_accum;
+ // user updates their data with total accumulation
+ if (ii < loop_bounds.end) lambda(ii, val, true);
+    // the last value needs to be propagated to the next chunk
+ if (team_rank == team_size - 1) accum = val;
+ // broadcast last value to rest of the team
+ member.team_broadcast(accum, team_size - 1);
+ }
+ return_val = accum;
+
+#endif
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_bounds,
+ const FunctorType& lambda) {
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Experimental::OpenMPTarget>,
+ FunctorType, void>;
+ using value_type = typename Analysis::value_type;
+ value_type scan_val;
+ parallel_scan(loop_bounds, lambda, scan_val);
+}
+} // namespace Kokkos
+
+namespace Kokkos {
+
+/** \brief Intra-thread vector parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
+ * operation is performed. Depending on the target execution space the operator
+ * might be called twice: once with final=false and once with final=true. When
+ * final==true val contains the prefix sum value. The contribution of this "i"
+ * needs to be added to val no matter whether final==true or not. In a serial
+ * execution (i.e. team_size==1) the operator is only called once with
+ * final==true. Scan_val will be set to the final sum value over all vector
+ * lanes.
+ */
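+//
+// A minimal usage sketch (assuming `team`, `N`, and Views `x` and `y`):
+//
+//   double total = 0.;
+//   Kokkos::parallel_scan(
+//       Kokkos::ThreadVectorRange(team, N),
+//       [=](const int i, double& partial, const bool final) {
+//         partial += x(i);
+//         if (final) y(i) = partial;
+//       },
+//       total);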
+template <typename iType, class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const FunctorType& lambda, ValueType& return_val) {
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Experimental::OpenMPTarget>,
+ FunctorType, void>;
+ using analysis_value_type = typename Analysis::value_type;
+ static_assert(std::is_same_v<analysis_value_type, ValueType>,
+ "Non-matching value types of functor and return type");
+
+ ValueType scan_val = {};
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+ for (iType i = loop_boundaries.start; i < loop_boundaries.end; ++i) {
+ lambda(i, scan_val, true);
+ }
+
+ return_val = scan_val;
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+ const FunctorType& lambda) {
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Experimental::OpenMPTarget>,
+ FunctorType, void>;
+ using value_type = typename Analysis::value_type;
+
+ value_type scan_val = value_type();
+ parallel_scan(loop_boundaries, lambda, scan_val);
+}
+
+} // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#undef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_COMMON_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_COMMON_HPP
+
+#include <omp.h>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Reducer.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Macros.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_FunctorAdapter.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// This class has the memcpy routine that is commonly used by ParallelReduce
+// over RangePolicy and TeamPolicy.
+template <class PointerType>
+struct ParallelReduceCopy {
+ // Copy the result back to device if the view is on the device.
+ static void memcpy_result(PointerType dest, PointerType src, size_t size,
+ bool ptr_on_device) {
+ if (ptr_on_device) {
+ if (0 < size) {
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(dest, src, size, 0, 0,
+ omp_get_default_device(),
+ omp_get_initial_device()));
+ }
+
+ } else {
+ *dest = *src;
+ }
+ }
+};
+
+template <class FunctorType, class ReducerType, class PointerType,
+ class ValueType, class PolicyType>
+struct ParallelReduceSpecialize {
+ inline static void execute(const FunctorType& /*f*/, const PolicyType& /*p*/,
+ PointerType /*result_ptr*/) {
+    constexpr bool FunctorHasJoin =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                              FunctorType,
+                              ValueType>::Reducer::has_join_member_function();
+    constexpr bool UseReducerType = is_reducer_v<ReducerType>;
+
+ std::stringstream error_message;
+ error_message << "Error: Invalid Specialization " << FunctorHasJoin << ' '
+ << UseReducerType << '\n';
+ // FIXME_OPENMPTARGET
+ OpenMPTarget_abort(error_message.str().c_str());
+ }
+};
+
+template <class FunctorType, class ReducerType, class PointerType,
+ class ValueType, class... PolicyArgs>
+struct ParallelReduceSpecialize<FunctorType, Kokkos::RangePolicy<PolicyArgs...>,
+ ReducerType, PointerType, ValueType> {
+ using PolicyType = Kokkos::RangePolicy<PolicyArgs...>;
+ using ReducerTypeFwd =
+ std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+ FunctorType, ReducerType>;
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ PolicyType, ReducerTypeFwd, ValueType>;
+ using ReferenceType = typename Analysis::reference_type;
+
+ using ParReduceCopy = ParallelReduceCopy<PointerType>;
+
+ using FunctorAdapter =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, PolicyType>;
+
+ static void execute_reducer(const FunctorAdapter& f, const PolicyType& p,
+ PointerType result_ptr, bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:reducer");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:reducer");
+ const auto begin = p.begin();
+ const auto end = p.end();
+
+ ValueType result;
+ OpenMPTargetReducerWrapper<ReducerType>::init(result);
+
+ // Initialize and copy back the result even if it is a zero length
+ // reduction.
+ if (end <= begin) {
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ return;
+ }
+
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for map(to : f) \
+ reduction(custom : result)
+ for (auto i = begin; i < end; ++i) {
+ f(i, result);
+ }
+
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ }
+
+ template <int NumReductions>
+ static void execute_array(const FunctorAdapter& f, const PolicyType& p,
+ PointerType result_ptr, bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:array_reduction");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:array_reduction");
+ const auto begin = p.begin();
+ const auto end = p.end();
+
+    // Enter this branch if the reduction is on a scalar type.
+ if constexpr (NumReductions == 1) {
+ ValueType result = ValueType();
+
+ // Initialize and copy back the result even if it is a zero length
+ // reduction.
+ if (end <= begin) {
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ return;
+ }
+
+ // Case where reduction is on a native data type.
+ if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp target teams distribute parallel for map(to : f) \
+ reduction(+ : result)
+ for (auto i = begin; i < end; ++i) f(i, result);
+ } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+#pragma omp target teams distribute parallel for map(to : f) \
+ reduction(custom : result)
+ for (auto i = begin; i < end; ++i) f(i, result);
+ }
+
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ } else {
+ ValueType result[NumReductions] = {};
+
+ // Initialize and copy back the result even if it is a zero length
+ // reduction.
+ if (end <= begin) {
+ ParReduceCopy::memcpy_result(result_ptr, result,
+ NumReductions * sizeof(ValueType),
+ ptr_on_device);
+ return;
+ }
+#pragma omp target teams distribute parallel for map(to : f) \
+ reduction(+ : result[ : NumReductions])
+ for (auto i = begin; i < end; ++i) {
+ f(i, result);
+ }
+
+ ParReduceCopy::memcpy_result(
+ result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
+ }
+ }
+
+ static void execute_init_join(const FunctorAdapter& f, const PolicyType& p,
+ PointerType ptr, const bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:init_join");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget RangePolicy "
+ "parallel_reduce:init_join");
+ const auto begin = p.begin();
+ const auto end = p.end();
+
+ using FunctorAnalysis =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+ FunctorType, ValueType>;
+
+ // Initialize the result pointer.
+
+ const auto size = end - begin;
+
+    // FIXME_OPENMPTARGET: The team size and concurrency are currently
+    // tuned for an NVIDIA V100 and should be derived from the actual
+    // architecture in the future.
+ const int max_team_threads = 32;
+ const int max_teams =
+ p.space().impl_internal_space_instance()->concurrency() /
+ max_team_threads;
+ // Number of elements in the reduction
+ const auto value_count = FunctorAnalysis::value_count(f.get_functor());
+
+    // Allocate scratch per active thread. This is achieved by passing 1 as
+    // the first parameter of `resize_scratch`.
+ p.space().impl_internal_space_instance()->resize_scratch(
+ 1, 0, value_count * sizeof(ValueType),
+ std::numeric_limits<int64_t>::max());
+ ValueType* scratch_ptr = static_cast<ValueType*>(
+ p.space().impl_internal_space_instance()->get_scratch_ptr());
+
+ typename FunctorAnalysis::Reducer final_reducer(f.get_functor());
+
+ if (end <= begin) {
+#pragma omp target map(to : final_reducer) is_device_ptr(scratch_ptr)
+ {
+ // If there is no work to be done, copy back the initialized values and
+ // exit.
+ final_reducer.init(scratch_ptr);
+ final_reducer.final(scratch_ptr);
+ }
+ if (0 < value_count) {
+ if (!ptr_on_device)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_initial_device(), omp_get_default_device()));
+ else
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_default_device(), omp_get_default_device()));
+ }
+
+ return;
+ }
+
+#pragma omp target teams num_teams(max_teams) thread_limit(max_team_threads) \
+ map(to : final_reducer) is_device_ptr(scratch_ptr)
+ {
+#pragma omp parallel
+ {
+ const int team_num = omp_get_team_num();
+ const int num_teams = omp_get_num_teams();
+ const auto chunk_size = size / num_teams;
+ const auto team_begin = begin + team_num * chunk_size;
+ const auto team_end =
+ (team_num == num_teams - 1) ? end : (team_begin + chunk_size);
+ ValueType* team_scratch =
+ scratch_ptr + team_num * max_team_threads * value_count;
+ ReferenceType result = final_reducer.init(
+ &team_scratch[omp_get_thread_num() * value_count]);
+
+ // Accumulate partial results in thread specific storage.
+#pragma omp for simd
+ for (auto i = team_begin; i < team_end; ++i) {
+ f(i, result);
+ }
+
+      // Reduce all partial results within a team.
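+      // Each sweep of the loop below joins scratch slots that lie
+      // `tree_neighbor_offset` apart and then doubles the stride, so slot 0
+      // holds the team's partial result after log2(team_size) sweeps.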
+ const int team_size = max_team_threads;
+ int tree_neighbor_offset = 1;
+ do {
+#pragma omp for simd
+ for (int i = 0; i < team_size - tree_neighbor_offset;
+ i += 2 * tree_neighbor_offset) {
+ const int neighbor = i + tree_neighbor_offset;
+ final_reducer.join(&team_scratch[i * value_count],
+ &team_scratch[neighbor * value_count]);
+ }
+ tree_neighbor_offset *= 2;
+ } while (tree_neighbor_offset < team_size);
+ } // end parallel
+ } // end target
+
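+  // Second stage: reduce the per-team partial results across teams on the
+  // device, reusing the doubling-stride tree scheme.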
+ int tree_neighbor_offset = 1;
+ do {
+#pragma omp target teams distribute parallel for simd map(to : f) \
+ is_device_ptr(scratch_ptr)
+ for (int i = 0; i < max_teams - tree_neighbor_offset;
+ i += 2 * tree_neighbor_offset) {
+ ValueType* team_scratch = scratch_ptr;
+ const int team_offset = max_team_threads * value_count;
+ final_reducer.join(
+ &team_scratch[i * team_offset],
+ &team_scratch[(i + tree_neighbor_offset) * team_offset]);
+
+      // Call `final`, if the functor provides it, exactly once at the end
+      // of the whole reduction.
+ if (tree_neighbor_offset * 2 >= max_teams && omp_get_team_num() == 0 &&
+ omp_get_thread_num() == 0) {
+ final_reducer.final(scratch_ptr);
+ }
+ }
+ tree_neighbor_offset *= 2;
+ } while (tree_neighbor_offset < max_teams);
+
+  // Copy the final values out of scratch: across to the host if the result
+  // view lives there, otherwise device-to-device.
+ if (0 < value_count) {
+ if (!ptr_on_device)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_initial_device(), omp_get_default_device()));
+ else
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_default_device(), omp_get_default_device()));
+ }
+ }
+};
+
+template <class FunctorType, class ReducerType, class PointerType,
+ class ValueType, class... PolicyArgs>
+struct ParallelReduceSpecialize<FunctorType, TeamPolicyInternal<PolicyArgs...>,
+ ReducerType, PointerType, ValueType> {
+ using PolicyType = TeamPolicyInternal<PolicyArgs...>;
+ using ReducerTypeFwd =
+ std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+ FunctorType, ReducerType>;
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ PolicyType, ReducerTypeFwd, ValueType>;
+
+ using ReferenceType = typename Analysis::reference_type;
+
+ using ParReduceCopy = ParallelReduceCopy<PointerType>;
+
+ using FunctorAdapter =
+ Kokkos::Experimental::Impl::FunctorAdapter<FunctorType, PolicyType>;
+
+ static void execute_reducer(const FunctorAdapter& f, const PolicyType& p,
+ PointerType result_ptr, bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+ "parallel_reduce:reducer");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+ "parallel_reduce:reducer");
+
+ const int league_size = p.league_size();
+ const int team_size = p.team_size();
+ const int vector_length = p.impl_vector_length();
+
+ const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+ const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+ p.space().impl_internal_space_instance()->resize_scratch(
+ PolicyType::member_type::TEAM_REDUCE_SIZE, shmem_size_L0, shmem_size_L1,
+ league_size);
+ void* scratch_ptr =
+ p.space().impl_internal_space_instance()->get_scratch_ptr();
+
+ ValueType result = ValueType();
+
+ // Maximum active teams possible.
+  // FIXME_OPENMPTARGET: The Cray compiler has not yet implemented
+  // omp_get_max_teams.
+#if !defined(KOKKOS_COMPILER_CRAY_LLVM)
+ int max_active_teams = omp_get_max_teams();
+#else
+ int max_active_teams =
+ std::min(p.space().concurrency() / team_size, league_size);
+#endif
+
+  // Do not launch the kernel if the maximum number of active teams is <= 0.
+ if (max_active_teams <= 0) return;
+
+#pragma omp declare reduction(custom \
+:ValueType : OpenMPTargetReducerWrapper<ReducerType>::join(omp_out, omp_in)) \
+ initializer(OpenMPTargetReducerWrapper<ReducerType>::init(omp_priv))
+
+#if !defined(KOKKOS_IMPL_OPENMPTARGET_HIERARCHICAL_INTEL_GPU)
+ KOKKOS_IMPL_OMPTARGET_PRAGMA(
+ teams num_teams(max_active_teams) thread_limit(team_size)
+ firstprivate(f) is_device_ptr(scratch_ptr) reduction(custom
+ : result)
+ KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(shmem_size_L0))
+#pragma omp parallel reduction(custom : result)
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ const int blockIdx = omp_get_team_num();
+ const int gridDim = omp_get_num_teams();
+
+      // Stride the league across the launched teams so that every league
+      // member is processed even if the runtime launches fewer teams than
+      // requested via `num_teams`.
+ for (int league_id = blockIdx; league_id < league_size;
+ league_id += gridDim) {
+ typename PolicyType::member_type team(
+ league_id, league_size, team_size, vector_length, scratch_ptr,
+ blockIdx, shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ }
+#else
+#pragma omp target teams distribute firstprivate(f) is_device_ptr(scratch_ptr) \
+ num_teams(max_active_teams) thread_limit(team_size) \
+ reduction(custom : result)
+ for (int i = 0; i < league_size; i++) {
+#pragma omp parallel reduction(custom : result)
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ typename PolicyType::member_type team(i, league_size, team_size,
+ vector_length, scratch_ptr, i,
+ shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ }
+#endif
+
+    // Copy the result to the output pointer; this is a device-to-device
+    // copy when the result view lives on the device.
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ }
+
+ template <int NumReductions>
+ static void execute_array(const FunctorAdapter& f, const PolicyType& p,
+ PointerType result_ptr, bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+ "parallel_reduce:array_reduction");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+ "parallel_reduce:array_reduction");
+
+ const int league_size = p.league_size();
+ const int team_size = p.team_size();
+ const int vector_length = p.impl_vector_length();
+
+ const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+ const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+ p.space().impl_internal_space_instance()->resize_scratch(
+ PolicyType::member_type::TEAM_REDUCE_SIZE, shmem_size_L0, shmem_size_L1,
+ league_size);
+ void* scratch_ptr =
+ p.space().impl_internal_space_instance()->get_scratch_ptr();
+
+ // Maximum active teams possible.
+  // FIXME_OPENMPTARGET: The Cray compiler has not yet implemented
+  // omp_get_max_teams.
+#if !defined(KOKKOS_COMPILER_CRAY_LLVM)
+ int max_active_teams = omp_get_max_teams();
+#else
+ int max_active_teams =
+ std::min(p.space().concurrency() / team_size, league_size);
+#endif
+
+  // Do not launch the kernel if the maximum number of active teams is <= 0.
+ if (max_active_teams <= 0) return;
+
+ // Case where the number of reduction items is 1.
+ if constexpr (NumReductions == 1) {
+ ValueType result = ValueType();
+
+ // Case where reduction is on a native data type.
+ if constexpr (std::is_arithmetic<ValueType>::value) {
+ // Use scratch memory extensions to request dynamic shared memory for
+ // the right compiler/architecture combination.
+ KOKKOS_IMPL_OMPTARGET_PRAGMA(teams num_teams(max_active_teams) thread_limit(team_size) map(to: f) \
+ is_device_ptr(scratch_ptr) reduction(+: result) \
+ KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(shmem_size_L0))
+#pragma omp parallel reduction(+ : result)
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ const int blockIdx = omp_get_team_num();
+ const int gridDim = omp_get_num_teams();
+
+          // Stride the league across the launched teams so that every
+          // league member is processed even if the runtime launches fewer
+          // teams than requested via `num_teams`.
+ for (int league_id = blockIdx; league_id < league_size;
+ league_id += gridDim) {
+ typename PolicyType::member_type team(
+ league_id, league_size, team_size, vector_length, scratch_ptr,
+ blockIdx, shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ }
+ } else {
+ // Case where the reduction is on a non-native data type.
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+#pragma omp target teams num_teams(max_active_teams) thread_limit(team_size) \
+ map(to : f) is_device_ptr(scratch_ptr) reduction(custom : result)
+#pragma omp parallel reduction(custom : result)
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ const int blockIdx = omp_get_team_num();
+ const int gridDim = omp_get_num_teams();
+
+        // Stride the league across the launched teams so that every league
+        // member is processed even if the runtime launches fewer teams than
+        // requested via `num_teams`.
+ for (int league_id = blockIdx; league_id < league_size;
+ league_id += gridDim) {
+ typename PolicyType::member_type team(
+ league_id, league_size, team_size, vector_length, scratch_ptr,
+ blockIdx, shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ }
+ }
+
+      // Copy the result to the output pointer; this is a device-to-device
+      // copy when the result view lives on the device.
+ ParReduceCopy::memcpy_result(result_ptr, &result, sizeof(ValueType),
+ ptr_on_device);
+ } else {
+ ValueType result[NumReductions] = {};
+ // Case where the reduction is on an array.
+#pragma omp target teams num_teams(max_active_teams) thread_limit(team_size) \
+ map(to : f) is_device_ptr(scratch_ptr) \
+ reduction(+ : result[ : NumReductions])
+#pragma omp parallel reduction(+ : result[ : NumReductions])
+ {
+ if (omp_get_num_teams() > max_active_teams)
+ Kokkos::abort("`omp_set_num_teams` call was not respected.\n");
+
+ const int blockIdx = omp_get_team_num();
+ const int gridDim = omp_get_num_teams();
+
+        // Stride the league across the launched teams so that every league
+        // member is processed even if the runtime launches fewer teams than
+        // requested via `num_teams`.
+ for (int league_id = blockIdx; league_id < league_size;
+ league_id += gridDim) {
+ typename PolicyType::member_type team(
+ league_id, league_size, team_size, vector_length, scratch_ptr,
+ blockIdx, shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ }
+
+      // Copy the result to the output pointer; this is a device-to-device
+      // copy when the result view lives on the device.
+ ParReduceCopy::memcpy_result(
+ result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
+ }
+ }
+
+  // FIXME_OPENMPTARGET: This routine is a copy of `parallel_reduce` over
+  // RangePolicy and needs a dedicated implementation.
+ static void execute_init_join(const FunctorAdapter& f, const PolicyType& p,
+ PointerType ptr, const bool ptr_on_device) {
+ Experimental::Impl::OpenMPTargetInternal::verify_is_process(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+        "parallel_reduce:init_join");
+ Experimental::Impl::OpenMPTargetInternal::verify_initialized(
+ "Kokkos::Experimental::OpenMPTarget TeamPolicy "
+ "parallel_reduce:init_join");
+ using FunctorAnalysis =
+ Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+ FunctorType, ValueType>;
+
+ const int league_size = p.league_size();
+ const int team_size = p.team_size();
+ const int vector_length = p.impl_vector_length();
+
+ auto begin = 0;
+ auto end = league_size * team_size + team_size * vector_length;
+
+ const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+ const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+
+ // FIXME_OPENMPTARGET: This would oversubscribe scratch memory since we are
+ // already using the available scratch memory to create temporaries for each
+ // thread.
+ if ((shmem_size_L0 + shmem_size_L1) > 0) {
+ Kokkos::abort(
+ "OpenMPTarget: Scratch memory is not supported in `parallel_reduce` "
+ "over functors with init/join.");
+ }
+
+ const auto nteams = league_size;
+
+ // Number of elements in the reduction
+ const auto value_count = FunctorAnalysis::value_count(f.get_functor());
+
+ // Allocate scratch per active thread.
+ p.space().impl_internal_space_instance()->resize_scratch(
+ 1, 0, value_count * sizeof(ValueType), league_size);
+ void* scratch_ptr =
+ p.space().impl_internal_space_instance()->get_scratch_ptr();
+ typename FunctorAnalysis::Reducer final_reducer(f.get_functor());
+
+ if (end <= begin) {
+// If there is no work to be done, copy back the initialized values and
+// exit.
+#pragma omp target map(to : final_reducer) is_device_ptr(scratch_ptr)
+ {
+ final_reducer.init(scratch_ptr);
+ final_reducer.final(scratch_ptr);
+ }
+
+ if (0 < value_count) {
+ if (!ptr_on_device)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_initial_device(), omp_get_default_device()));
+ else
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_default_device(), omp_get_default_device()));
+ }
+
+ return;
+ }
+ // Use scratch memory extensions to request dynamic shared memory for the
+ // right compiler/architecture combination.
+ KOKKOS_IMPL_OMPTARGET_PRAGMA(
+ teams num_teams(nteams) thread_limit(team_size) map(to
+ : f)
+ is_device_ptr(scratch_ptr)
+ KOKKOS_IMPL_OMPX_DYN_CGROUP_MEM(shmem_size_L0)) {
+#pragma omp parallel
+ {
+ const int team_num = omp_get_team_num();
+ const int num_teams = omp_get_num_teams();
+ ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr) +
+ team_num * team_size * value_count;
+ ReferenceType result = final_reducer.init(&team_scratch[0]);
+
+ for (int league_id = team_num; league_id < league_size;
+ league_id += num_teams) {
+ typename PolicyType::member_type team(
+ league_id, league_size, team_size, vector_length, scratch_ptr,
+ team_num, shmem_size_L0, shmem_size_L1);
+ f(team, result);
+ }
+ } // end parallel
+ } // end target
+
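+  // Second stage: join the per-team partial results across teams with the
+  // same doubling-stride tree reduction, calling `final` once at the end.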
+ int tree_neighbor_offset = 1;
+ do {
+#pragma omp target teams distribute parallel for simd firstprivate( \
+ final_reducer) is_device_ptr(scratch_ptr)
+ for (int i = 0; i < nteams - tree_neighbor_offset;
+ i += 2 * tree_neighbor_offset) {
+ ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr);
+ const int team_offset = team_size * value_count;
+ final_reducer.join(
+ &team_scratch[i * team_offset],
+ &team_scratch[(i + tree_neighbor_offset) * team_offset]);
+
+      // Call `final`, if the functor provides it, exactly once at the end
+      // of the whole reduction.
+ if (tree_neighbor_offset * 2 >= nteams && omp_get_team_num() == 0 &&
+ omp_get_thread_num() == 0) {
+ final_reducer.final(scratch_ptr);
+ }
+ }
+ tree_neighbor_offset *= 2;
+ } while (tree_neighbor_offset < nteams);
+
+  // Copy the final values out of scratch: across to the host if the result
+  // view lives there, otherwise device-to-device.
+ if (0 < value_count) {
+ if (!ptr_on_device)
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_initial_device(), omp_get_default_device()));
+ else
+ KOKKOS_IMPL_OMPT_SAFE_CALL(omp_target_memcpy(
+ ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+ omp_get_default_device(), omp_get_default_device()));
+ }
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGETREDUCER_HPP
+#define KOKKOS_OPENMPTARGETREDUCER_HPP
+
+#include <impl/Kokkos_Traits.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include "Kokkos_OpenMPTarget_Abort.hpp"
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Reducer>
+struct OpenMPTargetReducerWrapper {
+ using value_type = typename Reducer::value_type;
+
+  // Using a generic, unknown Reducer with the OpenMPTarget backend is not
+  // implemented: the deleted join/init make any attempt a compile error.
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type&, const value_type&) = delete;
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type&) = delete;
+};
+
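+// Specializations for the built-in Kokkos reducers: each provides the
+// static join/init pair that the OpenMPTarget parallel_reduce paths wire
+// into `#pragma omp declare reduction`.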
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Sum<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) { dest += src; }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::sum();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Prod<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) { dest *= src; }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::prod();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Min<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src < dest) dest = src;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::min();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Max<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src > dest) dest = src;
+ }
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::max();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<LAnd<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest = dest && src;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::land();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<LOr<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ using result_view_type = Kokkos::View<value_type, Space>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest = dest || src;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::lor();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<BAnd<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest = dest & src;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::band();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<BOr<Scalar, Space>> {
+ public:
+ // Required
+ using value_type = std::remove_cv_t<Scalar>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest = dest | src;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val = reduction_identity<value_type>::bor();
+ }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = ValLocScalar<scalar_type, index_type>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
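+    // Prefer the smaller value; on a tie, adopt src's location only when
+    // dest still holds the identity location, i.e. it was never updated.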
+ if (src.val < dest.val)
+ dest = src;
+ else if (src.val == dest.val &&
+ dest.loc == reduction_identity<index_type>::min()) {
+ dest.loc = src.loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.val = reduction_identity<scalar_type>::min();
+ val.loc = reduction_identity<index_type>::min();
+ }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MaxLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = ValLocScalar<scalar_type, index_type>;
+
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
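+    // Prefer the larger value; on a tie, adopt src's location only when
+    // dest still holds the identity location, i.e. it was never updated.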
+ if (src.val > dest.val)
+ dest = src;
+ else if (src.val == dest.val &&
+ dest.loc == reduction_identity<index_type>::min()) {
+ dest.loc = src.loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.val = reduction_identity<scalar_type>::max();
+ val.loc = reduction_identity<index_type>::min();
+ }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<MinMax<Scalar, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+
+ public:
+ // Required
+ using value_type = MinMaxScalar<scalar_type>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src.min_val < dest.min_val) {
+ dest.min_val = src.min_val;
+ }
+ if (src.max_val > dest.max_val) {
+ dest.max_val = src.max_val;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.max_val = reduction_identity<scalar_type>::max();
+ val.min_val = reduction_identity<scalar_type>::min();
+ }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinMaxLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = MinMaxLocScalar<scalar_type, index_type>;
+
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src.min_val < dest.min_val) {
+ dest.min_val = src.min_val;
+ dest.min_loc = src.min_loc;
+ } else if (dest.min_val == src.min_val &&
+ dest.min_loc == reduction_identity<index_type>::min()) {
+ dest.min_loc = src.min_loc;
+ }
+ if (src.max_val > dest.max_val) {
+ dest.max_val = src.max_val;
+ dest.max_loc = src.max_loc;
+ } else if (dest.max_val == src.max_val &&
+ dest.max_loc == reduction_identity<index_type>::min()) {
+ dest.max_loc = src.max_loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.max_val = reduction_identity<scalar_type>::max();
+ val.min_val = reduction_identity<scalar_type>::min();
+ val.max_loc = reduction_identity<index_type>::min();
+ val.min_loc = reduction_identity<index_type>::min();
+ }
+};
+
+//
+// specialize for MaxFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MaxFirstLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = ValLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (dest.val < src.val) {
+ dest = src;
+ } else if (!(src.val < dest.val)) {
+ dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.val = reduction_identity<scalar_type>::max();
+ val.loc = reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for MinFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinFirstLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = ValLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src.val < dest.val) {
+ dest = src;
+ } else if (!(dest.val < src.val)) {
+ dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.val = reduction_identity<scalar_type>::min();
+ val.loc = reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for MinMaxFirstLastLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinMaxFirstLastLoc<Scalar, Index, Space>> {
+ private:
+ using scalar_type = std::remove_cv_t<Scalar>;
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = MinMaxLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ if (src.min_val < dest.min_val) {
+ dest.min_val = src.min_val;
+ dest.min_loc = src.min_loc;
+ } else if (!(dest.min_val < src.min_val)) {
+ dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
+ }
+
+ if (dest.max_val < src.max_val) {
+ dest.max_val = src.max_val;
+ dest.max_loc = src.max_loc;
+ } else if (!(src.max_val < dest.max_val)) {
+ dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
+ }
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.max_val = reduction_identity<scalar_type>::max();
+ val.min_val = reduction_identity<scalar_type>::min();
+ val.max_loc = reduction_identity<index_type>::max();
+ val.min_loc = reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for FirstLoc
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<FirstLoc<Index, Space>> {
+ private:
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = FirstLocScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
+ ? src.min_loc_true
+ : dest.min_loc_true;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.min_loc_true = reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for LastLoc
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<LastLoc<Index, Space>> {
+ private:
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = LastLocScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
+ ? src.max_loc_true
+ : dest.max_loc_true;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.max_loc_true = reduction_identity<index_type>::max();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for StdIsPartitioned
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<StdIsPartitioned<Index, Space>> {
+ private:
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = StdIsPartScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
+ ? src.max_loc_true
+ : dest.max_loc_true;
+
+ dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+ ? dest.min_loc_false
+ : src.min_loc_false;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.max_loc_true = ::Kokkos::reduction_identity<index_type>::max();
+ val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+//
+// specialize for StdPartitionPoint
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<StdPartitionPoint<Index, Space>> {
+ private:
+ using index_type = std::remove_cv_t<Index>;
+
+ public:
+ // Required
+ using value_type = StdPartPointScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but the Intel
+// compiler fails without it
+#pragma omp declare target
+ // Required
+ KOKKOS_INLINE_FUNCTION
+ static void join(value_type& dest, const value_type& src) {
+ dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+ ? dest.min_loc_false
+ : src.min_loc_false;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ static void init(value_type& val) {
+ val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+ }
+#pragma omp end declare target
+};
+
+/*
+template<class ReducerType>
+class OpenMPTargetReducerWrapper {
+ public:
+ const ReducerType& reducer;
+ using value_type = typename ReducerType::value_type;
+ value_type& value;
+
+ KOKKOS_INLINE_FUNCTION
+ void join(const value_type& upd) {
+ reducer.join(value,upd);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void init(const value_type& upd) {
+ reducer.init(value,upd);
+ }
+};*/
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_OPENMPTARGET_UNIQUE_TOKEN_HPP
#define KOKKOS_OPENMPTARGET_UNIQUE_TOKEN_HPP
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_OPENMPTARGET
-#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTargetSpace.hpp>
#include <Kokkos_UniqueToken.hpp>
#include <impl/Kokkos_SharedAlloc.hpp>
#include <impl/Kokkos_ConcurrentBitset.hpp>
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Concepts.hpp>
#include <SYCL/Kokkos_SYCL_Instance.hpp>
-#include <Kokkos_SYCL.hpp>
+#include <SYCL/Kokkos_SYCL.hpp>
#include <Kokkos_HostSpace.hpp>
-#include <Kokkos_Serial.hpp>
#include <Kokkos_Core.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_DeviceManagement.hpp>
} // namespace
namespace Kokkos {
-namespace Experimental {
SYCL::SYCL()
: m_space_instance(&Impl::SYCLInternal::singleton(),
[](Impl::SYCLInternal*) {}) {
ptr->finalize();
delete ptr;
}) {
+ // In principle could be guarded with
+ // #ifdef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ // but we chose to require user-provided queues to be in-order
+ // unconditionally so that code downstream does not break
+ // when the backend setting changes.
+ if (!stream.is_in_order())
+ Kokkos::abort("User provided sycl::queues must be in-order!");
Impl::SYCLInternal::singleton().verify_is_initialized(
"SYCL instance constructor");
m_space_instance->initialize(stream);
}
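+// concurrency() is a non-static member function from Kokkos 4 onwards; the
+// static variant is kept only while KOKKOS_ENABLE_DEPRECATED_CODE_4 is set.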
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
int SYCL::concurrency() {
return Impl::SYCLInternal::singleton().m_maxConcurrency;
}
+#else
+int SYCL::concurrency() const { return m_space_instance->m_maxConcurrency; }
+#endif
const char* SYCL::name() { return "SYCL"; }
void SYCL::impl_finalize() { Impl::SYCLInternal::singleton().finalize(); }
void SYCL::print_configuration(std::ostream& os, bool verbose) const {
- os << "Devices:\n";
- os << " KOKKOS_ENABLE_SYCL: yes\n";
-
os << "\nRuntime Configuration:\n";
- os << "macro KOKKOS_ENABLE_SYCL : defined\n";
- if (verbose)
+#ifdef KOKKOS_ENABLE_ONEDPL
+ os << "macro KOKKOS_ENABLE_ONEDPL : defined\n";
+#else
+ os << "macro KOKKOS_ENABLE_ONEDPL : undefined\n";
+#endif
+#ifdef KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED
+ os << "macro KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED : defined\n";
+#else
+ os << "macro KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED : undefined\n";
+#endif
+#ifdef KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE
+ os << "macro KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE : defined\n";
+#else
+ os << "macro KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE : undefined\n";
+#endif
+#ifdef SYCL_EXT_ONEAPI_DEVICE_GLOBAL
+ os << "macro SYCL_EXT_ONEAPI_DEVICE_GLOBAL : defined\n";
+#else
+ os << "macro SYCL_EXT_ONEAPI_DEVICE_GLOBAL : undefined\n";
+#endif
+#ifdef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ os << "macro KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES : defined\n";
+#else
+ os << "macro KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES : undefined\n";
+#endif
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ os << "macro SYCL_EXT_ONEAPI_GRAPH : defined\n";
+#else
+ os << "macro SYCL_EXT_ONEAPI_GRAPH : undefined\n";
+#endif
+#ifdef SYCL_EXT_INTEL_QUEUE_IMMEDIATE_COMMAND_LIST
+ if (sycl_queue()
+ .has_property<
+ sycl::ext::intel::property::queue::immediate_command_list>())
+ os << "Immediate command lists enforced\n";
+ else if (sycl_queue()
+ .has_property<sycl::ext::intel::property::queue::
+ no_immediate_command_list>())
+ os << "Standard command queue enforced\n";
+ else
+#endif
+ {
+ os << "Immediate command lists and standard command queue allowed.\n";
+ if (const char* environment_setting =
+ std::getenv("SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS"))
+ os << "SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS="
+ << environment_setting << " takes precedence.\n";
+ else
+ os << "SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS not defined.\n";
+ }
+
+ int counter = 0;
+ int active_device = Kokkos::device_id();
+  os << "\nAvailable devices:\n";
+ std::vector<sycl::device> devices = Impl::get_sycl_devices();
+ for (const auto& device : devices) {
+ std::string device_type;
+ switch (device.get_info<sycl::info::device::device_type>()) {
+ case sycl::info::device_type::cpu: device_type = "cpu"; break;
+ case sycl::info::device_type::gpu: device_type = "gpu"; break;
+ case sycl::info::device_type::accelerator:
+ device_type = "accelerator";
+ break;
+ case sycl::info::device_type::custom: device_type = "custom"; break;
+ case sycl::info::device_type::automatic: device_type = "automatic"; break;
+ case sycl::info::device_type::host: device_type = "host"; break;
+ case sycl::info::device_type::all: device_type = "all"; break;
+ }
+  os << '[' << device.get_backend() << ':' << device_type << ':' << counter
+ << "] " << device.get_info<sycl::info::device::name>();
+ if (counter == active_device) os << " : Selected";
+ os << '\n';
+ ++counter;
+ }
+
+ if (verbose) {
+ os << '\n';
SYCL::impl_sycl_info(os, m_space_instance->m_queue->get_device());
+ }
}
void SYCL::fence(const std::string& name) const {
}
void SYCL::impl_static_fence(const std::string& name) {
- Kokkos::Tools::Experimental::Impl::profile_fence_event<
- Kokkos::Experimental::SYCL>(
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::SYCL>(
name,
Kokkos::Tools::Experimental::SpecialSynchronizationCases::
GlobalDeviceSynchronization,
});
}
-int SYCL::sycl_device() const {
- return impl_internal_space_instance()->m_syclDev;
-}
-
void SYCL::impl_initialize(InitializationSettings const& settings) {
- std::vector<sycl::device> gpu_devices =
- sycl::device::get_devices(sycl::info::device_type::gpu);
- // If the device id is not specified and there are no GPUs, sidestep Kokkos
- // device selection and use whatever is available (if no GPU architecture is
- // specified).
-#if !defined(KOKKOS_ARCH_INTEL_GPU) && !defined(KOKKOS_ARCH_KEPLER) && \
- !defined(KOKKOS_ARCH_MAXWELL) && !defined(KOKKOS_ARCH_PASCAL) && \
- !defined(KOKKOS_ARCH_VOLTA) && !defined(KOKKOS_ARCH_TURING75) && \
- !defined(KOKKOS_ARCH_AMPERE)
- if (!settings.has_device_id() && gpu_devices.empty()) {
- Impl::SYCLInternal::singleton().initialize(sycl::device());
- return;
- }
-#endif
- using Kokkos::Impl::get_gpu;
- Impl::SYCLInternal::singleton().initialize(gpu_devices[get_gpu(settings)]);
+ const auto& visible_devices = ::Kokkos::Impl::get_visible_devices();
+ const auto id =
+ ::Kokkos::Impl::get_gpu(settings).value_or(visible_devices[0]);
+ std::vector<sycl::device> sycl_devices = Impl::get_sycl_devices();
+ Impl::SYCLInternal::singleton().initialize(sycl_devices[id]);
+ Impl::SYCLInternal::m_syclDev = id;
}
std::ostream& SYCL::impl_sycl_info(std::ostream& os,
using namespace sycl::info;
return os << "Name: " << device.get_info<device::name>()
<< "\nDriver Version: " << device.get_info<device::driver_version>()
- << "\nIs Host: " << device.is_host()
<< "\nIs CPU: " << device.is_cpu()
<< "\nIs GPU: " << device.is_gpu()
<< "\nIs Accelerator: " << device.is_accelerator()
<< "\nNative Vector Width Half: "
<< device.get_info<device::native_vector_width_half>()
<< "\nAddress Bits: " << device.get_info<device::address_bits>()
- << "\nImage Support: " << device.get_info<device::image_support>()
<< "\nMax Mem Alloc Size: "
<< device.get_info<device::max_mem_alloc_size>()
<< "\nMax Read Image Args: "
<< device.get_info<device::image3d_max_depth>()
<< "\nImage Max Buffer Size: "
<< device.get_info<device::image_max_buffer_size>()
- << "\nImage Max Array Size: "
- << device.get_info<device::image_max_array_size>()
<< "\nMax Samplers: " << device.get_info<device::max_samplers>()
<< "\nMax Parameter Size: "
<< device.get_info<device::max_parameter_size>()
<< "\nLocal Mem Size: " << device.get_info<device::local_mem_size>()
<< "\nError Correction Support: "
<< device.get_info<device::error_correction_support>()
- << "\nHost Unified Memory: "
- << device.get_info<device::host_unified_memory>()
<< "\nProfiling Timer Resolution: "
<< device.get_info<device::profiling_timer_resolution>()
- << "\nIs Endian Little: "
- << device.get_info<device::is_endian_little>()
<< "\nIs Available: " << device.get_info<device::is_available>()
- << "\nIs Compiler Available: "
- << device.get_info<device::is_compiler_available>()
- << "\nIs Linker Available: "
- << device.get_info<device::is_linker_available>()
- << "\nQueue Profiling: "
- << device.get_info<device::queue_profiling>()
<< "\nVendor: " << device.get_info<device::vendor>()
- << "\nProfile: " << device.get_info<device::profile>()
<< "\nVersion: " << device.get_info<device::version>()
- << "\nPrintf Buffer Size: "
- << device.get_info<device::printf_buffer_size>()
- << "\nPreferred Interop User Sync: "
- << device.get_info<device::preferred_interop_user_sync>()
<< "\nPartition Max Sub Devices: "
<< device.get_info<device::partition_max_sub_devices>()
<< "\nReference Count: "
namespace Impl {
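+// Collect the SYCL devices visible to this backend. When Kokkos targets a
+// specific GPU architecture, only GPU devices whose SYCL backend matches
+// that architecture are kept (Level Zero for Intel, CUDA for NVIDIA, HIP
+// for AMD); otherwise every visible device is returned.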
+std::vector<sycl::device> get_sycl_devices() {
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU) || \
+ defined(KOKKOS_ARCH_AMD_GPU)
+ std::vector<sycl::device> devices =
+ sycl::device::get_devices(sycl::info::device_type::gpu);
+#if defined(KOKKOS_ARCH_INTEL_GPU)
+ sycl::backend backend = sycl::backend::ext_oneapi_level_zero;
+#elif defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ sycl::backend backend = sycl::backend::ext_oneapi_cuda;
+#elif defined(KOKKOS_ARCH_AMD_GPU)
+ sycl::backend backend = sycl::backend::ext_oneapi_hip;
+#endif
+ devices.erase(std::remove_if(devices.begin(), devices.end(),
+ [backend](const sycl::device& d) {
+ return d.get_backend() != backend;
+ }),
+ devices.end());
+#else
+ std::vector<sycl::device> devices = sycl::device::get_devices();
+#endif
+ return devices;
+}
+
int g_sycl_space_factory_initialized =
Kokkos::Impl::initialize_space_factory<SYCL>("170_SYCL");
-}
-} // namespace Experimental
+} // namespace Impl
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_SYCL_HPP
#define KOKKOS_SYCL_HPP
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_SYCL
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
#include <CL/sycl.hpp>
-#include <Kokkos_SYCL_Space.hpp>
+#endif
+#include <SYCL/Kokkos_SYCL_Space.hpp>
#include <Kokkos_Layout.hpp>
#include <Kokkos_ScratchSpace.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
#include <impl/Kokkos_InitializationSettings.hpp>
namespace Kokkos {
-namespace Experimental {
namespace Impl {
class SYCLInternal;
}
//! \name Functions that all Kokkos devices must implement.
//@{
- KOKKOS_INLINE_FUNCTION static int in_parallel() {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION static int in_parallel() {
#if defined(__SYCL_DEVICE_ONLY__)
return true;
#else
return false;
#endif
}
-
- /** \brief Set the device in a "sleep" state. */
- static bool sleep();
-
- /** \brief Wake the device from the 'sleep' state. A noop for OpenMP. */
- static bool wake();
+#endif
  /** \brief Wait until all dispatched functors complete. */
static void impl_static_fence(const std::string& name);
- void fence(
- const std::string& name =
- "Kokkos::Experimental::SYCL::fence: Unnamed Instance Fence") const;
+ void fence(const std::string& name =
+ "Kokkos::SYCL::fence: Unnamed Instance Fence") const;
/// \brief Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
static void impl_initialize(InitializationSettings const&);
- int sycl_device() const;
-
static bool impl_is_initialized();
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int concurrency();
+#else
+ int concurrency() const;
+#endif
+
static const char* name();
inline Impl::SYCLInternal* impl_internal_space_instance() const {
static std::ostream& impl_sycl_info(std::ostream& os,
const sycl::device& device);
+ friend bool operator==(SYCL const& lhs, SYCL const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(SYCL const& lhs, SYCL const& rhs) {
+ return !(lhs == rhs);
+ }
Kokkos::Impl::HostSharedPtr<Impl::SYCLInternal> m_space_instance;
};
-} // namespace Experimental
-
namespace Tools {
namespace Experimental {
template <>
-struct DeviceTypeTraits<Kokkos::Experimental::SYCL> {
+struct DeviceTypeTraits<Kokkos::SYCL> {
/// \brief An ID to differentiate (for example) Serial from OpenMP in Tooling
static constexpr DeviceType id = DeviceType::SYCL;
- static int device_id(const Kokkos::Experimental::SYCL& exec) {
- return exec.sycl_device();
+ static int device_id(const Kokkos::SYCL& exec) {
+ return exec.impl_internal_space_instance()->m_syclDev;
}
};
} // namespace Experimental
namespace Experimental {
template <class... Args>
std::vector<SYCL> partition_space(const SYCL& sycl_space, Args...) {
-#ifdef __cpp_fold_expressions
static_assert(
(... && std::is_arithmetic_v<Args>),
"Kokkos Error: partitioning arguments must be integers or floats");
-#endif
sycl::context context = sycl_space.sycl_queue().get_context();
sycl::device device =
std::vector<SYCL> instances;
instances.reserve(sizeof...(Args));
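+  // The new queues are created in-order, matching the guarantee the SYCL
+  // instance constructor enforces for user-provided queues.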
for (unsigned int i = 0; i < sizeof...(Args); ++i)
- instances.emplace_back(sycl::queue(context, device));
+ instances.emplace_back(
+ sycl::queue(context, device, sycl::property::queue::in_order()));
return instances;
}
template <class T>
std::vector<SYCL> partition_space(const SYCL& sycl_space,
- std::vector<T>& weights) {
+ std::vector<T> const& weights) {
static_assert(
std::is_arithmetic<T>::value,
"Kokkos Error: partitioning arguments must be integers or floats");
sycl::device device =
sycl_space.impl_internal_space_instance()->m_queue->get_device();
std::vector<SYCL> instances;
+
+  // Only the number of instances to create matters here; the weights are
+  // otherwise ignored.
instances.reserve(weights.size());
for (unsigned int i = 0; i < weights.size(); ++i)
- instances.emplace_back(sycl::queue(context, device));
+ instances.emplace_back(
+ sycl::queue(context, device, sycl::property::queue::in_order()));
return instances;
}
+
} // namespace Experimental
+namespace Impl {
+std::vector<sycl::device> get_sycl_devices();
+} // namespace Impl
+
} // namespace Kokkos
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_ABORT_HPP
+#define KOKKOS_SYCL_ABORT_HPP
+
+#include <Kokkos_Printf.hpp>
+#if defined(KOKKOS_ENABLE_SYCL)
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+inline void sycl_abort(char const* msg) {
+#ifdef NDEBUG
+ Kokkos::printf("Aborting with message %s.\n", msg);
+#else
+ // Choosing "" here causes problems but a single whitespace character works.
+ const char* empty = " ";
+ __assert_fail(msg, empty, 0, empty);
+#endif
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCLDEEPCOPY_HPP
+#define KOKKOS_SYCLDEEPCOPY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <SYCL/Kokkos_SYCL.hpp>
+
+#include <vector>
+
+#ifdef KOKKOS_ENABLE_SYCL
+
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopySYCL(void* dst, const void* src, size_t n);
+void DeepCopyAsyncSYCL(const Kokkos::SYCL& instance, void* dst, const void* src,
+ size_t n);
+void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n);
+
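+// DeepCopy specializations for the SYCL memory spaces: the two-argument
+// constructors perform a blocking copy, while the overloads taking an
+// execution space run the copy asynchronously on that instance (fencing
+// first when that instance is not the SYCL execution space itself).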
+template <class MemSpace>
+struct DeepCopy<MemSpace, HostSpace, Kokkos::SYCL,
+ std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+ DeepCopy(const Kokkos::SYCL& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncSYCL(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace>
+struct DeepCopy<HostSpace, MemSpace, Kokkos::SYCL,
+ std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+ DeepCopy(const Kokkos::SYCL& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncSYCL(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace1, class MemSpace2>
+struct DeepCopy<MemSpace1, MemSpace2, Kokkos::SYCL,
+ std::enable_if_t<is_sycl_type_space<MemSpace1>::value &&
+ is_sycl_type_space<MemSpace2>::value>> {
+ DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+ DeepCopy(const Kokkos::SYCL& instance, void* dst, const void* src, size_t n) {
+ DeepCopyAsyncSYCL(instance, dst, src, n);
+ }
+};
+
+template <class MemSpace1, class MemSpace2, class ExecutionSpace>
+struct DeepCopy<
+ MemSpace1, MemSpace2, ExecutionSpace,
+ std::enable_if_t<is_sycl_type_space<MemSpace1>::value &&
+ is_sycl_type_space<MemSpace2>::value &&
+ !std::is_same<ExecutionSpace, Kokkos::SYCL>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopySYCL(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncSYCL(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
+ MemSpace2::name() +
+ "Space, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+ MemSpace, HostSpace, ExecutionSpace,
+ std::enable_if_t<is_sycl_type_space<MemSpace>::value &&
+ !std::is_same<ExecutionSpace, Kokkos::SYCL>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopySYCL(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncSYCL(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
+ "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+ HostSpace, MemSpace, ExecutionSpace,
+ std::enable_if_t<is_sycl_type_space<MemSpace>::value &&
+ !std::is_same<ExecutionSpace, Kokkos::SYCL>::value>> {
+ inline DeepCopy(void* dst, const void* src, size_t n) {
+ DeepCopySYCL(dst, src, n);
+ }
+
+ inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+ size_t n) {
+ exec.fence(fence_string());
+ DeepCopyAsyncSYCL(dst, src, n);
+ }
+
+ private:
+ static const std::string& fence_string() {
+ static const std::string string =
+ std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
+ "Space, ExecutionSpace>::DeepCopy: fence before copy";
+ return string;
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_GRAPHNODEKERNEL_HPP
+#define KOKKOS_SYCL_GRAPHNODEKERNEL_HPP
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <Kokkos_PointerOwnership.hpp>
+
+#include <SYCL/Kokkos_SYCL_GraphNode_Impl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
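+// A graph node kernel wraps the regular pattern implementation; instead of
+// launching immediately, its execute() records the command group into the
+// SYCL graph registered via set_sycl_graph_ptr() and stores the node handle.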
+template <typename PolicyType, typename Functor, typename PatternTag,
+ typename... Args>
+class GraphNodeKernelImpl<Kokkos::SYCL, PolicyType, Functor, PatternTag,
+ Args...>
+ : public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+ Args..., Kokkos::SYCL>::type {
+ public:
+ using Policy = PolicyType;
+ using graph_kernel = GraphNodeKernelImpl;
+ using base_t =
+ typename PatternImplSpecializationFromTag<PatternTag, Functor, Policy,
+ Args..., Kokkos::SYCL>::type;
+
+ // TODO: use the name and the execution space.
+ template <typename PolicyDeduced, typename... ArgsDeduced>
+ GraphNodeKernelImpl(std::string, Kokkos::SYCL const&, Functor arg_functor,
+ PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
+ : base_t(std::move(arg_functor), (PolicyDeduced&&)arg_policy,
+ (ArgsDeduced&&)args...) {}
+
+ template <typename PolicyDeduced>
+ GraphNodeKernelImpl(Kokkos::SYCL const& exec_space, Functor arg_functor,
+ PolicyDeduced&& arg_policy)
+ : GraphNodeKernelImpl("", exec_space, std::move(arg_functor),
+ (PolicyDeduced&&)arg_policy) {}
+
+ void set_sycl_graph_ptr(
+ sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::modifiable>*
+ arg_graph) {
+ m_graph_ptr = arg_graph;
+ }
+
+ void set_sycl_graph_node_ptr(
+ std::optional<sycl::ext::oneapi::experimental::node>* arg_node) {
+ m_graph_node_ptr = arg_node;
+ }
+
+ std::optional<sycl::ext::oneapi::experimental::node>& get_sycl_graph_node()
+ const {
+ return *m_graph_node_ptr;
+ }
+
+ sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::modifiable>&
+ get_sycl_graph() const {
+ return *m_graph_ptr;
+ }
+
+ private:
+ Kokkos::ObservingRawPtr<sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::modifiable>>
+ m_graph_ptr = nullptr;
+ Kokkos::ObservingRawPtr<std::optional<sycl::ext::oneapi::experimental::node>>
+ m_graph_node_ptr = nullptr;
+};
+
+struct SYCLGraphNodeAggregateKernel {
+ using graph_kernel = SYCLGraphNodeAggregateKernel;
+
+ // Aggregates don't need a policy, but we provide a minimal one so that the
+ // static assertions about graph kernels are satisfied.
+ struct Policy {
+ using is_graph_kernel = std::true_type;
+ };
+};
+
+template <typename KernelType,
+ typename Tag =
+ typename PatternTagFromImplSpecialization<KernelType>::type>
+struct get_graph_node_kernel_type
+ : type_identity<
+ GraphNodeKernelImpl<Kokkos::SYCL, typename KernelType::Policy,
+ typename KernelType::functor_type, Tag>> {};
+
+template <typename KernelType>
+struct get_graph_node_kernel_type<KernelType, Kokkos::ParallelReduceTag>
+ : type_identity<GraphNodeKernelImpl<
+ Kokkos::SYCL, typename KernelType::Policy,
+ CombinedFunctorReducer<typename KernelType::FunctorType,
+ typename KernelType::ReducerType>,
+ Kokkos::ParallelReduceTag>> {};
+
+template <typename KernelType>
+auto& get_sycl_graph_from_kernel(KernelType const& kernel) {
+ using graph_node_kernel_t =
+ typename get_graph_node_kernel_type<KernelType>::type;
+ auto const& kernel_as_graph_kernel =
+ static_cast<graph_node_kernel_t const&>(kernel);
+ auto& graph = kernel_as_graph_kernel.get_sycl_graph();
+
+ return graph;
+}
+
+template <typename KernelType>
+auto& get_sycl_graph_node_from_kernel(KernelType const& kernel) {
+ using graph_node_kernel_t =
+ typename get_graph_node_kernel_type<KernelType>::type;
+ auto const& kernel_as_graph_kernel =
+ static_cast<graph_node_kernel_t const&>(kernel);
+ auto& graph_node = kernel_as_graph_kernel.get_sycl_graph_node();
+
+ return graph_node;
+}
+
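+// Records the given command group lambda as a kernel node in the graph
+// associated with the kernel and stores the resulting node handle so that
+// edges to predecessors can be added later.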
+template <typename Kernel, typename Lambda>
+void sycl_attach_kernel_to_node(Kernel& kernel, const Lambda& lambda) {
+ sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::modifiable>& graph =
+ Impl::get_sycl_graph_from_kernel(kernel);
+ std::optional<sycl::ext::oneapi::experimental::node>& graph_node =
+ Impl::get_sycl_graph_node_from_kernel(kernel);
+ KOKKOS_ENSURES(!graph_node);
+ graph_node = graph.add(lambda);
+ KOKKOS_ENSURES(graph_node);
+ // FIXME_SYCL_GRAPH not yet implemented in the compiler
+ // KOKKOS_ENSURES(graph_node.get_type() ==
+ // sycl::ext::oneapi::experimental::node_type::kernel)
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_GRAPHNODE_IMPL_HPP
+#define KOKKOS_SYCL_GRAPHNODE_IMPL_HPP
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+
+#include <SYCL/Kokkos_SYCL.hpp>
+
+#include <optional>
+
+namespace Kokkos {
+namespace Impl {
+template <>
+struct GraphNodeBackendSpecificDetails<Kokkos::SYCL> {
+ std::optional<sycl::ext::oneapi::experimental::node> node;
+
+ explicit GraphNodeBackendSpecificDetails() = default;
+
+ explicit GraphNodeBackendSpecificDetails(
+ _graph_node_is_root_ctor_tag) noexcept {}
+};
+
+template <typename Kernel, typename PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure<Kokkos::SYCL, Kernel,
+ PredecessorRef> {
+ protected:
+ GraphNodeBackendDetailsBeforeTypeErasure(
+ Kokkos::SYCL const &, Kernel &, PredecessorRef const &,
+ GraphNodeBackendSpecificDetails<Kokkos::SYCL> &) noexcept {}
+
+ GraphNodeBackendDetailsBeforeTypeErasure(
+ Kokkos::SYCL const &, _graph_node_is_root_ctor_tag,
+ GraphNodeBackendSpecificDetails<Kokkos::SYCL> &) noexcept {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_GRAPH_IMPL_HPP
+#define KOKKOS_SYCL_GRAPH_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>
+#include <impl/Kokkos_GraphNodeImpl.hpp>
+
+#include <SYCL/Kokkos_SYCL_GraphNodeKernel.hpp>
+
+#include <optional>
+
+namespace Kokkos {
+namespace Impl {
+template <>
+class GraphImpl<Kokkos::SYCL> {
+ public:
+ using node_details_t = GraphNodeBackendSpecificDetails<Kokkos::SYCL>;
+ using root_node_impl_t =
+ GraphNodeImpl<Kokkos::SYCL, Kokkos::Experimental::TypeErasedTag,
+ Kokkos::Experimental::TypeErasedTag>;
+ using aggregate_kernel_impl_t = SYCLGraphNodeAggregateKernel;
+ using aggregate_node_impl_t =
+ GraphNodeImpl<Kokkos::SYCL, aggregate_kernel_impl_t,
+ Kokkos::Experimental::TypeErasedTag>;
+
+ // Not movable or copyable; it spends its whole life as a shared_ptr in the
+ // Graph object.
+ GraphImpl() = delete;
+ GraphImpl(GraphImpl const&) = delete;
+ GraphImpl(GraphImpl&&) = delete;
+ GraphImpl& operator=(GraphImpl const&) = delete;
+ GraphImpl& operator=(GraphImpl&&) = delete;
+
+ ~GraphImpl();
+
+ explicit GraphImpl(Kokkos::SYCL instance);
+
+ void add_node(std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr);
+
+ template <class NodeImpl>
+ void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr);
+
+ template <class NodeImplPtr, class PredecessorRef>
+ void add_predecessor(NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref);
+
+ void submit(const Kokkos::SYCL& exec);
+
+ Kokkos::SYCL const& get_execution_space() const noexcept;
+
+ auto create_root_node_ptr();
+
+ template <class... PredecessorRefs>
+ auto create_aggregate_ptr(PredecessorRefs&&...);
+
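+ // May only be called once: turns the modifiable graph into its executable
+ // counterpart.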
+ void instantiate() {
+ KOKKOS_EXPECTS(!m_graph_exec.has_value());
+ m_graph_exec = m_graph.finalize();
+ }
+
+ auto& sycl_graph() { return m_graph; }
+ auto& sycl_graph_exec() { return m_graph_exec; }
+
+ private:
+ Kokkos::SYCL m_execution_space;
+ sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::modifiable>
+ m_graph;
+ std::optional<sycl::ext::oneapi::experimental::command_graph<
+ sycl::ext::oneapi::experimental::graph_state::executable>>
+ m_graph_exec;
+};
+
+inline GraphImpl<Kokkos::SYCL>::~GraphImpl() {
+ m_execution_space.fence("Kokkos::GraphImpl::~GraphImpl: Graph Destruction");
+}
+
+inline GraphImpl<Kokkos::SYCL>::GraphImpl(Kokkos::SYCL instance)
+ : m_execution_space(std::move(instance)),
+ m_graph(m_execution_space.sycl_queue().get_context(),
+ m_execution_space.sycl_queue().get_device()) {}
+
+inline void GraphImpl<Kokkos::SYCL>::add_node(
+ std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr) {
+ // Add an empty node that needs to be set up before finalizing the graph.
+ arg_node_ptr->node_details_t::node = m_graph.add();
+}
+
+// Requires that NodeImplPtr is a shared_ptr to a specialization of
+// GraphNodeImpl and that the kernel has the graph node tag in its policy.
+template <class NodeImpl>
+inline void GraphImpl<Kokkos::SYCL>::add_node(
+ std::shared_ptr<NodeImpl> const& arg_node_ptr) {
+ static_assert(NodeImpl::kernel_type::Policy::is_graph_kernel::value);
+ KOKKOS_EXPECTS(arg_node_ptr);
+ // The kernel launch in the execute() method has been shimmed to insert the
+ // node into the graph instead of submitting it to a queue.
+ auto& kernel = arg_node_ptr->get_kernel();
+ auto& node = static_cast<node_details_t*>(arg_node_ptr.get())->node;
+ KOKKOS_EXPECTS(!node);
+ kernel.set_sycl_graph_ptr(&m_graph);
+ kernel.set_sycl_graph_node_ptr(&node);
+ kernel.execute();
+ KOKKOS_ENSURES(node);
+}
+
+// Requires PredecessorRef is a specialization of GraphNodeRef that has
+// already been added to this graph and NodeImpl is a specialization of
+// GraphNodeImpl that has already been added to this graph.
+template <class NodeImplPtr, class PredecessorRef>
+inline void GraphImpl<Kokkos::SYCL>::add_predecessor(
+ NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref) {
+ KOKKOS_EXPECTS(arg_node_ptr);
+ auto pred_ptr = GraphAccess::get_node_ptr(arg_pred_ref);
+ KOKKOS_EXPECTS(pred_ptr);
+
+ auto& pred_node = pred_ptr->node_details_t::node;
+ KOKKOS_EXPECTS(pred_node);
+
+ auto& node = arg_node_ptr->node_details_t::node;
+ KOKKOS_EXPECTS(node);
+
+ m_graph.make_edge(*pred_node, *node);
+}
+
+inline void GraphImpl<Kokkos::SYCL>::submit(const Kokkos::SYCL& exec) {
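+ // Finalize the graph into its executable form on first submission; later
+ // submissions replay the already instantiated executable graph.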
+ if (!m_graph_exec) {
+ instantiate();
+ }
+ exec.sycl_queue().ext_oneapi_graph(*m_graph_exec);
+}
+
+inline Kokkos::SYCL const& GraphImpl<Kokkos::SYCL>::get_execution_space()
+ const noexcept {
+ return m_execution_space;
+}
+
+inline auto GraphImpl<Kokkos::SYCL>::create_root_node_ptr() {
+ KOKKOS_EXPECTS(!m_graph_exec);
+ auto rv = std::make_shared<root_node_impl_t>(get_execution_space(),
+ _graph_node_is_root_ctor_tag{});
+ rv->node_details_t::node = m_graph.add();
+ return rv;
+}
+
+template <class... PredecessorRefs>
+inline auto GraphImpl<Kokkos::SYCL>::create_aggregate_ptr(
+ PredecessorRefs&&...) {
+ // The attachment to predecessors, which is all we really need, happens
+ // in the generic layer, which calls through to add_predecessor for
+ // each predecessor ref, so all we need to do here is create the (trivial)
+ // aggregate node.
+ return std::make_shared<aggregate_node_impl_t>(m_execution_space,
+ _graph_node_kernel_ctor_tag{},
+ aggregate_kernel_impl_t{});
+}
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_HALF_HPP_
+#define KOKKOS_SYCL_HALF_HPP_
+
+#ifdef KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+
+#include <Kokkos_Half.hpp>
+#include <Kokkos_ReductionIdentity.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+/************************** half conversions **********************************/
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(half_t val) { return val; }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) { return half_t::impl_type(val); }
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+ cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+ cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+ cast_from_half(half_t val) {
+ return static_cast<T>(half_t::impl_type(val));
+}
+} // namespace Experimental
+
+template <>
+struct reduction_identity<Kokkos::Experimental::half_t> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+ sum() noexcept {
+ return Kokkos::Experimental::half_t::impl_type(0.0F);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+ prod() noexcept {
+ return Kokkos::Experimental::half_t::impl_type(1.0F);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+ max() noexcept {
+ return std::numeric_limits<
+ Kokkos::Experimental::half_t::impl_type>::lowest();
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+ min() noexcept {
+ return std::numeric_limits<Kokkos::Experimental::half_t::impl_type>::max();
+ }
+};
+
+} // namespace Kokkos
+#endif // KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+
+#ifdef KOKKOS_IMPL_SYCL_BHALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Experimental {
+
+/************************** bhalf conversions *********************************/
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bhalf_t val) { return val; }
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(float val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(double val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(short val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned short val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(int val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned int val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long long val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long long val) {
+ return bhalf_t::impl_type(val);
+}
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long val) { return bhalf_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long val) { return bhalf_t::impl_type(val); }
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+ cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+ cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+ cast_from_bhalf(bhalf_t val) {
+ return static_cast<T>(bhalf_t::impl_type(val));
+}
+} // namespace Experimental
+
+// sycl::bfloat16 doesn't have constexpr constructors, so we return float.
+template <>
+struct reduction_identity<Kokkos::Experimental::bhalf_t> {
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() noexcept {
+ return 0.f;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() noexcept {
+ return 1.0f;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() noexcept {
+ return -0x7f7f;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() noexcept {
+ return 0x7f7f;
+ }
+};
+
+} // namespace Kokkos
+#endif // KOKKOS_IMPL_SYCL_BHALF_TYPE_DEFINED
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
+
+#include <Kokkos_Macros.hpp>
+
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+// Make sure no one else tries to define half_t
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+
+namespace Kokkos::Impl {
+struct half_impl_t {
+ using type = sycl::half;
+};
+} // namespace Kokkos::Impl
+#endif // KOKKOS_IMPL_HALF_TYPE_DEFINED
+
+// Make sure no one else tries to define bhalf_t
+#ifndef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+// FIXME_SYCL Evaluate when to drop the check
+#if __has_include(<sycl/ext/oneapi/bfloat16.hpp>)
+#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#define KOKKOS_IMPL_SYCL_BHALF_TYPE_DEFINED
+namespace Kokkos::Impl {
+struct bhalf_impl_t {
+ using type = sycl::ext::oneapi::bfloat16;
+};
+} // namespace Kokkos::Impl
+#elif defined(SYCL_EXT_ONEAPI_BFLOAT16) && defined(KOKKOS_ARCH_INTEL_GPU)
+// FIXME_SYCL bfloat16 is only supported for compute capability 8.0 or higher
+// on Nvidia GPUs but SYCL_EXT_ONEAPI_BFLOAT16 is defined even for lower compute
+// capability.
+#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#define KOKKOS_IMPL_SYCL_BHALF_TYPE_DEFINED
+namespace Kokkos::Impl {
+struct bhalf_impl_t {
+ using type = sycl::ext::oneapi::experimental::bfloat16;
+};
+} // namespace Kokkos::Impl
+#endif // test for bfloat16 support
+#endif // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#endif // KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp> //kokkos_malloc
+
+#include <impl/Kokkos_CheckedIntegerOps.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+
+// FIXME_SYCL Should be a multiple of the maximum subgroup size.
+static constexpr auto sizeScratchGrain = sizeof(Kokkos::SYCL::size_type[32]);
+
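+// Number of sizeScratchGrain-sized blocks needed to hold size bytes,
+// rounding up.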
+std::size_t scratch_count(const std::size_t size) {
+ return (size + sizeScratchGrain - 1) / sizeScratchGrain;
+}
+
+} // namespace
+
+std::vector<std::optional<sycl::queue>*> SYCLInternal::all_queues;
+std::mutex SYCLInternal::mutex;
+
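+// Lock array used by UniqueToken<SYCL>: allocated lazily on first request
+// and released again when called with deallocate == true during finalize().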
+Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> sycl_global_unique_token_locks(
+ bool deallocate) {
+ static Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> locks =
+ Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
+ if (!deallocate && locks.extent(0) == 0)
+ locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>(
+ "Kokkos::UniqueToken<SYCL>::m_locks", SYCL().concurrency());
+ if (deallocate) locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
+ return locks;
+}
+
+SYCLInternal::~SYCLInternal() {
+ if (!was_finalized || m_scratchSpace || m_scratchHost || m_scratchFlags) {
+ std::cerr << "Kokkos::SYCL ERROR: Failed to call "
+ "Kokkos::SYCL::finalize()"
+ << std::endl;
+ std::cerr.flush();
+ }
+}
+
+int SYCLInternal::verify_is_initialized(const char* const label) const {
+ if (!is_initialized()) {
+ Kokkos::abort((std::string("Kokkos::SYCL::") + label +
+ " : ERROR device not initialized\n")
+ .c_str());
+ }
+ return is_initialized();
+}
+SYCLInternal& SYCLInternal::singleton() {
+ static SYCLInternal self;
+ return self;
+}
+
+void SYCLInternal::initialize(const sycl::device& d) {
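+ // SYCL delivers asynchronous errors as an exception_list at
+ // synchronization points; log each one and convert them into a single
+ // Kokkos runtime error.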
+ auto exception_handler = [](sycl::exception_list exceptions) {
+ bool asynchronous_error = false;
+ for (std::exception_ptr const& e : exceptions) {
+ try {
+ std::rethrow_exception(e);
+ } catch (sycl::exception const& e) {
+ std::cerr << e.what() << '\n';
+ asynchronous_error = true;
+ }
+ }
+ if (asynchronous_error)
+ Kokkos::Impl::throw_runtime_exception(
+ "There was an asynchronous SYCL error!\n");
+ };
+#ifdef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ initialize(
+ sycl::queue{d, exception_handler, sycl::property::queue::in_order()});
+#else
+ initialize(sycl::queue{d, exception_handler});
+#endif
+}
+
+// FIXME_SYCL
+void SYCLInternal::initialize(const sycl::queue& q) {
+ KOKKOS_EXPECTS(!is_initialized());
+
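+// Each GPU architecture Kokkos is configured for supports exactly one SYCL
+// backend; abort if the queue was created with a different one.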
+#define KOKKOS_IMPL_CHECK_SYCL_BACKEND_SUPPORT(BACKEND, REQUIRED) \
+ if (BACKEND != REQUIRED) \
+ Kokkos::abort( \
+ "The SYCL execution space instance was initialized with an " \
+ "unsupported backend type! For this GPU architecture, only " #REQUIRED \
+ " is supported.")
+#if defined(KOKKOS_ARCH_INTEL_GPU)
+ KOKKOS_IMPL_CHECK_SYCL_BACKEND_SUPPORT(q.get_backend(),
+ sycl::backend::ext_oneapi_level_zero);
+#elif defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ KOKKOS_IMPL_CHECK_SYCL_BACKEND_SUPPORT(q.get_backend(),
+ sycl::backend::ext_oneapi_cuda);
+#elif defined(KOKKOS_ARCH_AMD_GPU)
+ KOKKOS_IMPL_CHECK_SYCL_BACKEND_SUPPORT(q.get_backend(),
+ sycl::backend::ext_oneapi_hip);
+#endif
+
+ if (was_finalized)
+ Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
+
+ m_queue = q;
+ // guard pushing to all_queues
+ {
+ std::scoped_lock lock(mutex);
+ all_queues.push_back(&m_queue);
+ }
+ const sycl::device& d = m_queue->get_device();
+
+ m_maxWorkgroupSize =
+ d.template get_info<sycl::info::device::max_work_group_size>();
+ // FIXME_SYCL this should give the correct value for NVIDIA GPUs
+ m_maxConcurrency =
+ m_maxWorkgroupSize * 2 *
+ d.template get_info<sycl::info::device::max_compute_units>();
+
+ m_maxShmemPerBlock =
+ d.template get_info<sycl::info::device::local_mem_size>();
+
+ for (auto& usm_mem : m_indirectKernelMem) {
+ usm_mem.reset(*m_queue, m_instance_id);
+ }
+
+#ifdef KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED
+ // Initialize the lock arrays used for arbitrarily sized atomics.
+ if (this == &singleton()) {
+ desul::Impl::init_lock_arrays();
+ desul::Impl::init_lock_arrays_sycl(*m_queue);
+ }
+#endif
+}
+
+int SYCLInternal::acquire_team_scratch_space() {
+ // Grab the next scratch memory allocation. We must make sure that the last
+ // kernel using the allocation has completed, so we wait for the event that
+ // was registered with that kernel.
+ int current_team_scratch = desul::atomic_fetch_inc_mod(
+ &m_current_team_scratch, m_n_team_scratch - 1,
+ desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+
+ m_team_scratch_event[current_team_scratch].wait_and_throw();
+
+ return current_team_scratch;
+}
+
+Kokkos::Impl::sycl_device_ptr<void> SYCLInternal::resize_team_scratch_space(
+ int scratch_pool_id, std::int64_t bytes, bool force_shrink) {
+ // Multiple ParallelFor/Reduce Teams can call this function at the same time
+ // and invalidate the m_team_scratch_ptr. We use a pool to avoid any race
+ // condition.
+ auto mem_space = Kokkos::SYCLDeviceUSMSpace(*m_queue);
+ if (m_team_scratch_current_size[scratch_pool_id] == 0 && bytes > 0) {
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ mem_space.allocate("Kokkos::SYCL::InternalTeamScratchMemory",
+ m_team_scratch_current_size[scratch_pool_id]);
+ }
+ if ((bytes > m_team_scratch_current_size[scratch_pool_id]) ||
+ ((bytes < m_team_scratch_current_size[scratch_pool_id]) &&
+ (force_shrink))) {
+ mem_space.deallocate(m_team_scratch_ptr[scratch_pool_id],
+ m_team_scratch_current_size[scratch_pool_id]);
+ m_team_scratch_current_size[scratch_pool_id] = bytes;
+ m_team_scratch_ptr[scratch_pool_id] =
+ mem_space.allocate("Kokkos::SYCL::InternalTeamScratchMemory",
+ m_team_scratch_current_size[scratch_pool_id]);
+ }
+ return m_team_scratch_ptr[scratch_pool_id];
+}
+
+void SYCLInternal::register_team_scratch_event(int scratch_pool_id,
+ sycl::event event) {
+ m_team_scratch_event[scratch_pool_id] = event;
+}
+
+uint32_t SYCLInternal::impl_get_instance_id() const { return m_instance_id; }
+
+void SYCLInternal::finalize() {
+ SYCLInternal::fence(*m_queue,
+ "Kokkos::SYCLInternal::finalize: fence on finalization",
+ m_instance_id);
+ was_finalized = true;
+
+ // The global_unique_token_locks array is static and should only be
+ // deallocated once by the default instance.
+ if (this == &singleton()) {
+ Impl::sycl_global_unique_token_locks(true);
+#ifdef KOKKOS_IMPL_SYCL_DEVICE_GLOBAL_SUPPORTED
+ desul::Impl::finalize_lock_arrays();
+ desul::Impl::finalize_lock_arrays_sycl(*m_queue);
+#endif
+ }
+
+ auto device_mem_space = SYCLDeviceUSMSpace(*m_queue);
+ auto host_mem_space = SYCLHostUSMSpace(*m_queue);
+ if (nullptr != m_scratchSpace)
+ device_mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+ if (nullptr != m_scratchHost)
+ host_mem_space.deallocate(m_scratchHost,
+ m_scratchHostCount * sizeScratchGrain);
+ if (nullptr != m_scratchFlags)
+ device_mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+ m_syclDev = -1;
+ m_scratchSpaceCount = 0;
+ m_scratchSpace = nullptr;
+ m_scratchHostCount = 0;
+ m_scratchHost = nullptr;
+ m_scratchFlagsCount = 0;
+ m_scratchFlags = nullptr;
+
+ for (int i = 0; i < m_n_team_scratch; ++i) {
+ if (m_team_scratch_current_size[i] > 0) {
+ device_mem_space.deallocate(m_team_scratch_ptr[i],
+ m_team_scratch_current_size[i]);
+ m_team_scratch_current_size[i] = 0;
+ m_team_scratch_ptr[i] = nullptr;
+ }
+ }
+
+ for (auto& usm_mem : m_indirectKernelMem) usm_mem.reset();
+ // guard erasing from all_queues
+ {
+ std::scoped_lock lock(mutex);
+ all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));
+ }
+ m_queue.reset();
+}
+
+Kokkos::Impl::sycl_device_ptr<void> SYCLInternal::scratch_space(
+ const std::size_t size) {
+ if (verify_is_initialized("scratch_space") &&
+ m_scratchSpaceCount < scratch_count(size)) {
+ auto mem_space = Kokkos::SYCLDeviceUSMSpace(*m_queue);
+
+ if (nullptr != m_scratchSpace)
+ mem_space.deallocate(m_scratchSpace,
+ m_scratchSpaceCount * sizeScratchGrain);
+
+ m_scratchSpaceCount = scratch_count(size);
+
+ std::size_t alloc_size = Kokkos::Impl::multiply_overflow_abort(
+ m_scratchSpaceCount, sizeScratchGrain);
+ m_scratchSpace = static_cast<size_type*>(
+ mem_space.allocate("Kokkos::SYCL::InternalScratchSpace", alloc_size));
+ }
+
+ return m_scratchSpace;
+}
+
+Kokkos::Impl::sycl_host_ptr<void> SYCLInternal::scratch_host(
+ const std::size_t size) {
+ if (verify_is_initialized("scratch_unified") &&
+ m_scratchHostCount < scratch_count(size)) {
+ auto mem_space = Kokkos::SYCLHostUSMSpace(*m_queue);
+
+ if (nullptr != m_scratchHost)
+ mem_space.deallocate(m_scratchHost,
+ m_scratchHostCount * sizeScratchGrain);
+
+ m_scratchHostCount = scratch_count(size);
+
+ std::size_t alloc_size = Kokkos::Impl::multiply_overflow_abort(
+ m_scratchHostCount, sizeScratchGrain);
+ m_scratchHost = static_cast<size_type*>(
+ mem_space.allocate("Kokkos::SYCL::InternalScratchHost", alloc_size));
+ }
+
+ return m_scratchHost;
+}
+
+Kokkos::Impl::sycl_device_ptr<void> SYCLInternal::scratch_flags(
+ const std::size_t size) {
+ if (verify_is_initialized("scratch_flags") &&
+ m_scratchFlagsCount < scratch_count(size)) {
+ auto mem_space = Kokkos::SYCLDeviceUSMSpace(*m_queue);
+
+ if (nullptr != m_scratchFlags)
+ mem_space.deallocate(m_scratchFlags,
+ m_scratchFlagsCount * sizeScratchGrain);
+
+ m_scratchFlagsCount = scratch_count(size);
+
+ std::size_t alloc_size = Kokkos::Impl::multiply_overflow_abort(
+ m_scratchFlagsCount, sizeScratchGrain);
+ m_scratchFlags = static_cast<size_type*>(
+ mem_space.allocate("Kokkos::SYCL::InternalScratchFlags", alloc_size));
+
+ // We only zero-initialize the allocation when we actually allocate.
+ // It's the responsibility of the features using scratch_flags,
+ // namely parallel_reduce and parallel_scan, to reset the used values to 0.
+ auto memset_event = m_queue->memset(m_scratchFlags, 0,
+ m_scratchFlagsCount * sizeScratchGrain);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ m_queue->ext_oneapi_submit_barrier(std::vector{memset_event});
+#endif
+ }
+
+ return m_scratchFlags;
+}
+
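+// Common implementation for fencing either a whole sycl::queue or a single
+// sycl::event (both provide wait_and_throw): the wait is reported to the
+// tools interface and synchronous SYCL exceptions become Kokkos errors.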
+template <typename WAT>
+void SYCLInternal::fence_helper(WAT& wat, const std::string& name,
+ uint32_t instance_id) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::SYCL>(
+ name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id},
+ [&]() {
+ try {
+ wat.wait_and_throw();
+ } catch (sycl::exception const& e) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("There was a synchronous SYCL error:\n") += e.what());
+ }
+ });
+}
+template void SYCLInternal::fence_helper<sycl::queue>(sycl::queue&,
+ const std::string&,
+ uint32_t);
+template void SYCLInternal::fence_helper<sycl::event>(sycl::event&,
+ const std::string&,
+ uint32_t);
+
+// This function cycles through a pool of USM allocations for functors
+SYCLInternal::IndirectKernelMem& SYCLInternal::get_indirect_kernel_mem() {
+ // Thread safety: atomically increment round robin variable
+ // NB: atomic_fetch_inc_mod returns values in range [0-N], not
+ // [0-N) as might be expected.
+ size_t next_pool = desul::atomic_fetch_inc_mod(
+ &m_pool_next, m_usm_pool_size - 1, desul::MemoryOrderRelaxed(),
+ desul::MemoryScopeDevice());
+ return m_indirectKernelMem[next_pool];
+}
+
+template <sycl::usm::alloc Kind>
+size_t SYCLInternal::USMObjectMem<Kind>::reserve(size_t n) {
+ assert(m_q);
+
+ if (m_capacity < n) {
+ AllocationSpace alloc_space(*m_q);
+ if (m_data) alloc_space.deallocate(m_data, m_capacity);
+
+ m_data = alloc_space.allocate("Kokkos::SYCL::USMObjectMem", n);
+
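+ // Device USM cannot be written directly from the host, so keep a
+ // host-side staging buffer of matching capacity.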
+ if constexpr (sycl::usm::alloc::device == Kind)
+ m_staging.reset(new char[n]);
+ m_capacity = n;
+ }
+
+ return m_capacity;
+}
+
+template <sycl::usm::alloc Kind>
+void SYCLInternal::USMObjectMem<Kind>::reset() {
+ if (m_data) {
+ // This implies a fence, since this class is not copyable and
+ // deallocating fences across all registered queues.
+ AllocationSpace alloc_space(*m_q);
+ alloc_space.deallocate(m_data, m_capacity);
+
+ m_capacity = 0;
+ m_data = nullptr;
+ }
+ m_q.reset();
+}
+
+int SYCLInternal::m_syclDev;
+
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::shared>;
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::device>;
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::host>;
+
+} // namespace Impl
+} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SYCL_INSTANCE_HPP_
#define KOKKOS_SYCL_INSTANCE_HPP_
#include <optional>
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
#include <CL/sycl.hpp>
+#endif
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_Profiling.hpp>
namespace Kokkos {
-namespace Experimental {
namespace Impl {
class SYCLInternal {
SYCLInternal() = default;
~SYCLInternal();
- SYCLInternal(const SYCLInternal&) = delete;
+ SYCLInternal(const SYCLInternal&) = delete;
SYCLInternal& operator=(const SYCLInternal&) = delete;
- SYCLInternal& operator=(SYCLInternal&&) = delete;
- SYCLInternal(SYCLInternal&&) = delete;
+ SYCLInternal& operator=(SYCLInternal&&) = delete;
+ SYCLInternal(SYCLInternal&&) = delete;
- sycl::device_ptr<void> scratch_space(const std::size_t size);
- sycl::device_ptr<void> scratch_flags(const std::size_t size);
- sycl::device_ptr<void> resize_team_scratch_space(std::int64_t bytes,
- bool force_shrink = false);
+ Kokkos::Impl::sycl_device_ptr<void> scratch_space(const std::size_t size);
+ Kokkos::Impl::sycl_device_ptr<void> scratch_flags(const std::size_t size);
+ Kokkos::Impl::sycl_host_ptr<void> scratch_host(const std::size_t size);
+ int acquire_team_scratch_space();
+ Kokkos::Impl::sycl_device_ptr<void> resize_team_scratch_space(
+ int scratch_pool_id, std::int64_t bytes, bool force_shrink = false);
+ void register_team_scratch_event(int scratch_pool_id, sycl::event event);
uint32_t impl_get_instance_id() const;
- int m_syclDev = 0;
+ static int m_syclDev;
size_t m_maxWorkgroupSize = 0;
uint32_t m_maxConcurrency = 0;
uint64_t m_maxShmemPerBlock = 0;
- std::size_t m_scratchSpaceCount = 0;
- sycl::device_ptr<size_type> m_scratchSpace = nullptr;
- std::size_t m_scratchFlagsCount = 0;
- sycl::device_ptr<size_type> m_scratchFlags = nullptr;
+ std::size_t m_scratchSpaceCount = 0;
+ Kokkos::Impl::sycl_device_ptr<size_type> m_scratchSpace = nullptr;
+ std::size_t m_scratchHostCount = 0;
+ Kokkos::Impl::sycl_host_ptr<size_type> m_scratchHost = nullptr;
+ std::size_t m_scratchFlagsCount = 0;
+ Kokkos::Impl::sycl_device_ptr<size_type> m_scratchFlags = nullptr;
// mutex to access shared memory
mutable std::mutex m_mutexScratchSpace;
- int64_t m_team_scratch_current_size = 0;
- sycl::device_ptr<void> m_team_scratch_ptr = nullptr;
+ // Team Scratch Level 1 Space
+ static constexpr int m_n_team_scratch = 10;
+ mutable int64_t m_team_scratch_current_size[m_n_team_scratch] = {};
+ mutable Kokkos::Impl::sycl_device_ptr<void>
+ m_team_scratch_ptr[m_n_team_scratch] = {};
+ mutable int m_current_team_scratch = 0;
+ mutable sycl::event m_team_scratch_event[m_n_team_scratch] = {};
mutable std::mutex m_team_scratch_mutex;
- uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
- Kokkos::Experimental::SYCL>(reinterpret_cast<uintptr_t>(this));
+ uint32_t m_instance_id =
+ Kokkos::Tools::Experimental::Impl::idForInstance<Kokkos::SYCL>(
+ reinterpret_cast<uintptr_t>(this));
std::optional<sycl::queue> m_queue;
// Using std::vector<std::optional<sycl::queue>> reveals a compiler bug when
explicit USMObjectMem(sycl::queue q, uint32_t instance_id) noexcept
: m_q(std::move(q)), m_instance_id(instance_id) {}
- USMObjectMem(USMObjectMem const&) = delete;
- USMObjectMem(USMObjectMem&&) = delete;
- USMObjectMem& operator=(USMObjectMem&&) = delete;
+ USMObjectMem(USMObjectMem const&) = delete;
+ USMObjectMem(USMObjectMem&&) = delete;
+ USMObjectMem& operator=(USMObjectMem&&) = delete;
USMObjectMem& operator=(USMObjectMem const&) = delete;
~USMObjectMem() { reset(); };
size_t reserve(size_t n);
private:
- using AllocationSpace = std::conditional_t<
- Kind == sycl::usm::alloc::device,
- Kokkos::Experimental::SYCLDeviceUSMSpace,
- std::conditional_t<Kind == sycl::usm::alloc::shared,
- Kokkos::Experimental::SYCLSharedUSMSpace,
- Kokkos::Experimental::SYCLHostUSMSpace>>;
+ using AllocationSpace =
+ std::conditional_t<Kind == sycl::usm::alloc::device,
+ Kokkos::SYCLDeviceUSMSpace,
+ std::conditional_t<Kind == sycl::usm::alloc::shared,
+ Kokkos::SYCLSharedUSMSpace,
+ Kokkos::SYCLHostUSMSpace>>;
public:
// Performs either sycl::memcpy (for USM device memory) or std::memcpy
}
void fence() {
- SYCLInternal::fence(
- m_last_event,
- "Kokkos::Experimental::SYCLInternal::USMObject fence to wait for "
- "last event to finish",
- m_instance_id);
+ SYCLInternal::fence(m_last_event,
+ "Kokkos::SYCLInternal::USMObject fence to wait for "
+ "last event to finish",
+ m_instance_id);
}
void register_event(sycl::event event) {
return SYCLFunctionWrapper<Functor, Storage>(functor, storage);
}
} // namespace Impl
-} // namespace Experimental
} // namespace Kokkos
#if defined(SYCL_DEVICE_COPYABLE) && defined(KOKKOS_ARCH_INTEL_GPU)
template <typename Functor, typename Storage>
struct sycl::is_device_copyable<
- Kokkos::Experimental::Impl::SYCLFunctionWrapper<Functor, Storage, false>>
+ Kokkos::Impl::SYCLFunctionWrapper<Functor, Storage, false>>
: std::true_type {};
+#if (defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER < 20240000) || \
+ (defined(__LIBSYCL_MAJOR_VERSION) && __LIBSYCL_MAJOR_VERSION < 7)
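+// Probe type: not trivially copyable but declared device copyable. Whether
+// the SYCL implementation propagates device copyability to the
+// const-qualified type determines if the const specialization below is used.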
+template <typename>
+struct NonTriviallyCopyableAndDeviceCopyable {
+ NonTriviallyCopyableAndDeviceCopyable(
+ const NonTriviallyCopyableAndDeviceCopyable&) {}
+};
+
+template <typename T>
+struct sycl::is_device_copyable<NonTriviallyCopyableAndDeviceCopyable<T>>
+ : std::true_type {};
+
+static_assert(
+ !std::is_trivially_copyable_v<
+ NonTriviallyCopyableAndDeviceCopyable<void>> &&
+ sycl::is_device_copyable_v<NonTriviallyCopyableAndDeviceCopyable<void>>);
+
template <typename Functor, typename Storage>
struct sycl::is_device_copyable<
- const Kokkos::Experimental::Impl::SYCLFunctionWrapper<Functor, Storage,
- false>>
+ const Kokkos::Impl::SYCLFunctionWrapper<Functor, Storage, false>,
+ std::enable_if_t<!sycl::is_device_copyable_v<
+ const NonTriviallyCopyableAndDeviceCopyable<Functor>>>>
: std::true_type {};
#endif
#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_MDRANGEPOLICY_HPP_
+#define KOKKOS_SYCL_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<Kokkos::SYCL> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<Kokkos::SYCL> {
+ using type = Iterate;
+ static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<Kokkos::SYCL>(
+ const Kokkos::SYCL& space) {
+ TileSizeProperties properties;
+ properties.max_threads =
+ space.impl_internal_space_instance()->m_maxWorkgroupSize;
+ properties.default_largest_tile_size = 16;
+ properties.default_tile_size = 2;
+ properties.max_total_tile_size = properties.max_threads;
+ return properties;
+}
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Kokkos::SYCL, ThreadAndVector>
+ : AcceleratorBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_SYCL_PARALLEL_RANGE_HPP_
-#define KOKKOS_SYCL_PARALLEL_RANGE_HPP_
+#ifndef KOKKOS_SYCL_PARALLEL_FOR_MDRANGE_HPP_
+#define KOKKOS_SYCL_PARALLEL_FOR_MDRANGE_HPP_
#include <impl/KokkosExp_IterateTileGPU.hpp>
+#ifdef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
#include <vector>
-
-namespace Kokkos::Impl {
-template <typename FunctorWrapper, typename Policy>
-struct FunctorWrapperRangePolicyParallelFor {
- using WorkTag = typename Policy::work_tag;
-
- void operator()(sycl::item<1> item) const {
- const typename Policy::index_type id = item.get_linear_id() + m_begin;
- if constexpr (std::is_void<WorkTag>::value)
- m_functor_wrapper.get_functor()(id);
- else
- m_functor_wrapper.get_functor()(WorkTag(), id);
- }
-
- typename Policy::index_type m_begin;
- FunctorWrapper m_functor_wrapper;
-};
-} // namespace Kokkos::Impl
+#endif
-template <class FunctorType, class... Traits>
-class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
- Kokkos::Experimental::SYCL> {
- public:
- using Policy = Kokkos::RangePolicy<Traits...>;
-
- private:
- using Member = typename Policy::member_type;
- using WorkTag = typename Policy::work_tag;
- using LaunchBounds = typename Policy::launch_bounds;
-
- const FunctorType m_functor;
- const Policy m_policy;
-
- template <typename Functor>
- static sycl::event sycl_direct_launch(const Policy& policy,
- const Functor& functor,
- const sycl::event& memcpy_event) {
- // Convenience references
- const Kokkos::Experimental::SYCL& space = policy.space();
- sycl::queue& q = space.sycl_queue();
-
- auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
- FunctorWrapperRangePolicyParallelFor<Functor, Policy> f{policy.begin(),
- functor};
- sycl::range<1> range(policy.end() - policy.begin());
- cgh.depends_on(memcpy_event);
- cgh.parallel_for<FunctorWrapperRangePolicyParallelFor<Functor, Policy>>(
- range, f);
- });
- q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
-
- return parallel_for_event;
- }
-
- public:
- using functor_type = FunctorType;
-
- void execute() const {
- if (m_policy.begin() == m_policy.end()) return;
-
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
- indirectKernelMem = m_policy.space()
- .impl_internal_space_instance()
- ->get_indirect_kernel_mem();
-
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
- sycl::event event = sycl_direct_launch(m_policy, functor_wrapper,
- functor_wrapper.get_copy_event());
- functor_wrapper.register_event(event);
- }
-
- ParallelFor(const ParallelFor&) = delete;
- ParallelFor(ParallelFor&&) = delete;
- ParallelFor& operator=(const ParallelFor&) = delete;
- ParallelFor& operator=(ParallelFor&&) = delete;
- ~ParallelFor() = default;
-
- ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
-};
-
-// ParallelFor
template <class FunctorType, class... Traits>
class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
- Kokkos::Experimental::SYCL> {
+ Kokkos::SYCL> {
public:
using Policy = Kokkos::MDRangePolicy<Traits...>;
private:
using array_index_type = typename Policy::array_index_type;
using index_type = typename Policy::index_type;
- using LaunchBounds = typename Policy::launch_bounds;
using WorkTag = typename Policy::work_tag;
const FunctorType m_functor;
const typename Policy::index_type m_num_tiles;
static constexpr Iterate inner_direction = Policy::inner_direction;
} m_policy;
- const Kokkos::Experimental::SYCL& m_space;
+ const Kokkos::SYCL& m_space;
sycl::nd_range<3> compute_ranges() const {
const auto& m_tile = m_policy.m_tile;
const BarePolicy bare_policy(m_policy);
- auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
const auto range = compute_ranges();
const sycl::range<3> global_range = range.get_global_range();
const sycl::range<3> local_range = range.get_local_range();
sycl::range<3>{global_range[2], global_range[1], global_range[0]},
sycl::range<3>{local_range[2], local_range[1], local_range[0]}};
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
cgh.parallel_for(sycl_swapped_range, [functor_wrapper, bare_policy](
sycl::nd_item<3> item) {
// swap back for correct index calculations in DeviceIterateTile
{global_x, global_y, global_z}, {local_x, local_y, local_z})
.exec_range();
});
- });
- q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
-
- return parallel_for_event;
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ return {};
+ } else
+#endif
+ {
+ auto parallel_for_event = q.submit(cgh_lambda);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+#endif
+ return parallel_for_event;
+ }
}
public:
}
void execute() const {
- Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
- indirectKernelMem =
- m_space.impl_internal_space_instance()->get_indirect_kernel_mem();
+ Kokkos::Impl::SYCLInternal::IndirectKernelMem& indirectKernelMem =
+ m_space.impl_internal_space_instance()->get_indirect_kernel_mem();
- auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
- m_functor, indirectKernelMem);
+ auto functor_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor, indirectKernelMem);
sycl::event event =
sycl_direct_launch(functor_wrapper, functor_wrapper.get_copy_event());
functor_wrapper.register_event(event);
}
- ParallelFor(const ParallelFor&) = delete;
- ParallelFor(ParallelFor&&) = delete;
- ParallelFor& operator=(const ParallelFor&) = delete;
- ParallelFor& operator=(ParallelFor&&) = delete;
- ~ParallelFor() = default;
-
ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
: m_functor(arg_functor),
m_policy(arg_policy),
m_space(arg_policy.space()) {}
};
-#endif // KOKKOS_SYCL_PARALLEL_RANGE_HPP_
+#endif // KOKKOS_SYCL_PARALLEL_FOR_MDRANGE_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_FOR_RANGE_HPP_
+#define KOKKOS_SYCL_PARALLEL_FOR_RANGE_HPP_
+
+#ifdef SYCL_EXT_ONEAPI_AUTO_LOCAL_RANGE
+#include <Kokkos_BitManipulation.hpp>
+#endif
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+#include <vector>
+#endif
+
+namespace Kokkos::Impl {
+#ifndef SYCL_EXT_ONEAPI_AUTO_LOCAL_RANGE
+template <typename FunctorWrapper, typename Policy>
+struct FunctorWrapperRangePolicyParallelFor {
+ using WorkTag = typename Policy::work_tag;
+
+ void operator()(sycl::item<1> item) const {
+ const typename Policy::index_type id = item.get_linear_id() + m_begin;
+ if constexpr (std::is_void_v<WorkTag>)
+ m_functor_wrapper.get_functor()(id);
+ else
+ m_functor_wrapper.get_functor()(WorkTag(), id);
+ }
+
+ typename Policy::index_type m_begin;
+ FunctorWrapper m_functor_wrapper;
+};
+#endif
+
+// Like FunctorWrapperRangePolicyParallelFor above, but for a user-provided
+// workgroup size: the launch range may be padded to a multiple of the
+// workgroup size, so out-of-range ids are masked via m_work_size.
+template <typename FunctorWrapper, typename Policy>
+struct FunctorWrapperRangePolicyParallelForCustom {
+ using WorkTag = typename Policy::work_tag;
+
+ void operator()(sycl::nd_item<1> item) const {
+ const typename Policy::index_type id = item.get_global_linear_id();
+ if (id < m_work_size) {
+ const auto shifted_id = id + m_begin;
+ if constexpr (std::is_void_v<WorkTag>)
+ m_functor_wrapper.get_functor()(shifted_id);
+ else
+ m_functor_wrapper.get_functor()(WorkTag(), shifted_id);
+ }
+ }
+
+ typename Policy::index_type m_begin;
+ FunctorWrapper m_functor_wrapper;
+ typename Policy::index_type m_work_size;
+};
+} // namespace Kokkos::Impl
+
+template <class FunctorType, class... Traits>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::SYCL> {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+ using Member = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ template <typename Functor>
+ sycl::event sycl_direct_launch(const Policy& policy, const Functor& functor,
+ const sycl::event& memcpy_event) const {
+ // Convenience references
+ const Kokkos::SYCL& space = policy.space();
+ sycl::queue& q = space.sycl_queue();
+
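+    // Make sure desul's lock arrays (used to implement atomics that cannot
+    // be done natively) are present on the device before the first launch.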
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+
+ if (policy.chunk_size() <= 1) {
+#ifdef SYCL_EXT_ONEAPI_AUTO_LOCAL_RANGE
+ const auto actual_range = policy.end() - policy.begin();
+ FunctorWrapperRangePolicyParallelForCustom<Functor, Policy> f{
+ policy.begin(), functor, actual_range};
+        // Round the actual range up to a multiple of the largest power of
+        // two not exceeding the maximum workgroup size
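+        // Illustrative example (numbers not from the source): actual_range =
+        // 1000 with a maximum workgroup size of 512 gives
+        // wgroup_size_multiple = bit_floor(512) = 512 and launch_range =
+        // ceil(1000/512)*512 = 1024; the functor masks ids >= actual_range.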
+ const auto max_wgroup_size =
+ q.get_device().get_info<sycl::info::device::max_work_group_size>();
+ const auto wgroup_size_multiple = Kokkos::bit_floor(
+ std::min<std::size_t>(max_wgroup_size, actual_range));
+
+ const auto launch_range = (actual_range + wgroup_size_multiple - 1) /
+ wgroup_size_multiple * wgroup_size_multiple;
+ sycl::nd_range<1> range(
+ launch_range, sycl::ext::oneapi::experimental::auto_range<1>());
+ cgh.parallel_for<
+ FunctorWrapperRangePolicyParallelForCustom<Functor, Policy>>(range,
+ f);
+#else
+ FunctorWrapperRangePolicyParallelFor<Functor, Policy> f{policy.begin(),
+ functor};
+ sycl::range<1> range(policy.end() - policy.begin());
+ cgh.parallel_for<FunctorWrapperRangePolicyParallelFor<Functor, Policy>>(
+ range, f);
+#endif
+ } else {
+ // Use the chunk size as workgroup size. We need to make sure that the
+ // range the kernel is launched with is a multiple of the workgroup
+ // size. Hence, we need to restrict the execution of the functor in the
+ // kernel to the actual range.
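+        // Illustrative example (numbers not from the source): actual_range =
+        // 1000 and chunk_size = 128 give launch_range = ceil(1000/128)*128 =
+        // 1024; the id < m_work_size check in the functor masks the 24 extra
+        // work items.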
+ const auto actual_range = policy.end() - policy.begin();
+ const auto wgroup_size = policy.chunk_size();
+ const auto launch_range =
+ (actual_range + wgroup_size - 1) / wgroup_size * wgroup_size;
+ FunctorWrapperRangePolicyParallelForCustom<Functor, Policy> f{
+ policy.begin(), functor, actual_range};
+ sycl::nd_range<1> range(launch_range, wgroup_size);
+ cgh.parallel_for<
+ FunctorWrapperRangePolicyParallelForCustom<Functor, Policy>>(range,
+ f);
+ }
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ return {};
+ } else
+#endif
+ {
+ auto parallel_for_event = q.submit(cgh_lambda);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+#endif
+ return parallel_for_event;
+ }
+ }
+
+ public:
+ using functor_type = FunctorType;
+
+ void execute() const {
+ if (m_policy.begin() == m_policy.end()) return;
+
+ Kokkos::Impl::SYCLInternal::IndirectKernelMem& indirectKernelMem =
+ m_policy.space()
+ .impl_internal_space_instance()
+ ->get_indirect_kernel_mem();
+
+ auto functor_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor, indirectKernelMem);
+ sycl::event event = sycl_direct_launch(m_policy, functor_wrapper,
+ functor_wrapper.get_copy_event());
+ functor_wrapper.register_event(event);
+ }
+
+ ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+#endif // KOKKOS_SYCL_PARALLEL_FOR_RANGE_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_FOR_TEAM_HPP
+#define KOKKOS_SYCL_PARALLEL_FOR_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <SYCL/Kokkos_SYCL_Team.hpp>
+#include <SYCL/Kokkos_SYCL_TeamPolicy.hpp>
+
+#include <sstream>
+#include <vector>
+
+template <typename FunctorType, typename... Properties>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::SYCL> {
+ public:
+ using Policy = TeamPolicy<Properties...>;
+ using functor_type = FunctorType;
+ using size_type = ::Kokkos::SYCL::size_type;
+
+ private:
+ using member_type = typename Policy::member_type;
+ using work_tag = typename Policy::work_tag;
+ using launch_bounds = typename Policy::launch_bounds;
+
+ FunctorType const m_functor;
+ Policy const m_policy;
+ size_type const m_league_size;
+ int m_team_size;
+ size_type const m_vector_size;
+ int m_shmem_begin;
+ int m_shmem_size;
+ size_t m_scratch_size[2];
+
+ template <typename FunctorWrapper>
+ sycl::event sycl_direct_launch(const sycl_device_ptr<char> global_scratch_ptr,
+ const FunctorWrapper& functor_wrapper,
+ const sycl::event& memcpy_event) const {
+ // Convenience references
+ const Kokkos::SYCL& space = m_policy.space();
+ sycl::queue& q = space.sycl_queue();
+
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ // FIXME_SYCL accessors seem to need a size greater than zero at least for
+ // host queues
+ sycl::local_accessor<char, 1> team_scratch_memory_L0(
+ sycl::range<1>(
+ std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+ cgh);
+
+ // Avoid capturing *this since it might not be trivially copyable
+ const auto shmem_begin = m_shmem_begin;
+ const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+
+ auto lambda = [=](sycl::nd_item<2> item) {
+ const member_type team_member(
+ KOKKOS_IMPL_SYCL_GET_MULTI_PTR(team_scratch_memory_L0), shmem_begin,
+ scratch_size[0],
+ global_scratch_ptr + item.get_group(1) * scratch_size[1],
+ scratch_size[1], item, item.get_group_linear_id(),
+ item.get_group_range(1));
+ if constexpr (std::is_void<work_tag>::value)
+ functor_wrapper.get_functor()(team_member);
+ else
+ functor_wrapper.get_functor()(work_tag(), team_member);
+ };
+
+ static sycl::kernel kernel = [&] {
+ sycl::kernel_id functor_kernel_id =
+ sycl::get_kernel_id<decltype(lambda)>();
+ auto kernel_bundle =
+ sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+ q.get_context(), std::vector{functor_kernel_id});
+ return kernel_bundle.get_kernel(functor_kernel_id);
+ }();
+ auto max_sg_size =
+ kernel
+ .get_info<sycl::info::kernel_device_specific::max_sub_group_size>(
+ q.get_device());
+ auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
+ // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle to
+ // be used gives a runtime error.
+ // cgh.use_kernel_bundle(kernel_bundle);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
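+      // With, e.g., league_size = 100, team_size = 64 and final_vector_size
+      // = 4 (illustrative numbers), this launches a 64 x 400 global range
+      // with 64 x 4 workgroups, i.e. one workgroup per league member.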
+ cgh.parallel_for(
+ sycl::nd_range<2>(
+ sycl::range<2>(m_team_size, m_league_size * final_vector_size),
+ sycl::range<2>(m_team_size, final_vector_size)),
+ lambda);
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ return {};
+ } else
+#endif
+ {
+ auto parallel_for_event = q.submit(cgh_lambda);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+#endif
+ return parallel_for_event;
+ }
+ }
+
+ public:
+ inline void execute() const {
+ if (m_league_size == 0) return;
+
+ auto& instance = *m_policy.space().impl_internal_space_instance();
+
+ // Only let one instance at a time resize the instance's scratch memory
+ // allocations.
+ std::scoped_lock<std::mutex> team_scratch_lock(
+ instance.m_team_scratch_mutex);
+
+ // Functor's reduce memory, team scan memory, and team shared memory depend
+ // upon team size.
+ int scratch_pool_id = instance.acquire_team_scratch_space();
+ const sycl_device_ptr<char> global_scratch_ptr =
+ static_cast<sycl_device_ptr<char>>(instance.resize_team_scratch_space(
+ scratch_pool_id,
+ static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
+
+ Kokkos::Impl::SYCLInternal::IndirectKernelMem& indirectKernelMem =
+ instance.get_indirect_kernel_mem();
+
+ auto functor_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor, indirectKernelMem);
+
+ sycl::event event = sycl_direct_launch(global_scratch_ptr, functor_wrapper,
+ functor_wrapper.get_copy_event());
+ functor_wrapper.register_event(event);
+ instance.register_team_scratch_event(scratch_pool_id, event);
+ }
+
+ ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_league_size(arg_policy.league_size()),
+ m_team_size(arg_policy.team_size()),
+ m_vector_size(arg_policy.impl_vector_length()) {
+ if (m_team_size < 0) {
+ m_team_size =
+ m_policy.team_size_recommended(arg_functor, ParallelForTag{});
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<SYCL, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
+
+ m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+ m_shmem_size =
+ (m_policy.scratch_size(0, m_team_size) +
+ FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+ m_scratch_size[0] = m_shmem_size;
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+
+ const auto& instance = *m_policy.space().impl_internal_space_instance();
+ if (static_cast<int>(instance.m_maxShmemPerBlock) <
+ m_shmem_size - m_shmem_begin) {
+ std::stringstream out;
+ out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
+ "Requested "
+ << m_shmem_size - m_shmem_begin << " bytes but maximum is "
+ << instance.m_maxShmemPerBlock << '\n';
+ Kokkos::Impl::throw_runtime_exception(out.str());
+ }
+
+ const auto max_team_size =
+ m_policy.team_size_max(arg_functor, ParallelForTag{});
+    if (m_team_size > max_team_size)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<SYCL> requested too large team size. The "
+ "maximal team_size is " +
+ std::to_string(max_team_size) + '!');
+ }
+};
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_REDUCE_MDRANGE_HPP
+#define KOKKOS_SYCL_PARALLEL_REDUCE_MDRANGE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <SYCL/Kokkos_SYCL_WorkgroupReduction.hpp>
+#include <Kokkos_BitManipulation.hpp>
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+#include <vector>
+#endif
+
+template <class CombinedFunctorReducerType, class... Traits>
+class Kokkos::Impl::ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::SYCL> {
+ public:
+ using Policy = Kokkos::MDRangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using value_type = typename ReducerType::value_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ using WorkTag = typename Policy::work_tag;
+
+ // MDRangePolicy is not trivially copyable. Hence, replicate the data we
+ // really need in DeviceIterateTile in a trivially copyable struct.
+ struct BarePolicy {
+ using index_type = typename Policy::index_type;
+
+ BarePolicy(const Policy& policy)
+ : m_lower(policy.m_lower),
+ m_upper(policy.m_upper),
+ m_tile(policy.m_tile),
+ m_tile_end(policy.m_tile_end),
+ m_num_tiles(policy.m_num_tiles),
+ m_prod_tile_dims(policy.m_prod_tile_dims) {}
+
+ const typename Policy::point_type m_lower;
+ const typename Policy::point_type m_upper;
+ const typename Policy::tile_type m_tile;
+ const typename Policy::point_type m_tile_end;
+ const typename Policy::index_type m_num_tiles;
+ const typename Policy::index_type m_prod_tile_dims;
+ static constexpr Iterate inner_direction = Policy::inner_direction;
+ static constexpr int rank = Policy::rank;
+ };
+
+ public:
+ // V - View
+ template <typename View>
+ ParallelReduce(const CombinedFunctorReducerType& f, const Policy& p,
+ const View& v)
+ : m_functor_reducer(f),
+ m_policy(p),
+ m_space(p.space()),
+ m_result_ptr(v.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ typename View::memory_space>::accessible) {}
+
+ private:
+ template <typename CombinedFunctorReducerWrapper>
+ sycl::event sycl_direct_launch(
+ const CombinedFunctorReducerWrapper& functor_reducer_wrapper,
+ const sycl::event& memcpy_event) const {
+ // Convenience references
+ Kokkos::Impl::SYCLInternal& instance =
+ *m_space.impl_internal_space_instance();
+ sycl::queue& q = m_space.sycl_queue();
+
+ const typename Policy::index_type n_tiles = m_policy.m_num_tiles;
+ const unsigned int value_count =
+ m_functor_reducer.get_reducer().value_count();
+ sycl_device_ptr<value_type> results_ptr;
+ auto host_result_ptr =
+ (m_result_ptr && !m_result_ptr_device_accessible)
+ ? static_cast<sycl_host_ptr<value_type>>(
+ instance.scratch_host(sizeof(value_type) * value_count))
+ : nullptr;
+
+ sycl::event last_reduction_event;
+
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+    // If n_tiles == 0, we only call init() and final(), working with the
+    // global scratch memory, but don't copy back to m_result_ptr yet.
+ if (n_tiles == 0) {
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+ results_ptr = static_cast<sycl_device_ptr<value_type>>(
+ instance.scratch_space(sizeof(value_type) * value_count));
+ auto device_accessible_result_ptr =
+ m_result_ptr_device_accessible
+ ? static_cast<sycl::global_ptr<value_type>>(m_result_ptr)
+ : static_cast<sycl::global_ptr<value_type>>(host_result_ptr);
+ cgh.single_task([=]() {
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+ reducer.init(results_ptr);
+ reducer.final(results_ptr);
+ if (device_accessible_result_ptr)
+ reducer.copy(device_accessible_result_ptr.get(), results_ptr.get());
+ });
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ } else {
+ // Otherwise (when n_tiles is not zero), we perform a reduction on the
+ // values in all workgroups separately, write the workgroup results back
+ // to global memory and recurse until only one workgroup does the
+ // reduction and thus gets the final value.
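+      // The "recursion" happens within a single kernel: an atomic counter in
+      // scratch_flags lets the last workgroup to finish detect that all
+      // partial results are available and combine them.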
+ const int wgroup_size = Kokkos::bit_ceil(
+ static_cast<unsigned int>(m_policy.m_prod_tile_dims));
+
+ // FIXME_SYCL Find a better way to determine a good limit for the
+ // maximum number of work groups, also see
+ // https://github.com/intel/llvm/blob/756ba2616111235bba073e481b7f1c8004b34ee6/sycl/source/detail/reduction.cpp#L51-L62
+ size_t max_work_groups =
+ 2 * q.get_device().get_info<sycl::info::device::max_compute_units>();
+ int values_per_thread = 1;
+ size_t n_wgroups = n_tiles;
+ while (n_wgroups > max_work_groups) {
+ values_per_thread *= 2;
+ n_wgroups = (n_tiles + values_per_thread - 1) / values_per_thread;
+ }
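+      // Worked example (hypothetical device): n_tiles = 10000 and
+      // max_compute_units = 128 give max_work_groups = 256; the loop doubles
+      // values_per_thread up to 64 so that n_wgroups = ceil(10000/64) = 157.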
+
+ results_ptr = static_cast<sycl_device_ptr<value_type>>(
+ instance.scratch_space(sizeof(value_type) * value_count * n_wgroups));
+ auto device_accessible_result_ptr =
+ m_result_ptr_device_accessible
+ ? static_cast<sycl::global_ptr<value_type>>(m_result_ptr)
+ : static_cast<sycl::global_ptr<value_type>>(host_result_ptr);
+ auto scratch_flags = static_cast<sycl_device_ptr<unsigned int>>(
+ instance.scratch_flags(sizeof(unsigned int)));
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ sycl::local_accessor<value_type> local_mem(
+ sycl::range<1>(wgroup_size) * value_count, cgh);
+ sycl::local_accessor<unsigned int> num_teams_done(1, cgh);
+
+ const BarePolicy bare_policy = m_policy;
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+
+        // REMEMBER: swap local x<->y to conform with the Cuda/HIP
+        // implementation
+ cgh.parallel_for(
+ sycl::nd_range<1>{n_wgroups * wgroup_size, wgroup_size},
+ [=](sycl::nd_item<1> item) {
+ const int local_id = item.get_local_linear_id();
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+
+ // In the first iteration, we call functor to initialize the local
+ // memory. Otherwise, the local memory is initialized with the
+ // results from the previous iteration that are stored in global
+ // memory.
+ using index_type = typename Policy::index_type;
+
+              // SWAPPED here to conform with the CUDA implementation
+ const index_type local_x = 0;
+ const index_type local_y = item.get_local_id(0);
+ const index_type local_z = 0;
+ const index_type global_y = 0;
+ const index_type global_z = 0;
+ const index_type n_global_x = n_tiles;
+ const index_type n_global_y = 1;
+ const index_type n_global_z = 1;
+
+ if constexpr (!SYCLReduction::use_shuffle_based_algorithm<
+ ReducerType>) {
+ reference_type update =
+ reducer.init(&local_mem[local_id * value_count]);
+
+ for (index_type global_x = item.get_group(0);
+ global_x < n_tiles; global_x += item.get_group_range(0))
+ Kokkos::Impl::Reduce::DeviceIterateTile<
+ Policy::rank, BarePolicy, FunctorType,
+ typename Policy::work_tag, reference_type>(
+ bare_policy, functor, update,
+ {n_global_x, n_global_y, n_global_z},
+ {global_x, global_y, global_z},
+ {local_x, local_y, local_z})
+ .exec_range();
+ item.barrier(sycl::access::fence_space::local_space);
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr, device_accessible_result_ptr,
+ value_count, reducer, false, wgroup_size);
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= static_cast<int>(n_wgroups))
+ reducer.init(&local_mem[local_id * value_count]);
+ else {
+ reducer.copy(&local_mem[local_id * value_count],
+ &results_ptr[local_id * value_count]);
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_mem[local_id * value_count],
+ &results_ptr[id * value_count]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr,
+ device_accessible_result_ptr, value_count, reducer, true,
+ std::min<int>(n_wgroups, wgroup_size));
+ }
+ } else {
+ value_type local_value;
+ reference_type update = reducer.init(&local_value);
+
+ for (index_type global_x = item.get_group(0);
+ global_x < n_tiles; global_x += item.get_group_range(0))
+ Kokkos::Impl::Reduce::DeviceIterateTile<
+ Policy::rank, BarePolicy, FunctorType,
+ typename Policy::work_tag, reference_type>(
+ bare_policy, functor, update,
+ {n_global_x, n_global_y, n_global_z},
+ {global_x, global_y, global_z},
+ {local_x, local_y, local_z})
+ .exec_range();
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, false, wgroup_size);
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= static_cast<int>(n_wgroups))
+ reducer.init(&local_value);
+ else {
+ local_value = results_ptr[local_id];
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_value, &results_ptr[id]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, true,
+ std::min<int>(n_wgroups, wgroup_size));
+ }
+ }
+ });
+ };
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ }
+
+ // At this point, the reduced value is written to the entry in results_ptr
+ // and all that is left is to copy it back to the given result pointer if
+ // necessary.
+ // Using DeepCopy instead of fence+memcpy turned out to be up to 2x slower.
+ if (host_result_ptr) {
+ if constexpr (Policy::is_graph_kernel::value)
+ Kokkos::abort(
+ "parallel_reduce not implemented for graph kernels if result is "
+ "not device-accessible!");
+
+ m_space.fence(
+ "Kokkos::Impl::ParallelReduce<SYCL, MDRangePolicy>::execute: result "
+ "not device-accessible");
+ std::memcpy(m_result_ptr, host_result_ptr,
+ sizeof(value_type) * value_count);
+ }
+
+ return last_reduction_event;
+ }
+
+ public:
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy& policy, const Functor&) {
+ return policy.space().impl_internal_space_instance()->m_maxWorkgroupSize;
+ }
+
+ void execute() const {
+ Kokkos::Impl::SYCLInternal& instance =
+ *m_space.impl_internal_space_instance();
+
+ // Only let one instance at a time resize the instance's scratch memory
+ // allocations.
+ std::scoped_lock<std::mutex> scratch_buffers_lock(
+ instance.m_mutexScratchSpace);
+
+ using IndirectKernelMem = Kokkos::Impl::SYCLInternal::IndirectKernelMem;
+ IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
+
+ auto functor_reducer_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor_reducer, indirectKernelMem);
+
+ sycl::event event = sycl_direct_launch(
+ functor_reducer_wrapper, functor_reducer_wrapper.get_copy_event());
+ functor_reducer_wrapper.register_event(event);
+ }
+
+ private:
+ const CombinedFunctorReducerType m_functor_reducer;
+ const BarePolicy m_policy;
+ const Kokkos::SYCL& m_space;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+};
+
+#endif /* KOKKOS_SYCL_PARALLEL_REDUCE_MDRANGE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_REDUCE_RANGE_HPP
+#define KOKKOS_SYCL_PARALLEL_REDUCE_RANGE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_BitManipulation.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <SYCL/Kokkos_SYCL_WorkgroupReduction.hpp>
+#include <vector>
+
+template <class CombinedFunctorReducerType, class... Traits>
+class Kokkos::Impl::ParallelReduce<
+ CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>, Kokkos::SYCL> {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using value_type = typename ReducerType::value_type;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ using WorkTag = typename Policy::work_tag;
+
+ public:
+ // V - View
+ template <typename View>
+ ParallelReduce(const CombinedFunctorReducerType& f, const Policy& p,
+ const View& v)
+ : m_functor_reducer(f),
+ m_policy(p),
+ m_result_ptr(v.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ typename View::memory_space>::accessible) {}
+
+ private:
+ template <typename PolicyType, typename CombinedFunctorReducerWrapper>
+ sycl::event sycl_direct_launch(
+ const PolicyType& policy,
+ const CombinedFunctorReducerWrapper& functor_reducer_wrapper,
+ const sycl::event& memcpy_event) const {
+ // Convenience references
+ const Kokkos::SYCL& space = policy.space();
+ Kokkos::Impl::SYCLInternal& instance =
+ *space.impl_internal_space_instance();
+ sycl::queue& q = space.sycl_queue();
+
+ std::size_t size = policy.end() - policy.begin();
+ const unsigned int value_count =
+ m_functor_reducer.get_reducer().value_count();
+ sycl_device_ptr<value_type> results_ptr = nullptr;
+ auto host_result_ptr =
+ (m_result_ptr && !m_result_ptr_device_accessible)
+ ? static_cast<sycl_host_ptr<value_type>>(
+ instance.scratch_host(sizeof(value_type) * value_count))
+ : nullptr;
+ auto device_accessible_result_ptr =
+ m_result_ptr_device_accessible
+ ? static_cast<sycl::global_ptr<value_type>>(m_result_ptr)
+ : static_cast<sycl::global_ptr<value_type>>(host_result_ptr);
+
+ sycl::event last_reduction_event;
+
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+    // If size <= 1, we only call init(), possibly the functor, and final()
+    // once, working with the global scratch memory, but don't copy back to
+    // m_result_ptr yet.
+ if (size <= 1) {
+ results_ptr = static_cast<sycl_device_ptr<value_type>>(
+ instance.scratch_space(sizeof(value_type) * value_count));
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ const auto begin = policy.begin();
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+ cgh.single_task([=]() {
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+ reference_type update = reducer.init(results_ptr);
+ if (size == 1) {
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(begin, update);
+ else
+ functor(WorkTag(), begin, update);
+ }
+ reducer.final(results_ptr);
+ if (device_accessible_result_ptr != nullptr)
+ reducer.copy(device_accessible_result_ptr.get(), results_ptr.get());
+ });
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ } else {
+ // Otherwise (when size > 1), we perform a reduction on the values in all
+ // workgroups separately, write the workgroup results back to global
+ // memory and recurse until only one workgroup does the reduction and thus
+ // gets the final value.
+ auto scratch_flags = static_cast<sycl_device_ptr<unsigned int>>(
+ instance.scratch_flags(sizeof(unsigned int)));
+
+ auto reduction_lambda_factory =
+ [&](sycl::local_accessor<value_type> local_mem,
+ sycl::local_accessor<unsigned int> num_teams_done,
+ sycl_device_ptr<value_type> results_ptr, int values_per_thread) {
+ const auto begin = policy.begin();
+
+ auto lambda = [=](sycl::nd_item<1> item) {
+ const auto n_wgroups = item.get_group_range()[0];
+ const auto wgroup_size = item.get_local_range()[0];
+
+ const auto local_id = item.get_local_linear_id();
+ const auto global_id =
+ wgroup_size * item.get_group_linear_id() * values_per_thread +
+ local_id;
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+
+ using index_type = typename Policy::index_type;
+ const auto upper_bound = std::min<index_type>(
+ global_id + values_per_thread * wgroup_size, size);
+
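+                // Each thread thus processes values_per_thread elements with
+                // a stride of wgroup_size, e.g. (made-up numbers)
+                // wgroup_size = 256 and values_per_thread = 4: thread 7 of
+                // group 0 handles ids 7, 263, 519 and 775.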
+ if constexpr (!SYCLReduction::use_shuffle_based_algorithm<
+ ReducerType>) {
+ reference_type update =
+ reducer.init(&local_mem[local_id * value_count]);
+ for (index_type id = global_id; id < upper_bound;
+ id += wgroup_size) {
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(id + begin, update);
+ else
+ functor(WorkTag(), id + begin, update);
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr, device_accessible_result_ptr,
+ value_count, reducer, false, std::min(size, wgroup_size));
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= n_wgroups)
+ reducer.init(&local_mem[local_id * value_count]);
+ else {
+ reducer.copy(&local_mem[local_id * value_count],
+ &results_ptr[local_id * value_count]);
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_mem[local_id * value_count],
+ &results_ptr[id * value_count]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr,
+ device_accessible_result_ptr, value_count, reducer, true,
+ std::min(n_wgroups, wgroup_size));
+ }
+ } else {
+ value_type local_value;
+ reference_type update = reducer.init(&local_value);
+ for (index_type id = global_id; id < upper_bound;
+ id += wgroup_size) {
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(id + begin, update);
+ else
+ functor(WorkTag(), id + begin, update);
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, false,
+ std::min(size, wgroup_size));
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= n_wgroups)
+ reducer.init(&local_value);
+ else {
+ local_value = results_ptr[local_id];
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_value, &results_ptr[id]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, true,
+ std::min(n_wgroups, wgroup_size));
+ }
+ }
+ };
+ return lambda;
+ };
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ sycl::local_accessor<unsigned int> num_teams_done(1, cgh);
+
+ auto dummy_reduction_lambda =
+ reduction_lambda_factory({1, cgh}, num_teams_done, nullptr, 1);
+
+ static sycl::kernel kernel = [&] {
+ sycl::kernel_id functor_kernel_id =
+ sycl::get_kernel_id<decltype(dummy_reduction_lambda)>();
+ auto kernel_bundle =
+ sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+ q.get_context(), std::vector{functor_kernel_id});
+ return kernel_bundle.get_kernel(functor_kernel_id);
+ }();
+ auto multiple = kernel.get_info<sycl::info::kernel_device_specific::
+ preferred_work_group_size_multiple>(
+ q.get_device());
+ // FIXME_SYCL The code below queries the kernel for the maximum subgroup
+ // size but it turns out that this is not accurate and choosing a larger
+        // subgroup size gives better performance (and is what the oneAPI
+ // reduction algorithm does).
+#ifndef KOKKOS_ARCH_INTEL_GPU
+ auto max =
+ kernel
+ .get_info<sycl::info::kernel_device_specific::work_group_size>(
+ q.get_device());
+#else
+ auto max =
+ q.get_device().get_info<sycl::info::device::max_work_group_size>();
+#endif
+
+ auto max_local_memory =
+ q.get_device().get_info<sycl::info::device::local_mem_size>();
+ // The workgroup size is computed as the minimum of
+ // - the smallest power of two not less than the total work size
+ // - the largest power of two not exceeding the largest multiple of the
+ // recommended workgroup size not exceeding the maximum workgroup size
+ // - the largest power of two such that we don't use more than 99% (as a
+ // safe-guard) of the available local memory.
+ const auto wgroup_size = std::min(
+ {Kokkos::bit_ceil(size),
+ Kokkos::bit_floor(static_cast<size_t>(max / multiple) * multiple),
+ Kokkos::bit_floor(static_cast<size_t>(max_local_memory * .99) /
+ (sizeof(value_type) * value_count))});
+
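+        // Illustrative example (assumed device limits): size = 10^6, max =
+        // 1024, multiple = 32, 64 KiB of local memory and an 8-byte
+        // value_type yield min{bit_ceil(10^6), bit_floor(1024),
+        // bit_floor(8110)} = min{1048576, 1024, 4096} = 1024.
+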
+ // FIXME_SYCL Find a better way to determine a good limit for the
+ // maximum number of work groups, also see
+ // https://github.com/intel/llvm/blob/756ba2616111235bba073e481b7f1c8004b34ee6/sycl/source/detail/reduction.cpp#L51-L62
+ size_t max_work_groups =
+ 2 *
+ q.get_device().get_info<sycl::info::device::max_compute_units>();
+ int values_per_thread = 1;
+ size_t n_wgroups = (size + wgroup_size - 1) / wgroup_size;
+ while (n_wgroups > max_work_groups) {
+ values_per_thread *= 2;
+ n_wgroups = ((size + values_per_thread - 1) / values_per_thread +
+ wgroup_size - 1) /
+ wgroup_size;
+ }
+
+ results_ptr =
+ static_cast<sycl_device_ptr<value_type>>(instance.scratch_space(
+ sizeof(value_type) * value_count * n_wgroups));
+
+ sycl::local_accessor<value_type> local_mem(
+ sycl::range<1>(wgroup_size) * value_count, cgh);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+
+ auto reduction_lambda = reduction_lambda_factory(
+ local_mem, num_teams_done, results_ptr, values_per_thread);
+
+ cgh.parallel_for(
+ sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+ reduction_lambda);
+ };
+
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ }
+
+ // At this point, the reduced value is written to the entry in results_ptr
+ // and all that is left is to copy it back to the given result pointer if
+ // necessary.
+ // Using DeepCopy instead of fence+memcpy turned out to be up to 2x slower.
+ if (host_result_ptr) {
+ if constexpr (Policy::is_graph_kernel::value)
+ Kokkos::abort(
+ "parallel_reduce not implemented for graph kernels if result is "
+ "not device-accessible!");
+
+ space.fence(
+ "Kokkos::Impl::ParallelReduce<SYCL, RangePolicy>::execute: result "
+ "not device-accessible");
+ std::memcpy(m_result_ptr, host_result_ptr,
+ sizeof(*m_result_ptr) * value_count);
+ }
+
+ return last_reduction_event;
+ }
+
+ public:
+ void execute() const {
+ Kokkos::Impl::SYCLInternal& instance =
+ *m_policy.space().impl_internal_space_instance();
+
+ // Only let one instance at a time resize the instance's scratch memory
+ // allocations.
+ std::scoped_lock<std::mutex> scratch_buffers_lock(
+ instance.m_mutexScratchSpace);
+
+ using IndirectKernelMem = Kokkos::Impl::SYCLInternal::IndirectKernelMem;
+ IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
+
+ auto functor_reducer_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor_reducer, indirectKernelMem);
+
+ sycl::event event =
+ sycl_direct_launch(m_policy, functor_reducer_wrapper,
+ functor_reducer_wrapper.get_copy_event());
+ functor_reducer_wrapper.register_event(event);
+ }
+
+ private:
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+};
+
+#endif /* KOKKOS_SYCL_PARALLEL_REDUCE_RANGE_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_REDUCE_TEAM_HPP
+#define KOKKOS_SYCL_PARALLEL_REDUCE_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <SYCL/Kokkos_SYCL_Team.hpp>
+#include <SYCL/Kokkos_SYCL_TeamPolicy.hpp>
+#include <SYCL/Kokkos_SYCL_WorkgroupReduction.hpp>
+
+#include <sstream>
+#include <vector>
+
+template <class CombinedFunctorReducerType, class... Properties>
+class Kokkos::Impl::ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>,
+ Kokkos::SYCL> {
+ public:
+ using Policy = TeamPolicy<Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ private:
+ using member_type = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+ using launch_bounds = typename Policy::launch_bounds;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+ using value_type = typename ReducerType::value_type;
+
+ public:
+ using functor_type = FunctorType;
+ using size_type = Kokkos::SYCL::size_type;
+
+ private:
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+ size_type m_shmem_begin;
+ size_type m_shmem_size;
+ size_t m_scratch_size[2];
+ const size_type m_league_size;
+ int m_team_size;
+ const size_type m_vector_size;
+
+ template <typename CombinedFunctorReducerWrapper>
+ sycl::event sycl_direct_launch(
+ const sycl_device_ptr<char> global_scratch_ptr,
+ const CombinedFunctorReducerWrapper& functor_reducer_wrapper,
+ const sycl::event& memcpy_event) const {
+ // Convenience references
+ const Kokkos::SYCL& space = m_policy.space();
+ Kokkos::Impl::SYCLInternal& instance =
+ *space.impl_internal_space_instance();
+ sycl::queue& q = space.sycl_queue();
+
+ const unsigned int value_count =
+ m_functor_reducer.get_reducer().value_count();
+ std::size_t size = std::size_t(m_league_size) * m_team_size * m_vector_size;
+ value_type* results_ptr = nullptr;
+ auto host_result_ptr =
+ (m_result_ptr && !m_result_ptr_device_accessible)
+ ? static_cast<sycl_host_ptr<value_type>>(
+ instance.scratch_host(sizeof(value_type) * value_count))
+ : nullptr;
+
+ sycl::event last_reduction_event;
+
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+    // If size <= 1, we only call init(), possibly the functor, and final()
+    // once, working with the global scratch memory, but don't copy back to
+    // m_result_ptr yet.
+ if (size <= 1) {
+ results_ptr =
+ static_cast<sycl_device_ptr<value_type>>(instance.scratch_space(
+ sizeof(value_type) * std::max(value_count, 1u)));
+ auto device_accessible_result_ptr =
+ m_result_ptr_device_accessible
+ ? static_cast<sycl::global_ptr<value_type>>(m_result_ptr)
+ : static_cast<sycl::global_ptr<value_type>>(host_result_ptr);
+
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ // FIXME_SYCL accessors seem to need a size greater than zero at least
+ // for host queues
+ sycl::local_accessor<char, 1> team_scratch_memory_L0(
+ sycl::range<1>(
+ std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+ cgh);
+
+ // Avoid capturing *this since it might not be trivially copyable
+ const auto shmem_begin = m_shmem_begin;
+ const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+ cgh.parallel_for(
+ sycl::nd_range<2>(sycl::range<2>(1, 1), sycl::range<2>(1, 1)),
+ [=](sycl::nd_item<2> item) {
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+
+ reference_type update = reducer.init(results_ptr);
+ if (size == 1) {
+ const member_type team_member(
+ KOKKOS_IMPL_SYCL_GET_MULTI_PTR(team_scratch_memory_L0),
+ shmem_begin, scratch_size[0], global_scratch_ptr,
+ scratch_size[1], item, item.get_group_linear_id(),
+ item.get_group_range(1));
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(team_member, update);
+ else
+ functor(WorkTag(), team_member, update);
+ }
+ reducer.final(results_ptr);
+ if (device_accessible_result_ptr)
+ reducer.copy(device_accessible_result_ptr, &results_ptr[0]);
+ });
+ };
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ } else {
+      // Otherwise (i.e. if the total range has more than one element) we perform a
+ // reduction on the values in all workgroups separately, write the
+ // workgroup results back to global memory and recurse until only one
+ // workgroup does the reduction and thus gets the final value.
+ auto cgh_lambda = [&](sycl::handler& cgh) {
+ auto scratch_flags = static_cast<sycl_device_ptr<unsigned int>>(
+ instance.scratch_flags(sizeof(unsigned int)));
+
+ // FIXME_SYCL accessors seem to need a size greater than zero at least
+ // for host queues
+ sycl::local_accessor<char, 1> team_scratch_memory_L0(
+ sycl::range<1>(
+ std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+ cgh);
+
+ // Avoid capturing *this since it might not be trivially copyable
+ const auto shmem_begin = m_shmem_begin;
+ const auto league_size = m_league_size;
+ const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+ sycl::local_accessor<unsigned int> num_teams_done(1, cgh);
+
+ auto team_reduction_factory =
+ [&](sycl::local_accessor<value_type, 1> local_mem,
+ sycl_device_ptr<value_type> results_ptr) {
+ auto device_accessible_result_ptr =
+ m_result_ptr_device_accessible
+ ? static_cast<sycl::global_ptr<value_type>>(m_result_ptr)
+ : static_cast<sycl::global_ptr<value_type>>(
+ host_result_ptr);
+ auto lambda = [=](sycl::nd_item<2> item) {
+ auto n_wgroups = item.get_group_range()[1];
+ int wgroup_size =
+ item.get_local_range()[0] * item.get_local_range()[1];
+ auto group_id = item.get_group_linear_id();
+ auto size = n_wgroups * wgroup_size;
+
+ const auto local_id = item.get_local_linear_id();
+ const CombinedFunctorReducerType& functor_reducer =
+ functor_reducer_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const ReducerType& reducer = functor_reducer.get_reducer();
+
+ if constexpr (!SYCLReduction::use_shuffle_based_algorithm<
+ ReducerType>) {
+ reference_type update =
+ reducer.init(&local_mem[local_id * value_count]);
+ for (int league_rank = group_id; league_rank < league_size;
+ league_rank += n_wgroups) {
+ const member_type team_member(
+ KOKKOS_IMPL_SYCL_GET_MULTI_PTR(team_scratch_memory_L0),
+ shmem_begin, scratch_size[0],
+ global_scratch_ptr +
+ item.get_group(1) * scratch_size[1],
+ scratch_size[1], item, league_rank, league_size);
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(team_member, update);
+ else
+ functor(WorkTag(), team_member, update);
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr,
+ device_accessible_result_ptr, value_count, reducer, false,
+ std::min<std::size_t>(size,
+ item.get_local_range()[0] *
+ item.get_local_range()[1]));
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ sycl::group_barrier(item.get_group());
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= n_wgroups)
+ reducer.init(&local_mem[local_id * value_count]);
+ else {
+ reducer.copy(&local_mem[local_id * value_count],
+ &results_ptr[local_id * value_count]);
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_mem[local_id * value_count],
+ &results_ptr[id * value_count]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, results_ptr,
+ device_accessible_result_ptr, value_count, reducer,
+ true,
+ std::min(n_wgroups, item.get_local_range()[0] *
+ item.get_local_range()[1]));
+ }
+ } else {
+ value_type local_value;
+ reference_type update = reducer.init(&local_value);
+ for (int league_rank = group_id; league_rank < league_size;
+ league_rank += n_wgroups) {
+ const member_type team_member(
+ KOKKOS_IMPL_SYCL_GET_MULTI_PTR(team_scratch_memory_L0),
+ shmem_begin, scratch_size[0],
+ global_scratch_ptr +
+ item.get_group(1) * scratch_size[1],
+ scratch_size[1], item, league_rank, league_size);
+ if constexpr (std::is_void_v<WorkTag>)
+ functor(team_member, update);
+ else
+ functor(WorkTag(), team_member, update);
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, false,
+ std::min<std::size_t>(size,
+ item.get_local_range()[0] *
+ item.get_local_range()[1]));
+
+ if (local_id == 0) {
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::local_space);
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ if (local_id >= n_wgroups)
+ reducer.init(&local_value);
+ else {
+ local_value = results_ptr[local_id];
+ for (unsigned int id = local_id + wgroup_size;
+ id < n_wgroups; id += wgroup_size) {
+ reducer.join(&local_value, &results_ptr[id]);
+ }
+ }
+
+ SYCLReduction::workgroup_reduction<>(
+ item, local_mem, local_value, results_ptr,
+ device_accessible_result_ptr, reducer, true,
+ std::min(n_wgroups, item.get_local_range()[0] *
+ item.get_local_range()[1]));
+ }
+ }
+ };
+ return lambda;
+ };
+
+ auto dummy_reduction_lambda = team_reduction_factory({1, cgh}, nullptr);
+
+ static sycl::kernel kernel = [&] {
+ sycl::kernel_id functor_kernel_id =
+ sycl::get_kernel_id<decltype(dummy_reduction_lambda)>();
+ auto kernel_bundle =
+ sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+ q.get_context(), std::vector{functor_kernel_id});
+ return kernel_bundle.get_kernel(functor_kernel_id);
+ }();
+ auto max_sg_size = kernel.get_info<
+ sycl::info::kernel_device_specific::max_sub_group_size>(
+ q.get_device());
+ auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
+ // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle to
+ // be used gives a runtime error.
+
+ // cgh.use_kernel_bundle(kernel_bundle);
+
+ auto wgroup_size = m_team_size * final_vector_size;
+ std::size_t size = std::size_t(m_league_size) * wgroup_size;
+ sycl::local_accessor<value_type, 1> local_mem(
+ sycl::range<1>(wgroup_size) * std::max(value_count, 1u), cgh);
+
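+          // E.g. (illustrative numbers) league_size = 1000, team_size = 32
+          // and final_vector_size = 2: wgroup_size = 64, size = 64000, and
+          // space for init_size = 1000 partial results is allocated.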
+ const auto init_size =
+ std::max<std::size_t>((size + wgroup_size - 1) / wgroup_size, 1);
+ results_ptr =
+ static_cast<sycl_device_ptr<value_type>>(instance.scratch_space(
+ sizeof(value_type) * std::max(value_count, 1u) * init_size));
+
+ size_t max_work_groups =
+ 2 *
+ q.get_device().get_info<sycl::info::device::max_compute_units>();
+ int values_per_thread = 1;
+ size_t n_wgroups = m_league_size;
+ while (n_wgroups > max_work_groups) {
+ values_per_thread *= 2;
+ n_wgroups =
+ ((size_t(m_league_size) * wgroup_size + values_per_thread - 1) /
+ values_per_thread +
+ wgroup_size - 1) /
+ wgroup_size;
+ }
+
+ auto reduction_lambda = team_reduction_factory(local_mem, results_ptr);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#endif
+
+ cgh.parallel_for(
+ sycl::nd_range<2>(
+ sycl::range<2>(m_team_size, n_wgroups * m_vector_size),
+ sycl::range<2>(m_team_size, m_vector_size)),
+ reduction_lambda);
+ };
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+ if constexpr (Policy::is_graph_kernel::value) {
+ sycl_attach_kernel_to_node(*this, cgh_lambda);
+ } else
+#endif
+ {
+ last_reduction_event = q.submit(cgh_lambda);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{last_reduction_event});
+#endif
+ }
+ }
+
+ // At this point, the reduced value is written to the entry in results_ptr
+ // and all that is left is to copy it back to the given result pointer if
+ // necessary.
+ // Using DeepCopy instead of fence+memcpy turned out to be up to 2x slower.
+ if (host_result_ptr) {
+ if constexpr (Policy::is_graph_kernel::value)
+ Kokkos::abort(
+ "parallel_reduce not implemented for graph kernels if result is "
+ "not device-accessible!");
+
+ space.fence(
+ "Kokkos::Impl::ParallelReduce<SYCL, TeamPolicy>::execute: result not "
+ "device-accessible");
+ std::memcpy(m_result_ptr, host_result_ptr,
+ sizeof(*m_result_ptr) * value_count);
+ }
+
+ return last_reduction_event;
+ }
+
+ public:
+ inline void execute() {
+ Kokkos::Impl::SYCLInternal& instance =
+ *m_policy.space().impl_internal_space_instance();
+
+ // Only let one instance at a time resize the instance's scratch memory
+ // allocations.
+ std::scoped_lock<std::mutex> scratch_buffers_lock(
+ instance.m_mutexScratchSpace);
+ std::scoped_lock<std::mutex> team_scratch_lock(
+ instance.m_team_scratch_mutex);
+
+ // Functor's reduce memory, team scan memory, and team shared memory depend
+ // upon team size.
+ int scratch_pool_id = instance.acquire_team_scratch_space();
+ const sycl_device_ptr<char> global_scratch_ptr =
+ static_cast<sycl_device_ptr<char>>(instance.resize_team_scratch_space(
+ scratch_pool_id,
+ static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
+
+ using IndirectKernelMem = Kokkos::Impl::SYCLInternal::IndirectKernelMem;
+ IndirectKernelMem& indirectKernelMem = instance.get_indirect_kernel_mem();
+
+ auto functor_reducer_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor_reducer, indirectKernelMem);
+
+ sycl::event event =
+ sycl_direct_launch(global_scratch_ptr, functor_reducer_wrapper,
+ functor_reducer_wrapper.get_copy_event());
+ functor_reducer_wrapper.register_event(event);
+ instance.register_team_scratch_event(scratch_pool_id, event);
+ }
+
+ template <class ViewType>
+ ParallelReduce(CombinedFunctorReducerType const& arg_functor_reducer,
+ Policy const& arg_policy, ViewType const& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result.data()),
+ m_result_ptr_device_accessible(
+ MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ typename ViewType::memory_space>::accessible),
+ m_league_size(arg_policy.league_size()),
+ m_team_size(arg_policy.team_size()),
+ m_vector_size(arg_policy.impl_vector_length()) {
+ if (m_team_size < 0) {
+ m_team_size = m_policy.team_size_recommended(
+ m_functor_reducer.get_functor(), m_functor_reducer.get_reducer(),
+ ParallelReduceTag{});
+ if (m_team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelReduce<SYCL, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ }
+
+    // The team size must be a power of two and at least two; otherwise fall
+    // back to the largest power of two not exceeding the requested one.
+    if ((m_team_size & (m_team_size - 1)) || m_team_size < 2) {
+ int temp_team_size = 2;
+ while ((temp_team_size << 1) < m_team_size) temp_team_size <<= 1;
+ m_team_size = temp_team_size;
+ }
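+    // E.g. (illustrative) a requested team size of 24 is not a power of two
+    // and is rounded down to temp_team_size = 16.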
+
+ m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+ m_shmem_size = (m_policy.scratch_size(0, m_team_size) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ m_functor_reducer.get_functor(), m_team_size));
+ m_scratch_size[0] = m_shmem_size;
+ m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+
+ const Kokkos::Impl::SYCLInternal& instance =
+ *m_policy.space().impl_internal_space_instance();
+ if (static_cast<int>(instance.m_maxShmemPerBlock) <
+ m_shmem_size - m_shmem_begin) {
+ std::stringstream out;
+ out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
+ "Requested "
+ << m_shmem_size - m_shmem_begin << " bytes but maximum is "
+ << instance.m_maxShmemPerBlock << '\n';
+ Kokkos::Impl::throw_runtime_exception(out.str());
+ }
+
+ if (m_team_size > m_policy.team_size_max(m_functor_reducer.get_functor(),
+ m_functor_reducer.get_reducer(),
+ ParallelReduceTag{}))
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<SYCL> requested too large team size.");
+ }
+};
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_PARALLEL_SCAN_RANGE_HPP
+#define KOKKOS_SYCL_PARALLEL_SCAN_RANGE_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <SYCL/Kokkos_SYCL_WorkgroupReduction.hpp>
+#include <memory>
+#include <vector>
+
+namespace Kokkos::Impl {
+
+// Perform a scan over a workgroup.
+// At the end of this function, the subgroup scans are stored in the local array
+// such that the last value (at position n_active_subgroups-1) contains the
+// total sum.
+template <int dim, typename ValueType, typename FunctorType>
+void workgroup_scan(sycl::nd_item<dim> item, const FunctorType& final_reducer,
+ sycl::local_accessor<ValueType> local_mem,
+ ValueType& local_value, int global_range) {
+ // subgroup scans
+ auto sg = item.get_sub_group();
+ const int sg_group_id = sg.get_group_id()[0];
+ const int id_in_sg = sg.get_local_id()[0];
+ const int local_range = std::min<int>(sg.get_local_range()[0], global_range);
+
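+  // Hillis-Steele style inclusive scan within each subgroup: in every step,
+  // each lane pulls the value from `stride` lanes below and joins it in.
+  // On Intel and NVIDIA GPUs the loop is unrolled for subgroup sizes <= 32.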
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ auto shuffle_combine = [&](int stride) {
+ if (stride < local_range) {
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_right(sg, local_value,
+ stride);
+ if (id_in_sg >= stride) final_reducer.join(&local_value, &tmp);
+ }
+ };
+ shuffle_combine(1);
+ shuffle_combine(2);
+ shuffle_combine(4);
+ shuffle_combine(8);
+ shuffle_combine(16);
+ KOKKOS_ASSERT(local_range <= 32);
+#else
+ for (int stride = 1; stride < local_range; stride <<= 1) {
+ auto tmp =
+ Kokkos::Impl::SYCLReduction::shift_group_right(sg, local_value, stride);
+ if (id_in_sg >= stride) final_reducer.join(&local_value, &tmp);
+ }
+#endif
+
+ const int max_subgroup_size = sg.get_max_local_range()[0];
+ const int n_active_subgroups =
+ (global_range + max_subgroup_size - 1) / max_subgroup_size;
+
+ if (id_in_sg == local_range - 1 && sg_group_id < n_active_subgroups)
+ local_mem[sg_group_id] = local_value;
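+  // Turn the inclusive subgroup scan into an exclusive one: shift each value
+  // one lane to the right and re-initialize lane 0 with the identity.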
+ local_value =
+ Kokkos::Impl::SYCLReduction::shift_group_right(sg, local_value, 1);
+ if (id_in_sg == 0) final_reducer.init(&local_value);
+ sycl::group_barrier(item.get_group());
+
+ // scan subgroup results using the first subgroup
+ if (n_active_subgroups > 1) {
+ if (sg_group_id == 0) {
+ const int n_rounds = (n_active_subgroups + local_range - 1) / local_range;
+ for (int round = 0; round < n_rounds; ++round) {
+ const int idx = id_in_sg + round * local_range;
+ const auto upper_bound =
+ std::min(local_range, n_active_subgroups - round * local_range);
+ auto local_sg_value = local_mem[idx < n_active_subgroups ? idx : 0];
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ auto shuffle_combine_sg = [&](int stride) {
+ if (stride < upper_bound) {
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_right(
+ sg, local_sg_value, stride);
+ if (id_in_sg >= stride) {
+ if (idx < n_active_subgroups)
+ final_reducer.join(&local_sg_value, &tmp);
+ else
+ local_sg_value = tmp;
+ }
+ }
+ };
+ shuffle_combine_sg(1);
+ shuffle_combine_sg(2);
+ shuffle_combine_sg(4);
+ shuffle_combine_sg(8);
+ shuffle_combine_sg(16);
+ KOKKOS_ASSERT(upper_bound <= 32);
+#else
+ for (int stride = 1; stride < upper_bound; stride <<= 1) {
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_right(
+ sg, local_sg_value, stride);
+ if (id_in_sg >= stride) {
+ if (idx < n_active_subgroups)
+ final_reducer.join(&local_sg_value, &tmp);
+ else
+ local_sg_value = tmp;
+ }
+ }
+#endif
+ if (idx < n_active_subgroups) {
+ local_mem[idx] = local_sg_value;
+ if (round > 0)
+ final_reducer.join(&local_mem[idx],
+ &local_mem[round * local_range - 1]);
+ }
+ if (round + 1 < n_rounds) sycl::group_barrier(sg);
+ }
+ }
+ sycl::group_barrier(item.get_group());
+ }
+
+ // add results to all subgroups
+ if (sg_group_id > 0)
+ final_reducer.join(&local_value, &local_mem[sg_group_id - 1]);
+}
+
+template <class FunctorType, class ValueType, class... Traits>
+class ParallelScanSYCLBase {
+ public:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+
+ protected:
+ using Member = typename Policy::member_type;
+ using WorkTag = typename Policy::work_tag;
+ using LaunchBounds = typename Policy::launch_bounds;
+
+ public:
+ using Analysis = FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ValueType>;
+ using pointer_type = typename Analysis::pointer_type;
+ using value_type = typename Analysis::value_type;
+ using reference_type = typename Analysis::reference_type;
+ using functor_type = FunctorType;
+ using size_type = Kokkos::SYCL::size_type;
+ using index_type = typename Policy::index_type;
+
+ protected:
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
+ const Policy m_policy;
+ sycl_host_ptr<value_type> m_scratch_host = nullptr;
+ pointer_type m_result_ptr;
+ const bool m_result_ptr_device_accessible;
+
+ private:
+ template <typename FunctorWrapper>
+ sycl::event sycl_direct_launch(const FunctorWrapper& functor_wrapper,
+ sycl::event memcpy_event) {
+ // Convenience references
+ const Kokkos::SYCL& space = m_policy.space();
+ Kokkos::Impl::SYCLInternal& instance =
+ *space.impl_internal_space_instance();
+ sycl::queue& q = space.sycl_queue();
+
+ const auto size = m_policy.end() - m_policy.begin();
+
+ auto scratch_flags = static_cast<sycl_device_ptr<unsigned int>>(
+ instance.scratch_flags(sizeof(unsigned int)));
+
+ const auto begin = m_policy.begin();
+
+    // Factory for the scan kernel body: each work group scans its own chunk
+    // of global memory, and the last work group to finish additionally
+    // combines the per-group totals.
+ auto scan_lambda_factory = [&](sycl::local_accessor<value_type> local_mem,
+ sycl::local_accessor<unsigned int>
+ num_teams_done,
+ sycl_device_ptr<value_type> global_mem_,
+ sycl_device_ptr<value_type> group_results_) {
+ auto lambda = [=](sycl::nd_item<1> item) {
+ auto global_mem = global_mem_;
+ auto group_results = group_results_;
+
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>&
+ functor_reducer = functor_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const typename Analysis::Reducer& reducer =
+ functor_reducer.get_reducer();
+
+ const auto n_wgroups = item.get_group_range()[0];
+ const int wgroup_size = item.get_local_range()[0];
+
+ const int local_id = item.get_local_linear_id();
+ const index_type global_id = item.get_global_linear_id();
+
+        // Initialize this item's partial value
+ value_type local_value;
+ reducer.init(&local_value);
+ if (global_id < size) {
+ if constexpr (std::is_void<WorkTag>::value)
+ functor(global_id + begin, local_value, false);
+ else
+ functor(WorkTag(), global_id + begin, local_value, false);
+ }
+
+ workgroup_scan<>(item, reducer, local_mem, local_value, wgroup_size);
+
+ // Write results to global memory
+ if (global_id < size) global_mem[global_id] = local_value;
+
+ if (local_id == wgroup_size - 1) {
+ group_results[item.get_group_linear_id()] =
+ local_mem[item.get_sub_group().get_group_range()[0] - 1];
+
+ sycl::atomic_ref<unsigned, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ scratch_flags_ref(*scratch_flags);
+ num_teams_done[0] = ++scratch_flags_ref;
+ }
+ item.barrier(sycl::access::fence_space::global_space);
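+        // Only the work group that finished last (its atomic increment
+        // returned n_wgroups) passes this check; all of its items then
+        // cooperatively scan the per-group totals.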
+ if (num_teams_done[0] == n_wgroups) {
+ if (local_id == 0) *scratch_flags = 0;
+ value_type total;
+ reducer.init(&total);
+
+ for (unsigned int offset = 0; offset < n_wgroups;
+ offset += wgroup_size) {
+ index_type id = local_id + offset;
+ if (id < static_cast<index_type>(n_wgroups))
+ local_value = group_results[id];
+ else
+ reducer.init(&local_value);
+ workgroup_scan<>(
+ item, reducer, local_mem, local_value,
+ std::min<index_type>(n_wgroups - offset, wgroup_size));
+ if (id < static_cast<index_type>(n_wgroups)) {
+ reducer.join(&local_value, &total);
+ group_results[id] = local_value;
+ }
+ reducer.join(
+ &total,
+ &local_mem[item.get_sub_group().get_group_range()[0] - 1]);
+ if (offset + wgroup_size < n_wgroups)
+ item.barrier(sycl::access::fence_space::global_space);
+ }
+ }
+ };
+ return lambda;
+ };
+
+ size_t wgroup_size;
+ size_t n_wgroups;
+ sycl_device_ptr<value_type> global_mem;
+ sycl_device_ptr<value_type> group_results;
+
+ desul::ensure_sycl_lock_arrays_on_device(q);
+
+ auto perform_work_group_scans = q.submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<unsigned int> num_teams_done(1, cgh);
+
+ auto dummy_scan_lambda =
+ scan_lambda_factory({1, cgh}, num_teams_done, nullptr, nullptr);
+
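+      // Build the kernel bundle once (note the static local) so that the
+      // device-specific work group limits for this kernel type can be
+      // queried below.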
+ static sycl::kernel kernel = [&] {
+ sycl::kernel_id functor_kernel_id =
+ sycl::get_kernel_id<decltype(dummy_scan_lambda)>();
+ auto kernel_bundle =
+ sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+ q.get_context(), std::vector{functor_kernel_id});
+ return kernel_bundle.get_kernel(functor_kernel_id);
+ }();
+ auto multiple = kernel.get_info<sycl::info::kernel_device_specific::
+ preferred_work_group_size_multiple>(
+ q.get_device());
+ auto max =
+ kernel.get_info<sycl::info::kernel_device_specific::work_group_size>(
+ q.get_device());
+
+ wgroup_size = static_cast<size_t>(max / multiple) * multiple;
+ n_wgroups = (size + wgroup_size - 1) / wgroup_size;
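+      // For example, a (hypothetical) device reporting max = 1024 and
+      // multiple = 32 yields wgroup_size = 1024, so a range of 10^6 elements
+      // is covered by (10^6 + 1023) / 1024 = 977 work groups.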
+
+ // Compute the total amount of memory we will need.
+      // We need to allocate memory for the whole range (rounded up to the
+      // next multiple of the work group size) and for one element per work
+      // group that will contain the sum of the previous work groups' totals.
+ // FIXME_SYCL consider only storing one value per block and recreate
+ // initial results in the end before doing the final pass
+ global_mem =
+ static_cast<sycl_device_ptr<value_type>>(instance.scratch_space(
+ n_wgroups * (wgroup_size + 1) * sizeof(value_type)));
+ m_scratch_host = static_cast<sycl_host_ptr<value_type>>(
+ instance.scratch_host(sizeof(value_type)));
+
+ group_results = global_mem + n_wgroups * wgroup_size;
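+      // group_results points at the extra n_wgroups entries allocated right
+      // after the per-element scratch.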
+
+ // Store subgroup totals in local space
+ const auto min_subgroup_size =
+ q.get_device()
+ .template get_info<sycl::info::device::sub_group_sizes>()
+ .front();
+ sycl::local_accessor<value_type> local_mem(
+ sycl::range<1>((wgroup_size + min_subgroup_size - 1) /
+ min_subgroup_size),
+ cgh);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(memcpy_event);
+#else
+ (void)memcpy_event;
+#endif
+
+ auto scan_lambda = scan_lambda_factory(local_mem, num_teams_done,
+ global_mem, group_results);
+ cgh.parallel_for(sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+ scan_lambda);
+ });
+
+    // Second kernel: add each work group's offset to its elements' partial
+    // results and write the final scan values (and the total, if requested).
+ auto update_global_results = q.submit([&](sycl::handler& cgh) {
+ // The compiler failed with CL_INVALID_ARG_VALUE if using m_result_ptr
+ // directly.
+ pointer_type result_ptr = m_result_ptr_device_accessible
+ ? m_result_ptr
+ : static_cast<pointer_type>(m_scratch_host);
+
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ cgh.depends_on(perform_work_group_scans);
+#endif
+
+ cgh.parallel_for(
+ sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+ [=](sycl::nd_item<1> item) {
+ const index_type global_id = item.get_global_linear_id();
+ const CombinedFunctorReducer<
+ FunctorType, typename Analysis::Reducer>& functor_reducer =
+ functor_wrapper.get_functor();
+ const FunctorType& functor = functor_reducer.get_functor();
+ const typename Analysis::Reducer& reducer =
+ functor_reducer.get_reducer();
+
+ if (global_id < size) {
+ value_type update = global_mem[global_id];
+
+ reducer.join(&update, &group_results[item.get_group_linear_id()]);
+
+ if constexpr (std::is_void<WorkTag>::value)
+ functor(global_id + begin, update, true);
+ else
+ functor(WorkTag(), global_id + begin, update, true);
+
+ if (global_id == size - 1) *result_ptr = update;
+ }
+ });
+ });
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(
+ std::vector<sycl::event>{update_global_results});
+#endif
+ return update_global_results;
+ }
+
+ public:
+ template <typename PostFunctor>
+ void impl_execute(const PostFunctor& post_functor) {
+ if (m_policy.begin() == m_policy.end()) return;
+
+ auto& instance = *m_policy.space().impl_internal_space_instance();
+
+ // Only let one instance at a time resize the instance's scratch memory
+ // allocations.
+ std::scoped_lock<std::mutex> scratch_buffers_lock(
+ instance.m_mutexScratchSpace);
+
+ Kokkos::Impl::SYCLInternal::IndirectKernelMem& indirectKernelMem =
+ instance.get_indirect_kernel_mem();
+
+ auto functor_wrapper =
+ Impl::make_sycl_function_wrapper(m_functor_reducer, indirectKernelMem);
+
+ sycl::event event =
+ sycl_direct_launch(functor_wrapper, functor_wrapper.get_copy_event());
+ functor_wrapper.register_event(event);
+ post_functor();
+ }
+
+ ParallelScanSYCLBase(const FunctorType& arg_functor, const Policy& arg_policy,
+ pointer_type arg_result_ptr,
+ bool arg_result_ptr_device_accessible)
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_ptr),
+ m_result_ptr_device_accessible(arg_result_ptr_device_accessible) {}
+};
+
+} // namespace Kokkos::Impl
+
+template <class FunctorType, class... Traits>
+class Kokkos::Impl::ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::SYCL>
+ : private ParallelScanSYCLBase<FunctorType, void, Traits...> {
+ public:
+ using Base = ParallelScanSYCLBase<FunctorType, void, Traits...>;
+
+ inline void execute() {
+ Base::impl_execute([]() {});
+ }
+
+ ParallelScan(const FunctorType& arg_functor,
+ const typename Base::Policy& arg_policy)
+ : Base(arg_functor, arg_policy, nullptr, false) {}
+};
+
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class Kokkos::Impl::ParallelScanWithTotal<
+ FunctorType, Kokkos::RangePolicy<Traits...>, ReturnType, Kokkos::SYCL>
+ : public ParallelScanSYCLBase<FunctorType, ReturnType, Traits...> {
+ public:
+ using Base = ParallelScanSYCLBase<FunctorType, ReturnType, Traits...>;
+
+ const Kokkos::SYCL& m_exec;
+
+ inline void execute() {
+ Base::impl_execute([&]() {
+ const long long nwork = Base::m_policy.end() - Base::m_policy.begin();
+ if (nwork > 0 && !Base::m_result_ptr_device_accessible) {
+ // Using DeepCopy instead of fence+memcpy turned out to be up to 2x
+ // slower.
+ m_exec.fence(
+ "Kokkos::Impl::ParallelReduce<SYCL, MDRangePolicy>::execute: "
+ "result not device-accessible");
+ const int size = Base::m_functor_reducer.get_reducer().value_size();
+ std::memcpy(Base::m_result_ptr, Base::m_scratch_host, size);
+ }
+ });
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const typename Base::Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : Base(arg_functor, arg_policy, arg_result_view.data(),
+ MemorySpaceAccess<SYCLDeviceUSMSpace,
+ typename ViewType::memory_space>::accessible),
+ m_exec(arg_policy.space()) {}
+};
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_HostSpace.hpp>
+#include <SYCL/Kokkos_SYCL.hpp>
+#include <SYCL/Kokkos_SYCL_Space.hpp>
+#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
+#include <SYCL/Kokkos_SYCL_Instance.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopySYCL(void* dst, const void* src, size_t n) {
+ Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
+}
+
+void DeepCopyAsyncSYCL(const Kokkos::SYCL& instance, void* dst, const void* src,
+ size_t n) {
+ sycl::queue& q = *instance.impl_internal_space_instance()->m_queue;
+ auto event = q.memcpy(dst, src, n);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ q.ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
+#endif
+}
+
+void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n) {
+ Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
+ SYCL().fence("Kokkos::Impl::DeepCopyAsyncSYCL: fence after memcpy");
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+namespace {
+
+std::string_view get_memory_space_name(sycl::usm::alloc allocation_kind) {
+ switch (allocation_kind) {
+ case sycl::usm::alloc::host: return Kokkos::SYCLHostUSMSpace::name();
+ case sycl::usm::alloc::device: return Kokkos::SYCLDeviceUSMSpace::name();
+ case sycl::usm::alloc::shared: return Kokkos::SYCLSharedUSMSpace::name();
+ default:
+ Kokkos::abort("bug: unknown sycl allocation type");
+ return "unreachable";
+ }
+}
+
+} // namespace
+
+namespace Kokkos {
+
+SYCLDeviceUSMSpace::SYCLDeviceUSMSpace()
+ : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLDeviceUSMSpace::SYCLDeviceUSMSpace(sycl::queue queue)
+ : m_queue(std::move(queue)) {}
+
+SYCLSharedUSMSpace::SYCLSharedUSMSpace()
+ : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLSharedUSMSpace::SYCLSharedUSMSpace(sycl::queue queue)
+ : m_queue(std::move(queue)) {}
+
+SYCLHostUSMSpace::SYCLHostUSMSpace()
+ : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLHostUSMSpace::SYCLHostUSMSpace(sycl::queue queue)
+ : m_queue(std::move(queue)) {}
+
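+// Common allocation path for all three SYCL USM memory spaces: allocate with
+// the requested USM kind, throw on failure, and report the allocation to the
+// profiling tools if a tools library is loaded.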
+void* allocate_sycl(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle,
+ const sycl::usm::alloc allocation_kind,
+ const sycl::queue& queue) {
+ void* const hostPtr = sycl::malloc(arg_alloc_size, queue, allocation_kind);
+
+ if (hostPtr == nullptr) {
+ Kokkos::Impl::throw_bad_alloc(get_memory_space_name(allocation_kind),
+ arg_alloc_size, arg_label);
+ }
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, hostPtr,
+ reported_size);
+ }
+
+ return hostPtr;
+}
+
+void* SYCLDeviceUSMSpace::allocate(const Kokkos::SYCL& exec_space,
+ const size_t arg_alloc_size) const {
+ return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const Kokkos::SYCL& exec_space,
+ const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::device,
+ *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::device, m_queue);
+}
+
+void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
+ const size_t arg_alloc_size) const {
+ return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
+ const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::shared,
+ *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLSharedUSMSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void* SYCLSharedUSMSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::shared, m_queue);
+}
+
+void* SYCLHostUSMSpace::allocate(const SYCL& exec_space,
+ const size_t arg_alloc_size) const {
+ return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+void* SYCLHostUSMSpace::allocate(const SYCL& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::host,
+ *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLHostUSMSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void* SYCLHostUSMSpace::allocate(const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ return allocate_sycl(arg_label, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()),
+ sycl::usm::alloc::host, m_queue);
+}
+
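+// Common deallocation path: report to the profiling tools if loaded, then
+// fence before freeing so that no still-running kernel can touch the memory.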
+void sycl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle,
+ const sycl::queue& queue) {
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+
+ SYCL::impl_static_fence(
+ "Kokkos::Impl::sycl_deallocate: fence before deallocate");
+ sycl::free(arg_alloc_ptr, queue);
+}
+
+void SYCLDeviceUSMSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void SYCLDeviceUSMSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()), m_queue);
+}
+
+void SYCLSharedUSMSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void SYCLSharedUSMSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()), m_queue);
+}
+
+void SYCLHostUSMSpace::deallocate(void* const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void SYCLHostUSMSpace::deallocate(const char* arg_label,
+ void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+ Kokkos::Tools::make_space_handle(name()), m_queue);
+}
+
+} // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::SYCLDeviceUSMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::SYCLSharedUSMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(
+ Kokkos::SYCLHostUSMSpace);
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_SYCLSPACE_HPP
+#define KOKKOS_SYCLSPACE_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#ifdef KOKKOS_ENABLE_SYCL
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <SYCL/Kokkos_SYCL_Instance.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+template <typename T>
+struct is_sycl_type_space : public std::false_type {};
+} // namespace Impl
+
+class SYCLDeviceUSMSpace {
+ public:
+ using execution_space = SYCL;
+ using memory_space = SYCLDeviceUSMSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using size_type = Impl::SYCLInternal::size_type;
+
+ SYCLDeviceUSMSpace();
+ explicit SYCLDeviceUSMSpace(sycl::queue queue);
+
+ void* allocate(const SYCL& exec_space,
+ const std::size_t arg_alloc_size) const;
+ void* allocate(const SYCL& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+ void* allocate(const std::size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ void deallocate(void* const arg_alloc_ptr,
+ const std::size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ static constexpr const char* name() { return "SYCLDeviceUSM"; };
+
+ private:
+ sycl::queue m_queue;
+};
+
+class SYCLSharedUSMSpace {
+ public:
+ using execution_space = SYCL;
+ using memory_space = SYCLSharedUSMSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using size_type = Impl::SYCLInternal::size_type;
+
+ SYCLSharedUSMSpace();
+ explicit SYCLSharedUSMSpace(sycl::queue queue);
+
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const SYCL& exec_space,
+ const std::size_t arg_alloc_size) const;
+ void* allocate(const SYCL& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+ void* allocate(const std::size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ void deallocate(void* const arg_alloc_ptr,
+ const std::size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ static constexpr const char* name() { return "SYCLSharedUSM"; };
+
+ private:
+ sycl::queue m_queue;
+};
+
+class SYCLHostUSMSpace {
+ public:
+ using execution_space = HostSpace::execution_space;
+ using memory_space = SYCLHostUSMSpace;
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using size_type = Impl::SYCLInternal::size_type;
+
+ SYCLHostUSMSpace();
+ explicit SYCLHostUSMSpace(sycl::queue queue);
+
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const size_t arg_alloc_size) const {
+ return allocate(arg_alloc_size);
+ }
+ template <typename ExecutionSpace>
+ void* allocate(const ExecutionSpace&, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const {
+ return allocate(arg_label, arg_alloc_size, arg_logical_size);
+ }
+ void* allocate(const SYCL& exec_space,
+ const std::size_t arg_alloc_size) const;
+ void* allocate(const SYCL& exec_space, const char* arg_label,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+ void* allocate(const std::size_t arg_alloc_size) const;
+ void* allocate(const char* arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ void deallocate(void* const arg_alloc_ptr,
+ const std::size_t arg_alloc_size) const;
+ void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size = 0) const;
+
+ static constexpr const char* name() { return "SYCLHostUSM"; };
+
+ private:
+ sycl::queue m_queue;
+};
+
+namespace Impl {
+
+template <>
+struct is_sycl_type_space<Kokkos::SYCLDeviceUSMSpace> : public std::true_type {
+};
+
+template <>
+struct is_sycl_type_space<Kokkos::SYCLSharedUSMSpace> : public std::true_type {
+};
+
+template <>
+struct is_sycl_type_space<Kokkos::SYCLHostUSMSpace> : public std::true_type {};
+
+static_assert(
+ Kokkos::Impl::MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ Kokkos::SYCLDeviceUSMSpace>::assignable);
+
+static_assert(
+ Kokkos::Impl::MemorySpaceAccess<Kokkos::SYCLSharedUSMSpace,
+ Kokkos::SYCLSharedUSMSpace>::assignable);
+
+static_assert(
+    Kokkos::Impl::MemorySpaceAccess<Kokkos::SYCLHostUSMSpace,
+                                    Kokkos::SYCLHostUSMSpace>::assignable);
+
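+// Semantics of the specializations below: `accessible` means the first
+// space's execution space can reference memory of the second space,
+// `assignable` means a View of the second space may be assigned to a View of
+// the first without deep copy, and `deepcopy` means deep_copy between the two
+// is supported.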
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::SYCLDeviceUSMSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::SYCLSharedUSMSpace> {
+ // HostSpace::execution_space != SYCLSharedUSMSpace::execution_space
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::SYCLHostUSMSpace> {
+ // HostSpace::execution_space ==
+ // SYCLHostUSMSpace::execution_space
+ enum : bool { assignable = true };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace, Kokkos::HostSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ Kokkos::SYCLSharedUSMSpace> {
+ // SYCLDeviceUSMSpace::execution_space == SYCLSharedUSMSpace::execution_space
+ enum : bool { assignable = true };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace, Kokkos::SYCLHostUSMSpace> {
+ // SYCLDeviceUSMSpace::execution_space !=
+ // SYCLHostUSMSpace::execution_space
+ enum : bool { assignable = false };
+ enum : bool { accessible = true }; // SYCLDeviceUSMSpace::execution_space
+ enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// SYCLSharedUSMSpace::execution_space == SYCL
+// SYCLSharedUSMSpace accessible to both SYCL and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLSharedUSMSpace, Kokkos::HostSpace> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = false }; // SYCL cannot access HostSpace
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLSharedUSMSpace,
+ Kokkos::SYCLDeviceUSMSpace> {
+ // SYCLSharedUSMSpace::execution_space == SYCLDeviceUSMSpace::execution_space
+ // Can access SYCLSharedUSMSpace from Host but cannot access
+ // SYCLDeviceUSMSpace from Host
+ enum : bool { assignable = false };
+
+ // SYCLSharedUSMSpace::execution_space can access SYCLDeviceUSMSpace
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLSharedUSMSpace, Kokkos::SYCLHostUSMSpace> {
+ // SYCLSharedUSMSpace::execution_space !=
+ // SYCLHostUSMSpace::execution_space
+ enum : bool { assignable = false };
+ enum : bool { accessible = true }; // SYCLSharedUSMSpace::execution_space
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLHostUSMSpace, Kokkos::HostSpace> {
+ enum : bool { assignable = false }; // Cannot access from SYCL
+ enum : bool { accessible = true }; // SYCLHostUSMSpace::execution_space
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLHostUSMSpace, Kokkos::SYCLDeviceUSMSpace> {
+ enum : bool { assignable = false }; // Cannot access from Host
+ enum : bool { accessible = false };
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLHostUSMSpace, Kokkos::SYCLSharedUSMSpace> {
+ enum : bool { assignable = false }; // different execution_space
+ enum : bool { accessible = true }; // same accessibility
+ enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::SYCLDeviceUSMSpace,
+ Kokkos::ScratchMemorySpace<Kokkos::SYCL>> {
+ enum : bool { assignable = false };
+ enum : bool { accessible = true };
+ enum : bool { deepcopy = false };
+};
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION(
+ Kokkos::SYCLDeviceUSMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::SYCLSharedUSMSpace);
+KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::SYCLHostUSMSpace);
+
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SYCL_TEAM_HPP
#define KOKKOS_SYCL_TEAM_HPP
#ifdef KOKKOS_ENABLE_SYCL
#include <utility>
+#include <SYCL/Kokkos_SYCL_WorkgroupReduction.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
*/
class SYCLTeamMember {
public:
- using execution_space = Kokkos::Experimental::SYCL;
+ using execution_space = Kokkos::SYCL;
using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = SYCLTeamMember;
private:
mutable sycl::local_ptr<void> m_team_reduce;
scratch_memory_space m_team_shared;
int m_team_reduce_size;
sycl::nd_item<2> m_item;
+ int m_league_rank;
+ int m_league_size;
public:
KOKKOS_INLINE_FUNCTION
return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
}
- KOKKOS_INLINE_FUNCTION int league_rank() const {
- return m_item.get_group_linear_id();
- }
- KOKKOS_INLINE_FUNCTION int league_size() const {
- return m_item.get_group_range(1);
- }
+ KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+ KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
KOKKOS_INLINE_FUNCTION int team_rank() const {
return m_item.get_local_id(0);
}
team_reduce(ReducerType const& reducer,
typename ReducerType::value_type& value) const noexcept {
using value_type = typename ReducerType::value_type;
+ using wrapped_reducer_type =
+ typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<SYCL>, ReducerType,
+ value_type>::Reducer;
+ impl_team_reduce(wrapped_reducer_type(reducer), value);
+ reducer.reference() = value;
+ }
+
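+  // The public team_reduce above wraps the user's reducer in the
+  // pointer-based Reducer interface obtained from FunctorAnalysis;
+  // impl_team_reduce below relies only on that interface.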
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<WrappedReducerType>::value>
+ impl_team_reduce(
+ WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) const noexcept {
+ using value_type = typename WrappedReducerType::value_type;
auto sg = m_item.get_sub_group();
const auto sub_group_range = sg.get_local_range()[0];
const unsigned int team_rank_ = team_rank();
// First combine the values in the same subgroup
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ auto shuffle_combine = [&](int shift) {
+ if (vector_range * shift < sub_group_range) {
+ const value_type tmp = Kokkos::Impl::SYCLReduction::shift_group_left(
+ sg, value, vector_range * shift);
+ if (team_rank_ + shift < team_size_) wrapped_reducer.join(&value, &tmp);
+ }
+ };
+ shuffle_combine(1);
+ shuffle_combine(2);
+ shuffle_combine(4);
+ shuffle_combine(8);
+ shuffle_combine(16);
+ KOKKOS_ASSERT(sub_group_range <= 32);
+#else
for (unsigned int shift = 1; vector_range * shift < sub_group_range;
shift <<= 1) {
- const value_type tmp = sg.shuffle_down(value, vector_range * shift);
- if (team_rank_ + shift < team_size_) reducer.join(value, tmp);
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_left(
+ sg, value, vector_range * shift);
+ if (team_rank_ + shift < team_size_) wrapped_reducer.join(&value, &tmp);
}
- value = sg.shuffle(value, 0);
+#endif
+ value = Kokkos::Impl::SYCLReduction::select_from_group(sg, value, 0);
- // We need to chunk up the whole reduction because we might not have
- // allocated enough memory.
- const auto n_subgroups = sg.get_group_range()[0];
- const unsigned int maximum_work_range =
- std::min<int>(m_team_reduce_size / sizeof(value_type), n_subgroups);
+ const int n_subgroups = sg.get_group_range()[0];
+ if (n_subgroups == 1) {
+ return;
+ }
+
+ // It was found experimentally that 16 is a good value for Intel PVC.
+ // Since there is a maximum number of 1024 threads with subgroup size 16,
+ // we have a maximum of 64 subgroups per workgroup which means 64/16=4
+ // rounds for loading values into the reduction_array, and 16 redundant
+ // reduction steps executed by every thread.
+ constexpr int step_width = 16;
+ auto tmp_alloc = sycl::ext::oneapi::group_local_memory_for_overwrite<
+ value_type[step_width]>(m_item.get_group());
+ auto& reduction_array = *tmp_alloc;
const auto id_in_sg = sg.get_local_id()[0];
- auto reduction_array =
- static_cast<sycl::local_ptr<value_type>>(m_team_reduce);
- // Load values into the first maximum_work_range values of the reduction
+ // Load values into the first step_width values of the reduction
// array in chunks. This means that only sub groups with an id in the
// corresponding chunk load values.
- const auto group_id = sg.get_group_id()[0];
- if (id_in_sg == 0 && group_id < maximum_work_range)
+ const int group_id = sg.get_group_id()[0];
+ if (id_in_sg == 0 && group_id < step_width)
reduction_array[group_id] = value;
sycl::group_barrier(m_item.get_group());
- for (unsigned int start = maximum_work_range; start < n_subgroups;
- start += maximum_work_range) {
+ for (int start = step_width; start < n_subgroups; start += step_width) {
if (id_in_sg == 0 && group_id >= start &&
- group_id <
- std::min<unsigned int>(start + maximum_work_range, n_subgroups))
- reducer.join(reduction_array[group_id - start], value);
+ group_id < std::min(start + step_width, n_subgroups))
+ wrapped_reducer.join(&reduction_array[group_id - start], &value);
sycl::group_barrier(m_item.get_group());
}
- // Let the first subgroup do the final reduction
- if (group_id == 0) {
- const auto local_range = sg.get_local_range()[0];
- auto result =
- reduction_array[id_in_sg < maximum_work_range ? id_in_sg : 0];
- // In case the maximum_work_range is larger than the range of the first
- // subgroup, we first combine the items with a higher index.
- for (unsigned int offset = local_range; offset < maximum_work_range;
- offset += local_range)
- if (id_in_sg + offset < maximum_work_range)
- reducer.join(result, reduction_array[id_in_sg + offset]);
- sycl::group_barrier(sg);
-
- // Now do the actual subgroup reduction.
- const auto min_range =
- std::min<unsigned int>(maximum_work_range, local_range);
- for (unsigned int stride = 1; stride < min_range; stride <<= 1) {
- const auto tmp = sg.shuffle_down(result, stride);
- if (id_in_sg + stride < min_range) reducer.join(result, tmp);
- }
- if (id_in_sg == 0) reduction_array[0] = result;
- }
- sycl::group_barrier(m_item.get_group());
+ // Do the final reduction for all threads redundantly
+ value = reduction_array[0];
+ for (int i = 1; i < std::min(step_width, n_subgroups); ++i)
+ wrapped_reducer.join(&value, &reduction_array[i]);
- reducer.reference() = reduction_array[0];
- // Make sure that the reduction array hasn't been modified in the meantime.
- m_item.barrier(sycl::access::fence_space::local_space);
+ // Make sure that every thread is done using the reduction array.
+ sycl::group_barrier(m_item.get_group());
}
//--------------------------------------------------------------------------
// First combine the values in the same subgroup
for (unsigned int stride = 1; vector_range * stride < sub_group_range;
stride <<= 1) {
- auto tmp = sg.shuffle_up(value, vector_range * stride);
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_right(
+ sg, value, vector_range * stride);
if (id_in_sg >= vector_range * stride) value += tmp;
}
sub_group_range, n_active_subgroups - round * sub_group_range);
auto local_value = base_data[idx];
for (unsigned int stride = 1; stride < upper_bound; stride <<= 1) {
- auto tmp = sg.shuffle_up(local_value, stride);
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_right(
+ sg, local_value, stride);
if (id_in_sg >= stride) {
if (idx < n_active_subgroups)
local_value += tmp;
}
auto total = base_data[n_active_subgroups - 1];
- const auto update = sg.shuffle_up(value, vector_range);
- Type intermediate = (group_id > 0 ? base_data[group_id - 1] : 0) +
- (id_in_sg >= vector_range ? update : 0);
+ const auto update =
+ Kokkos::Impl::SYCLReduction::shift_group_right(sg, value, vector_range);
+ Type intermediate = (group_id > 0 ? base_data[group_id - 1] : Type{0}) +
+ (id_in_sg >= vector_range ? update : Type{0});
if (global_accum) {
if (id_in_sg == sub_group_range - 1 &&
KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
vector_reduce(ReducerType const& reducer,
typename ReducerType::value_type& value) const {
+ using value_type = typename ReducerType::value_type;
+ using wrapped_reducer_type =
+ typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<SYCL>, ReducerType,
+ value_type>::Reducer;
+ impl_vector_reduce(wrapped_reducer_type(reducer), value);
+ reducer.reference() = value;
+ }
+
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<WrappedReducerType>::value>
+ impl_vector_reduce(WrappedReducerType const& wrapped_reducer,
+ typename WrappedReducerType::value_type& value) const {
const auto tidx1 = m_item.get_local_id(1);
const auto grange1 = m_item.get_local_range(1);
if (grange1 == 1) return;
// Intra vector lane shuffle reduction:
- typename ReducerType::value_type tmp(value);
- typename ReducerType::value_type tmp2 = tmp;
+ typename WrappedReducerType::value_type tmp(value);
+ typename WrappedReducerType::value_type tmp2 = tmp;
for (int i = grange1; (i >>= 1);) {
- tmp2 = sg.shuffle_down(tmp, i);
+ tmp2 = Kokkos::Impl::SYCLReduction::shift_group_left(sg, tmp, i);
if (static_cast<int>(tidx1) < i) {
- reducer.join(tmp, tmp2);
+ wrapped_reducer.join(&tmp, &tmp2);
}
}
// because floating point summation is not associative
// and thus different threads could have different results.
- tmp2 = sg.shuffle(tmp, (sg.get_local_id() / grange1) * grange1);
+ tmp2 = Kokkos::Impl::SYCLReduction::select_from_group(
+ sg, tmp, (sg.get_local_id() / grange1) * grange1);
value = tmp2;
- reducer.reference() = tmp2;
}
//----------------------------------------
// Private for the driver
KOKKOS_INLINE_FUNCTION
- SYCLTeamMember(sycl::local_ptr<void> shared, const int shared_begin,
- const int shared_size,
- sycl::device_ptr<void> scratch_level_1_ptr,
- const int scratch_level_1_size, const sycl::nd_item<2> item)
+ SYCLTeamMember(sycl::local_ptr<void> shared, const std::size_t shared_begin,
+ const std::size_t shared_size,
+ sycl_device_ptr<void> scratch_level_1_ptr,
+ const std::size_t scratch_level_1_size,
+ const sycl::nd_item<2> item, const int arg_league_rank,
+ const int arg_league_size)
: m_team_reduce(shared),
m_team_shared(static_cast<sycl::local_ptr<char>>(shared) + shared_begin,
shared_size, scratch_level_1_ptr, scratch_level_1_size),
m_team_reduce_size(shared_begin),
- m_item(item) {}
+ m_item(item),
+ m_league_rank(arg_league_rank),
+ m_league_size(arg_league_size) {}
public:
// Declare to avoid unused private member warnings which are trigger
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::SYCLTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
- typename ReducerType::value_type value;
- reducer.init(value);
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start +
loop_boundaries.member.item().get_local_id(0);
closure(i, value);
}
- loop_boundaries.member.team_reduce(reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
/** \brief Inter-thread parallel_reduce assuming summation.
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::SYCLTeamMember>& loop_boundaries,
const Closure& closure, ValueType& result) {
- ValueType val;
- Kokkos::Sum<ValueType> reducer(val);
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- reducer.init(reducer.reference());
+ wrapped_reducer_type wrapped_reducer(closure);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start +
loop_boundaries.member.item().get_local_id(0);
i < loop_boundaries.end;
i += loop_boundaries.member.item().get_local_range(0)) {
- closure(i, val);
+ closure(i, value);
}
- loop_boundaries.member.team_reduce(reducer, val);
- result = reducer.reference();
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value);
+ result = value;
}
/** \brief Inter-thread parallel exclusive prefix sum.
* final == true.
*/
// This is the same code as in CUDA and largely the same as in OpenMPTarget
-template <typename iType, typename FunctorType>
+template <typename iType, typename FunctorType, typename ValueType>
KOKKOS_INLINE_FUNCTION void parallel_scan(
const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
loop_bounds,
- const FunctorType& lambda) {
- // Extract value_type from lambda
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void,
- FunctorType>::value_type;
+ const FunctorType& lambda, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same_v<closure_value_type, ValueType>,
+ "Non-matching value types of closure and return type");
const auto start = loop_bounds.start;
const auto end = loop_bounds.end;
const auto team_size = member.team_size();
const auto team_rank = member.team_rank();
const auto nchunk = (end - start + team_size - 1) / team_size;
- value_type accum = 0;
+ ValueType accum = 0;
// each team has to process one or more chunks of the prefix scan
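+  // e.g. a range of 10 iterations on a team of 4 threads gives
+  // (10 + 4 - 1) / 4 = 3 chunks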
for (iType i = 0; i < nchunk; ++i) {
auto ii = start + i * team_size + team_rank;
// local accumulation for this chunk
- value_type local_accum = 0;
+ ValueType local_accum = 0;
// user updates value with prefix value
if (ii < loop_bounds.end) lambda(ii, local_accum, false);
// perform team scan
// broadcast last value to rest of the team
member.team_broadcast(accum, team_size - 1);
}
+
+ return_val = accum;
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+ loop_bounds,
+ const FunctorType& lambda) {
+ using value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+
+ value_type scan_val;
+ parallel_scan(loop_bounds, lambda, scan_val);
}
template <typename iType, class Closure>
parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
iType, Impl::SYCLTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
- typename ReducerType::value_type value;
- reducer.init(value);
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
const iType tidx0 = loop_boundaries.member.item().get_local_id(0);
const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
i < loop_boundaries.end; i += grange0 * grange1)
closure(i, value);
- loop_boundaries.member.vector_reduce(reducer, value);
- loop_boundaries.member.team_reduce(reducer, value);
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
template <typename iType, class Closure, typename ValueType>
parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
iType, Impl::SYCLTeamMember>& loop_boundaries,
const Closure& closure, ValueType& result) {
- ValueType val;
- Kokkos::Sum<ValueType> reducer(val);
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
- reducer.init(reducer.reference());
+ wrapped_reducer_type wrapped_reducer(closure);
+ value_type value;
+ wrapped_reducer.init(&value);
const iType tidx0 = loop_boundaries.member.item().get_local_id(0);
const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
for (iType i = loop_boundaries.start + tidx0 * grange1 + tidx1;
i < loop_boundaries.end; i += grange0 * grange1)
- closure(i, val);
+ closure(i, value);
+
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ loop_boundaries.member.impl_team_reduce(wrapped_reducer, value);
- loop_boundaries.member.vector_reduce(reducer);
- loop_boundaries.member.team_reduce(reducer);
- result = reducer.reference();
+ wrapped_reducer.final(&value);
+ result = value;
}
//----------------------------------------------------------------------------
parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::SYCLTeamMember> const& loop_boundaries,
Closure const& closure, ReducerType const& reducer) {
- reducer.init(reducer.reference());
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, ReducerType,
+ value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
const iType grange1 = loop_boundaries.member.item().get_local_range(1);
for (iType i = loop_boundaries.start + tidx1; i < loop_boundaries.end;
i += grange1)
- closure(i, reducer.reference());
+ closure(i, value);
- loop_boundaries.member.vector_reduce(reducer);
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
/** \brief Intra-thread vector parallel_reduce.
parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::SYCLTeamMember> const& loop_boundaries,
Closure const& closure, ValueType& result) {
- result = ValueType();
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::SYCLTeamMember::execution_space>, Closure,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(closure);
+ value_type value;
+ wrapped_reducer.init(&value);
const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
const int grange1 = loop_boundaries.member.item().get_local_range(1);
for (iType i = loop_boundaries.start + tidx1; i < loop_boundaries.end;
i += grange1)
- closure(i, result);
+ closure(i, value);
- loop_boundaries.member.vector_reduce(Kokkos::Sum<ValueType>(result));
+ loop_boundaries.member.impl_vector_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ result = value;
}
//----------------------------------------------------------------------------
iType, Impl::SYCLTeamMember>& loop_boundaries,
const Closure& closure, const ReducerType& reducer) {
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
value_type accum;
reducer.init(accum);
// This sets i's val to i-1's contribution to make the latter shfl_up an
// exclusive scan -- the final accumulation of i's val will be included in
// the second closure call later.
- if (i < loop_boundaries.end && tidx1 > 0) closure(i - 1, val, false);
+ if (i - 1 < loop_boundaries.end && tidx1 > 0) closure(i - 1, val, false);
// Bottom up exclusive scan in triangular pattern where each SYCL thread is
// the root of a reduction tree from the zeroth "lane" to itself.
// [t] += [t-4] if t >= 4
// ...
for (int j = 1; j < static_cast<int>(grange1); j <<= 1) {
- value_type tmp = sg.shuffle_up(val, j);
+ value_type tmp =
+ Kokkos::Impl::SYCLReduction::shift_group_right(sg, val, j);
if (j <= static_cast<int>(tidx1)) {
reducer.join(val, tmp);
}
  // Update val with i's contribution and pull the running total into accum
  // for the next round
if (i < loop_boundaries.end) closure(i, val, true);
- accum = sg.shuffle(val, mask + vector_offset);
+ accum = Kokkos::Impl::SYCLReduction::select_from_group(
+ sg, val, mask + vector_offset);
}
+ reducer.reference() = accum;
}
/** \brief Intra-thread vector parallel exclusive prefix sum.
loop_boundaries,
const Closure& closure) {
using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
value_type dummy;
parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>{dummy});
}
+/** \brief Intra-thread vector parallel exclusive prefix sum.
+ *
+ * Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes in the
+ * thread and a scan operation is performed.
+ * The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+ loop_boundaries,
+ const Closure& closure, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
+ static_assert(std::is_same<closure_value_type, ValueType>::value,
+ "Non-matching value types of closure and return type");
+
+ ValueType accum;
+ parallel_scan(loop_boundaries, closure, Kokkos::Sum<ValueType>{accum});
+
+ return_val = accum;
+}
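+
+// Editor's illustration (not part of the patch): a minimal usage sketch of
+// the overload above, assuming a team member `team`, views `in` and `out`,
+// and an extent `n`:
+//
+//   int total;
+//   Kokkos::parallel_scan(
+//       Kokkos::ThreadVectorRange(team, n),
+//       [=](int i, int& partial, bool final) {
+//         if (final) out(i) = partial;  // exclusive prefix sum
+//         partial += in(i);
+//       },
+//       total);
+//   // total now holds the sum over [0, n)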
+
} // namespace Kokkos
namespace Kokkos {
const auto grange1 = item.get_local_range(1);
const auto sg = item.get_sub_group();
if (item.get_local_id(1) == 0) lambda(val);
- val = sg.shuffle(val, (sg.get_local_id() / grange1) * grange1);
+ val = Kokkos::Impl::SYCLReduction::select_from_group(
+ sg, val, (sg.get_local_id() / grange1) * grange1);
}
template <class FunctorType, class ValueType>
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_TEAM_POLICY_HPP
+#define KOKKOS_SYCL_TEAM_POLICY_HPP
+
+#include <SYCL/Kokkos_SYCL_Team.hpp>
+
+#include <algorithm>
+#include <vector>
+
+template <typename... Properties>
+class Kokkos::Impl::TeamPolicyInternal<Kokkos::SYCL, Properties...>
+ : public PolicyTraits<Properties...> {
+ public:
+ using execution_policy = TeamPolicyInternal;
+
+ using traits = PolicyTraits<Properties...>;
+
+ template <typename ExecSpace, typename... OtherProperties>
+ friend class TeamPolicyInternal;
+
+ private:
+ typename traits::execution_space m_space;
+ int m_league_size;
+ int m_team_size;
+ int m_vector_length;
+ size_t m_team_scratch_size[2];
+ size_t m_thread_scratch_size[2];
+ int m_chunk_size;
+ bool m_tune_team_size;
+ bool m_tune_vector_length;
+
+ public:
+ using execution_space = Kokkos::SYCL;
+
+ template <class... OtherProperties>
+ TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
+ m_league_size = p.m_league_size;
+ m_team_size = p.m_team_size;
+ m_vector_length = p.m_vector_length;
+ m_team_scratch_size[0] = p.m_team_scratch_size[0];
+ m_team_scratch_size[1] = p.m_team_scratch_size[1];
+ m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+ m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+ m_chunk_size = p.m_chunk_size;
+ m_space = p.m_space;
+ m_tune_team_size = p.m_tune_team_size;
+ m_tune_vector_length = p.m_tune_vector_length;
+ }
+
+ template <typename FunctorType>
+ int team_size_max(FunctorType const& f, ParallelForTag const&) const {
+ return internal_team_size_max_for(f);
+ }
+
+ template <class FunctorType>
+ inline int team_size_max(const FunctorType& f,
+ const ParallelReduceTag&) const {
+ return internal_team_size_max_reduce<void>(f);
+ }
+
+ template <class FunctorType, class ReducerType>
+ inline int team_size_max(const FunctorType& f, const ReducerType& /*r*/,
+ const ParallelReduceTag&) const {
+ return internal_team_size_max_reduce<typename ReducerType::value_type>(f);
+ }
+
+ template <typename FunctorType>
+ int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
+ return internal_team_size_recommended_for(f);
+ }
+
+ template <typename FunctorType>
+ inline int team_size_recommended(FunctorType const& f,
+ ParallelReduceTag const&) const {
+ return internal_team_size_recommended_reduce<void>(f);
+ }
+
+ template <class FunctorType, class ReducerType>
+ int team_size_recommended(FunctorType const& f, ReducerType const&,
+ ParallelReduceTag const&) const {
+ return internal_team_size_recommended_reduce<
+ typename ReducerType::value_type>(f);
+ }
+ inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+ inline bool impl_auto_team_size() const { return m_tune_team_size; }
+ // FIXME_SYCL This is correct in most cases, but not necessarily in case a
+ // custom sycl::queue is used to initialize the execution space.
+ static int vector_length_max() {
+ std::vector<size_t> sub_group_sizes =
+ execution_space{}
+ .impl_internal_space_instance()
+ ->m_queue->get_device()
+ .template get_info<sycl::info::device::sub_group_sizes>();
+ return *std::max_element(sub_group_sizes.begin(), sub_group_sizes.end());
+ }
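+  // Editor's note: e.g. a device reporting sub_group_sizes {8, 16, 32}
+  // yields a maximum vector length of 32.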
+
+ private:
+ static int verify_requested_vector_length(int requested_vector_length) {
+ int test_vector_length =
+ std::min(requested_vector_length, vector_length_max());
+
+ // Allow only power-of-two vector_length
+ if (!(is_integral_power_of_two(test_vector_length))) {
+ int test_pow2 = 1;
+ while (test_pow2 < test_vector_length) test_pow2 <<= 1;
+ test_vector_length = test_pow2 >> 1;
+ }
+
+ return test_vector_length;
+ }
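+
+  // Editor's note: e.g. a request of 6 (with vector_length_max() >= 8) is
+  // rounded down to 4, the largest power of two not exceeding the request.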
+
+ public:
+ static int scratch_size_max(int level) {
+    return level == 0
+               ? 1024 * 32          // FIXME_SYCL arbitrarily set to 32kB
+               : 20 * 1024 * 1024;  // FIXME_SYCL arbitrarily set to 20MB
+ }
+ inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
+ inline void impl_set_team_size(size_t size) { m_team_size = size; }
+ int impl_vector_length() const { return m_vector_length; }
+
+ int team_size() const { return m_team_size; }
+
+ int league_size() const { return m_league_size; }
+
+ size_t scratch_size(int level, int team_size_ = -1) const {
+ if (team_size_ < 0) team_size_ = m_team_size;
+ return m_team_scratch_size[level] +
+ team_size_ * m_thread_scratch_size[level];
+ }
+
+ size_t team_scratch_size(int level) const {
+ return m_team_scratch_size[level];
+ }
+
+ size_t thread_scratch_size(int level) const {
+ return m_thread_scratch_size[level];
+ }
+
+ typename traits::execution_space space() const { return m_space; }
+
+ TeamPolicyInternal()
+ : m_space(typename traits::execution_space()),
+ m_league_size(0),
+ m_team_size(-1),
+ m_vector_length(0),
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(vector_length_max()),
+ m_tune_team_size(false),
+ m_tune_vector_length(false) {}
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ int team_size_request, int vector_length_request = 1)
+ : m_space(space_),
+ m_league_size(league_size_),
+ m_team_size(team_size_request),
+ m_vector_length(
+ (vector_length_request > 0)
+ ? verify_requested_vector_length(vector_length_request)
+ : (verify_requested_vector_length(1))),
+ m_team_scratch_size{0, 0},
+ m_thread_scratch_size{0, 0},
+ m_chunk_size(vector_length_max()),
+ m_tune_team_size(bool(team_size_request <= 0)),
+ m_tune_vector_length(bool(vector_length_request <= 0)) {
+ // FIXME_SYCL Check that league size is permissible,
+ // https://github.com/intel/llvm/pull/4064
+
+ // Make sure total block size is permissible
+ if (m_team_size * m_vector_length >
+ static_cast<int>(
+ m_space.impl_internal_space_instance()->m_maxWorkgroupSize)) {
+ Impl::throw_runtime_exception(
+ std::string("Kokkos::TeamPolicy<SYCL> the team size is too large. "
+ "Team size x vector length is " +
+ std::to_string(m_team_size * m_vector_length) +
+ " but must be smaller than ") +
+ std::to_string(
+ m_space.impl_internal_space_instance()->m_maxWorkgroupSize));
+ }
+ }
+
+ /** \brief Specify league size, request team size */
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
+ /** \brief Specify league size and team size, request vector length*/
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
+
+ /** \brief Specify league size, request team size and vector length*/
+ TeamPolicyInternal(const execution_space space_, int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, -1, -1) {}
+
+ TeamPolicyInternal(int league_size_, int team_size_request,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+ team_size_request, vector_length_request) {}
+
+ TeamPolicyInternal(int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+ int vector_length_request = 1)
+ : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+ vector_length_request) {}
+
+ /** \brief Specify league size and team size, request vector length*/
+ TeamPolicyInternal(int league_size_, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, -1) {}
+
+ /** \brief Specify league size, request team size and vector length*/
+ TeamPolicyInternal(int league_size_,
+ const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           -1, -1) {}
+
+ int chunk_size() const { return m_chunk_size; }
+
+ TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
+ m_chunk_size = chunk_size_;
+ return *this;
+ }
+
+ /** \brief set per team scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level,
+ PerTeamValue const& per_team) {
+ m_team_scratch_size[level] = per_team.value;
+ return *this;
+ }
+
+ /** \brief set per thread scratch size for a specific level of the scratch
+ * hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level,
+ PerThreadValue const& per_thread) {
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
+
+ /** \brief set per thread and per team scratch size for a specific level of
+ * the scratch hierarchy */
+ TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
+ PerThreadValue const& per_thread) {
+ m_team_scratch_size[level] = per_team.value;
+ m_thread_scratch_size[level] = per_thread.value;
+ return *this;
+ }
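+
+  // Editor's illustration (not part of the patch): a typical policy built
+  // with these setters, assuming a league size of 64:
+  //
+  //   Kokkos::TeamPolicy<Kokkos::SYCL> policy(64, Kokkos::AUTO, Kokkos::AUTO);
+  //   policy.set_scratch_size(0, Kokkos::PerTeam(1024),
+  //                           Kokkos::PerThread(32));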
+
+ using member_type = Kokkos::Impl::SYCLTeamMember;
+
+ protected:
+ template <class FunctorType>
+ int internal_team_size_max_for(const FunctorType& /*f*/) const {
+    // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
+    // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
+    // total:
+    // 2*sizeof(double) + m_team_scratch_size[0]
+    // + m_team_size * (sizeof(double) + m_thread_scratch_size[0])
+ const int max_threads_for_memory =
+ (space().impl_internal_space_instance()->m_maxShmemPerBlock -
+ 2 * sizeof(double) - m_team_scratch_size[0]) /
+ (sizeof(double) + m_thread_scratch_size[0]);
+ return std::min({
+ int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
+ // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ 256,
+#endif
+ max_threads_for_memory
+ }) /
+ impl_vector_length();
+ }
+
+ template <class ValueType, class FunctorType>
+ int internal_team_size_max_reduce(const FunctorType& f) const {
+ using Analysis =
+ FunctorAnalysis<FunctorPatternInterface::REDUCE, TeamPolicyInternal,
+ FunctorType, ValueType>;
+ using value_type = typename Analysis::value_type;
+ const int value_count = Analysis::value_count(f);
+
+    // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
+    // reducer_memsize = sizeof(value_type) * m_team_size * value_count
+    // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
+    // total:
+    // 2*sizeof(double) + m_team_scratch_size[0]
+    // + m_team_size * (sizeof(double) + sizeof(value_type)*value_count
+    //                  + m_thread_scratch_size[0])
+ const int max_threads_for_memory =
+ (space().impl_internal_space_instance()->m_maxShmemPerBlock -
+ 2 * sizeof(double) - m_team_scratch_size[0]) /
+ (sizeof(double) + sizeof(value_type) * value_count +
+ m_thread_scratch_size[0]);
+ return std::min<int>({
+ int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
+ // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
+#if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ 256,
+#endif
+ max_threads_for_memory
+ }) /
+ impl_vector_length();
+ }
+
+ template <class FunctorType>
+ int internal_team_size_recommended_for(const FunctorType& f) const {
+ // FIXME_SYCL improve
+ return 1 << Kokkos::Impl::int_log2(internal_team_size_max_for(f));
+ }
+
+ template <class ValueType, class FunctorType>
+ int internal_team_size_recommended_reduce(const FunctorType& f) const {
+ // FIXME_SYCL improve
+ return 1 << Kokkos::Impl::int_log2(
+ internal_team_size_max_reduce<ValueType>(f));
+ }
+};
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SYCL_UNIQUE_TOKEN_HPP
#define KOKKOS_SYCL_UNIQUE_TOKEN_HPP
#include <impl/Kokkos_ConcurrentBitset.hpp>
-#include <Kokkos_SYCL_Space.hpp>
+#include <SYCL/Kokkos_SYCL_Space.hpp>
#include <Kokkos_UniqueToken.hpp>
namespace Kokkos {
-namespace Experimental {
namespace Impl {
Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> sycl_global_unique_token_locks(
bool deallocate = false);
}
+namespace Experimental {
+
// Both global and instance UniqueTokens are implemented in the same way:
// the global version has one shared static lock array underneath, but it
// can't be a static member variable since we need to access it on device.
using size_type = int32_t;
explicit UniqueToken(execution_space const& = execution_space())
- : m_locks(Impl::sycl_global_unique_token_locks()) {}
+ : m_locks(Kokkos::Impl::sycl_global_unique_token_locks()) {}
KOKKOS_DEFAULTED_FUNCTION
UniqueToken(const UniqueToken&) = default;
/// \brief acquire value such that 0 <= value < size()
KOKKOS_INLINE_FUNCTION
size_type impl_acquire() const {
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20250000
+ auto item = sycl::ext::oneapi::this_work_item::get_nd_item<3>();
+#else
auto item = sycl::ext::oneapi::experimental::this_nd_item<3>();
+#endif
std::size_t threadIdx[3] = {item.get_local_id(2), item.get_local_id(1),
item.get_local_id(0)};
std::size_t blockIdx[3] = {item.get_group(2), item.get_group(1),
- item.get_group(0)};
+ item.get_group(0)};
std::size_t blockDim[3] = {item.get_local_range(2), item.get_local_range(1),
item.get_local_range(0)};
}
// Make sure that all writes in the previous lock owner are visible to me
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
return idx;
}
KOKKOS_INLINE_FUNCTION
void release(size_type idx) const noexcept {
// Make sure my writes are visible to the next lock owner
-#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
desul::atomic_thread_fence(desul::MemoryOrderRelease(),
desul::MemoryScopeDevice());
-#else
- Kokkos::memory_fence();
-#endif
(void)Kokkos::atomic_exchange(&m_locks(idx), 0);
}
};
public:
UniqueToken()
: UniqueToken<SYCL, UniqueTokenScope::Global>(
- Kokkos::Experimental::SYCL().concurrency()) {}
+ Kokkos::SYCL().concurrency()) {}
explicit UniqueToken(execution_space const& arg)
: UniqueToken<SYCL, UniqueTokenScope::Global>(
- Kokkos::Experimental::SYCL().concurrency(), arg) {}
+ Kokkos::SYCL().concurrency(), arg) {}
explicit UniqueToken(size_type max_size)
: UniqueToken<SYCL, UniqueTokenScope::Global>(max_size) {}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_WORKGROUP_REDUCTION_HPP
+#define KOKKOS_SYCL_WORKGROUP_REDUCTION_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos::Impl::SYCLReduction {
+
+template <int N>
+struct TrivialWrapper {
+ std::byte array[N];
+};
+
+// shuffle down
+template <typename T>
+T shift_group_left(sycl::sub_group sg, T x,
+ sycl::sub_group::linear_id_type delta) {
+ if constexpr (std::is_trivially_copyable_v<T>)
+ return sycl::shift_group_left(sg, x, delta);
+ else {
+ auto tmp = sycl::shift_group_left(
+ sg, reinterpret_cast<TrivialWrapper<sizeof(T)>&>(x), delta);
+ return reinterpret_cast<T&>(tmp);
+ }
+}
+
+// shuffle up
+template <typename T>
+T shift_group_right(sycl::sub_group sg, T x,
+ sycl::sub_group::linear_id_type delta) {
+ if constexpr (std::is_trivially_copyable_v<T>)
+ return sycl::shift_group_right(sg, x, delta);
+ else {
+ auto tmp = sycl::shift_group_right(
+ sg, reinterpret_cast<TrivialWrapper<sizeof(T)>&>(x), delta);
+ return reinterpret_cast<T&>(tmp);
+ }
+}
+
+// shuffle
+template <typename T>
+T select_from_group(sycl::sub_group sg, T x,
+ sycl::sub_group::id_type remote_local_id) {
+ if constexpr (std::is_trivially_copyable_v<T>)
+ return sycl::select_from_group(sg, x, remote_local_id);
+ else {
+ auto tmp = sycl::select_from_group(
+ sg, reinterpret_cast<TrivialWrapper<sizeof(T)>&>(x), remote_local_id);
+ return reinterpret_cast<T&>(tmp);
+ }
+}
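+
+// Editor's note: for a T that is not trivially copyable, the wrappers above
+// transport the raw bytes of x through TrivialWrapper<sizeof(T)>; this
+// assumes T's byte image stays valid when moved between work-items.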
+
+// FIXME_SYCL For some types, shuffle reductions are competitive with local
+// memory reductions but they are significantly slower for the value type used
+// in combined reductions with multiple double arguments.
+template <class ReducerType>
+inline constexpr bool use_shuffle_based_algorithm = false;
+// std::is_reference_v<typename ReducerType::reference_type>;
+
+template <typename ValueType, typename ReducerType, int dim>
+std::enable_if_t<!use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
+ sycl::nd_item<dim>& item, sycl::local_accessor<ValueType> local_mem,
+ sycl_device_ptr<ValueType> results_ptr,
+ sycl::global_ptr<ValueType> device_accessible_result_ptr,
+ const unsigned int value_count_, const ReducerType& final_reducer,
+ bool final, unsigned int max_size) {
+ const unsigned int value_count =
+ std::is_reference_v<typename ReducerType::reference_type> ? 1
+ : value_count_;
+ const int local_id = item.get_local_linear_id();
+
+ // Perform the actual workgroup reduction in each subgroup
+ // separately.
+ auto sg = item.get_sub_group();
+ auto* result = &local_mem[local_id * value_count];
+ const int id_in_sg = sg.get_local_id()[0];
+ const auto local_range =
+ std::min<unsigned int>(sg.get_local_range()[0], max_size);
+ const auto upper_stride_bound =
+ std::min<unsigned int>(local_range - id_in_sg, max_size - local_id);
+ for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+ if (stride < upper_stride_bound)
+ final_reducer.join(result, &local_mem[(local_id + stride) * value_count]);
+ sycl::group_barrier(sg);
+ }
+ sycl::group_barrier(item.get_group());
+
+ // Do the final reduction only using the first subgroup.
+ if (sg.get_group_id()[0] == 0) {
+ const unsigned int n_subgroups = sg.get_group_range()[0];
+ const int max_subgroup_size = sg.get_max_local_range()[0];
+ auto* result_ = &local_mem[id_in_sg * max_subgroup_size * value_count];
+ // In case the number of subgroups is larger than the range of
+ // the first subgroup, we first combine the items with a higher
+ // index.
+ for (unsigned int offset = local_range; offset < n_subgroups;
+ offset += local_range)
+ if (id_in_sg + offset < n_subgroups)
+ final_reducer.join(
+ result_,
+ &local_mem[(id_in_sg + offset) * max_subgroup_size * value_count]);
+ sycl::group_barrier(sg);
+
+ // Then, we proceed as before.
+ for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+ if (id_in_sg + stride < n_subgroups)
+ final_reducer.join(
+ result_,
+ &local_mem[(id_in_sg + stride) * max_subgroup_size * value_count]);
+ sycl::group_barrier(sg);
+ }
+
+ // Finally, we copy the workgroup results back to global memory
+ // to be used in the next iteration. If this is the last
+    // iteration, i.e., there is only one workgroup, we also call
+    // final() if necessary.
+ if (id_in_sg == 0) {
+ if (final) {
+ final_reducer.final(&local_mem[0]);
+ if (device_accessible_result_ptr != nullptr)
+ final_reducer.copy(&device_accessible_result_ptr[0], &local_mem[0]);
+ else
+ final_reducer.copy(&results_ptr[0], &local_mem[0]);
+ } else
+ final_reducer.copy(
+ &results_ptr[(item.get_group_linear_id()) * value_count],
+ &local_mem[0]);
+ }
+ }
+}
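+
+// Editor's illustration: with value_count == 1, max_size == 128, and a
+// subgroup size of 32, the first stage leaves one partial per subgroup at
+// local_mem[0], [32], [64], and [96]; the first subgroup then combines
+// them into local_mem[0].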
+
+template <typename ValueType, typename ReducerType, int dim>
+std::enable_if_t<use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
+ sycl::nd_item<dim>& item, sycl::local_accessor<ValueType> local_mem,
+ ValueType local_value, sycl_device_ptr<ValueType> results_ptr,
+ sycl::global_ptr<ValueType> device_accessible_result_ptr,
+ const ReducerType& final_reducer, bool final, unsigned int max_size) {
+ const auto local_id = item.get_local_linear_id();
+
+ // Perform the actual workgroup reduction in each subgroup
+ // separately.
+ auto sg = item.get_sub_group();
+ const int id_in_sg = sg.get_local_id()[0];
+ const int local_range = std::min<int>(sg.get_local_range()[0], max_size);
+
+ const auto upper_stride_bound =
+ std::min<int>(local_range - id_in_sg, max_size - local_id);
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ auto shuffle_combine = [&](int stride) {
+ if (stride < local_range) {
+ auto tmp = Kokkos::Impl::SYCLReduction::shift_group_left(sg, local_value,
+ stride);
+ if (stride < upper_stride_bound) final_reducer.join(&local_value, &tmp);
+ }
+ };
+ shuffle_combine(1);
+ shuffle_combine(2);
+ shuffle_combine(4);
+ shuffle_combine(8);
+ shuffle_combine(16);
+ KOKKOS_ASSERT(local_range <= 32);
+#else
+ for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+ auto tmp =
+ Kokkos::Impl::SYCLReduction::shift_group_left(sg, local_value, stride);
+ if (stride < upper_stride_bound) final_reducer.join(&local_value, &tmp);
+ }
+#endif
+
+ // Copy the subgroup results into the first positions of the
+ // reduction array.
+ const int max_subgroup_size = sg.get_max_local_range()[0];
+ const int n_active_subgroups =
+ (max_size + max_subgroup_size - 1) / max_subgroup_size;
+ const int sg_group_id = sg.get_group_id()[0];
+ if (id_in_sg == 0 && sg_group_id <= n_active_subgroups)
+ local_mem[sg_group_id] = local_value;
+
+ item.barrier(sycl::access::fence_space::local_space);
+
+ // Do the final reduction only using the first subgroup.
+ if (sg.get_group_id()[0] == 0) {
+ auto sg_value = local_mem[id_in_sg < n_active_subgroups ? id_in_sg : 0];
+
+ // In case the number of subgroups is larger than the range of
+ // the first subgroup, we first combine the items with a higher
+ // index.
+ if (n_active_subgroups > local_range) {
+ for (int offset = local_range; offset < n_active_subgroups;
+ offset += local_range)
+ if (id_in_sg + offset < n_active_subgroups) {
+ final_reducer.join(&sg_value, &local_mem[(id_in_sg + offset)]);
+ }
+ sg.barrier();
+ }
+
+ // Then, we proceed as before.
+#if defined(KOKKOS_ARCH_INTEL_GPU) || defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU)
+ auto shuffle_combine_sg = [&](int stride) {
+ if (stride < local_range) {
+ auto tmp =
+ Kokkos::Impl::SYCLReduction::shift_group_left(sg, sg_value, stride);
+ if (id_in_sg + stride < n_active_subgroups)
+ final_reducer.join(&sg_value, &tmp);
+ }
+ };
+ shuffle_combine_sg(1);
+ shuffle_combine_sg(2);
+ shuffle_combine_sg(4);
+ shuffle_combine_sg(8);
+ shuffle_combine_sg(16);
+ KOKKOS_ASSERT(local_range <= 32);
+#else
+ for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+ auto tmp =
+ Kokkos::Impl::SYCLReduction::shift_group_left(sg, sg_value, stride);
+ if (id_in_sg + stride < n_active_subgroups)
+ final_reducer.join(&sg_value, &tmp);
+ }
+#endif
+
+ // Finally, we copy the workgroup results back to global memory
+ // to be used in the next iteration. If this is the last
+    // iteration, i.e., there is only one workgroup, we also call
+    // final() if necessary.
+ if (id_in_sg == 0) {
+ if (final) {
+ final_reducer.final(&sg_value);
+ if (device_accessible_result_ptr != nullptr)
+ device_accessible_result_ptr[0] = sg_value;
+ else
+ results_ptr[0] = sg_value;
+ } else
+ results_ptr[(item.get_group_linear_id())] = sg_value;
+ }
+ }
+}
+
+} // namespace Kokkos::Impl::SYCLReduction
+
+#endif /* KOKKOS_SYCL_WORKGROUP_REDUCTION_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_ZEROMEMSET_HPP
+#define KOKKOS_SYCL_ZEROMEMSET_HPP
+
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+#include <SYCL/Kokkos_SYCL.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct ZeroMemset<Kokkos::SYCL> {
+ ZeroMemset(const Kokkos::SYCL& exec_space, void* dst, size_t cnt) {
+ auto event =
+ exec_space.impl_internal_space_instance()->m_queue->memset(dst, 0, cnt);
+#ifndef KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+ exec_space.impl_internal_space_instance()
+ ->m_queue->ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
+#endif
+ }
+};
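+
+// Editor's note: this specialization is what zero-filling paths on SYCL,
+// e.g. value-initializing a View's allocation, are expected to dispatch to.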
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // !defined(KOKKOS_SYCL_ZEROMEMSET_HPP)
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Core.hpp>
-#include <Kokkos_Serial.hpp>
+#include <Serial/Kokkos_Serial.hpp>
#include <impl/Kokkos_Traits.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_ExecSpaceManager.hpp>
namespace Kokkos {
namespace Impl {
+std::vector<SerialInternal*> SerialInternal::all_instances;
+std::mutex SerialInternal::all_instances_mutex;
+
bool SerialInternal::is_initialized() { return m_is_initialized; }
void SerialInternal::initialize() {
Impl::SharedAllocationRecord<void, void>::tracking_enable();
- // Init the array of locks used for arbitrarily sized atomics
- Impl::init_lock_array_host_space();
-
m_is_initialized = true;
+
+ // guard pushing to all_instances
+ {
+ std::scoped_lock lock(all_instances_mutex);
+ all_instances.push_back(this);
+ }
}
void SerialInternal::finalize() {
m_thread_team_data.scratch_assign(nullptr, 0, 0, 0, 0, 0);
}
- Kokkos::Profiling::finalize();
-
m_is_initialized = false;
+
+ // guard erasing from all_instances
+ {
+ std::scoped_lock lock(all_instances_mutex);
+ auto it = std::find(all_instances.begin(), all_instances.end(), this);
+ if (it == all_instances.end())
+ Kokkos::abort(
+ "Execution space instance to be removed couldn't be found!");
+ std::swap(*it, all_instances.back());
+ all_instances.pop_back();
+ }
}
SerialInternal& SerialInternal::singleton() {
m_thread_team_data.disband_team();
m_thread_team_data.disband_pool();
- space.deallocate("Kokkos::Serial::scratch_mem",
- m_thread_team_data.scratch_buffer(),
- m_thread_team_data.scratch_bytes());
+    // Use impl_deallocate since it doesn't fence; a fence here would
+    // interfere with using m_instance_mutex to ensure proper kernel
+    // enqueuing.
+ space.impl_deallocate("Kokkos::Serial::scratch_mem",
+ m_thread_team_data.scratch_buffer(),
+ m_thread_team_data.scratch_bytes());
}
if (pool_reduce_bytes < old_pool_reduce) {
HostThreadTeamData::scratch_size(pool_reduce_bytes, team_reduce_bytes,
team_shared_bytes, thread_local_bytes);
- void* ptr = nullptr;
- try {
- ptr = space.allocate("Kokkos::Serial::scratch_mem", alloc_bytes);
- } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& failure) {
- // For now, just rethrow the error message the existing way
- Kokkos::Impl::throw_runtime_exception(failure.get_error_message());
- }
+ void* ptr = space.allocate("Kokkos::Serial::scratch_mem", alloc_bytes);
m_thread_team_data.scratch_assign(static_cast<char*>(ptr), alloc_bytes,
pool_reduce_bytes, team_reduce_bytes,
} // namespace Impl
Serial::Serial()
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- : m_space_instance(&Impl::SerialInternal::singleton()) {
-}
-#else
: m_space_instance(&Impl::SerialInternal::singleton(),
- [](Impl::SerialInternal*) {}) {
+ [](Impl::SerialInternal*) {}) {}
+
+Serial::Serial(NewInstance)
+ : m_space_instance(new Impl::SerialInternal, [](Impl::SerialInternal* ptr) {
+ ptr->finalize();
+ delete ptr;
+ }) {
+ m_space_instance->initialize();
}
-#endif
void Serial::print_configuration(std::ostream& os, bool /*verbose*/) const {
os << "Host Serial Execution Space:\n";
os << " KOKKOS_ENABLE_SERIAL: yes\n";
- os << "Serial Atomics:\n";
- os << " KOKKOS_ENABLE_SERIAL_ATOMICS: ";
-#ifdef KOKKOS_ENABLE_SERIAL_ATOMICS
- os << "yes\n";
-#else
- os << "no\n";
+#ifdef KOKKOS_ENABLE_ATOMICS_BYPASS
+ os << "Kokkos atomics disabled\n";
#endif
os << "\nSerial Runtime Configuration:\n";
} // namespace Impl
-#ifdef KOKKOS_ENABLE_CXX14
-namespace Tools {
-namespace Experimental {
-constexpr DeviceType DeviceTypeTraits<Serial>::id;
-}
-} // namespace Tools
-#endif
-
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
/// \file Kokkos_Serial.hpp
/// \brief Declaration and definition of Kokkos::Serial device.
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_SERIAL_HPP
#define KOKKOS_SERIAL_HPP
#include <cstddef>
#include <iosfwd>
+#include <algorithm>
+#include <iterator>
+#include <vector>
#include <mutex>
#include <thread>
#include <Kokkos_Core_fwd.hpp>
-#include <Kokkos_TaskScheduler.hpp>
#include <Kokkos_Layout.hpp>
#include <Kokkos_HostSpace.hpp>
#include <Kokkos_ScratchSpace.hpp>
static SerialInternal& singleton();
- std::mutex m_thread_team_data_mutex;
+ std::mutex m_instance_mutex;
+
+ static std::vector<SerialInternal*> all_instances;
+ static std::mutex all_instances_mutex;
// Resize thread team data scratch memory
void resize_thread_team_data(size_t pool_reduce_bytes,
};
} // namespace Impl
+struct NewInstance {
+ explicit NewInstance() = default;
+};
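+
+// Editor's illustration (not part of the patch): requesting an independent
+// Serial instance, assuming a functor `f` and extent `n`:
+//
+//   Kokkos::Serial exec{Kokkos::NewInstance{}};
+//   Kokkos::parallel_for(Kokkos::RangePolicy<Kokkos::Serial>(exec, 0, n), f);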
+
/// \class Serial
/// \brief Kokkos device for non-parallel execution
///
Serial();
+ explicit Serial(NewInstance);
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ template <typename T = void>
+ KOKKOS_DEPRECATED_WITH_COMMENT(
+ "Serial execution space should be constructed explicitly.")
+ Serial(NewInstance)
+ : Serial(NewInstance{}) {}
+#endif
+
/// \brief True if and only if this method is being called in a
/// thread-parallel function.
///
/// For the Serial device, this method <i>always</i> returns false,
/// because parallel_for or parallel_reduce with the Serial device
/// always execute sequentially.
- inline static int in_parallel() { return false; }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED inline static int in_parallel() { return false; }
+#endif
/// \brief Wait until all dispatched functors complete.
///
name,
Kokkos::Tools::Experimental::SpecialSynchronizationCases::
GlobalDeviceSynchronization,
- []() {}); // TODO: correct device ID
+ []() {
+ std::lock_guard<std::mutex> lock_all_instances(
+ Impl::SerialInternal::all_instances_mutex);
+ for (auto* instance_ptr : Impl::SerialInternal::all_instances) {
+ std::lock_guard<std::mutex> lock_instance(
+ instance_ptr->m_instance_mutex);
+ }
+ }); // TODO: correct device ID
Kokkos::memory_fence();
}
"Kokkos::Serial::fence: Unnamed Instance Fence") const {
Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Serial>(
name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
- []() {}); // TODO: correct device ID
+ [this]() {
+ auto* internal_instance = this->impl_internal_space_instance();
+ std::lock_guard<std::mutex> lock(internal_instance->m_instance_mutex);
+ }); // TODO: correct device ID
Kokkos::memory_fence();
}
/** \brief Return the maximum amount of concurrency. */
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int concurrency() { return 1; }
+#else
+ int concurrency() const { return 1; }
+#endif
//! Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
static const char* name();
Impl::SerialInternal* impl_internal_space_instance() const {
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- return m_space_instance;
-#else
return m_space_instance.get();
-#endif
}
private:
-#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
- Impl::SerialInternal* m_space_instance;
-#else
Kokkos::Impl::HostSharedPtr<Impl::SerialInternal> m_space_instance;
-#endif
+ friend bool operator==(Serial const& lhs, Serial const& rhs) {
+ return lhs.impl_internal_space_instance() ==
+ rhs.impl_internal_space_instance();
+ }
+ friend bool operator!=(Serial const& lhs, Serial const& rhs) {
+ return !(lhs == rhs);
+ }
//--------------------------------------------------------------------------
};
namespace Kokkos {
namespace Impl {
-// We only need to provide a specialization for Serial if there is a host
-// parallel execution space since the specialization for
-// DefaultHostExecutionSpace is defined elsewhere.
-struct DummyExecutionSpace;
-template <class DT, class... DP>
-struct ZeroMemset<
- std::conditional_t<!std::is_same<Serial, DefaultHostExecutionSpace>::value,
- Serial, DummyExecutionSpace>,
- DT, DP...> : public ZeroMemset<DefaultHostExecutionSpace, DT, DP...> {
- using Base = ZeroMemset<DefaultHostExecutionSpace, DT, DP...>;
- using Base::Base;
-
- ZeroMemset(const Serial&, const View<DT, DP...>& dst,
- typename View<DT, DP...>::const_value_type& value)
- : Base(dst, value) {}
-};
-
template <>
struct MemorySpaceAccess<Kokkos::Serial::memory_space,
Kokkos::Serial::scratch_memory_space> {
} // namespace Impl
} // namespace Kokkos
+namespace Kokkos::Experimental {
+
+template <class... Args>
+std::vector<Serial> partition_space(const Serial&, Args...) {
+ static_assert(
+ (... && std::is_arithmetic_v<Args>),
+ "Kokkos Error: partitioning arguments must be integers or floats");
+ std::vector<Serial> instances;
+ instances.reserve(sizeof...(Args));
+ std::generate_n(std::back_inserter(instances), sizeof...(Args),
+ []() { return Serial{NewInstance{}}; });
+ return instances;
+}
+
+template <class T>
+std::vector<Serial> partition_space(const Serial&,
+ std::vector<T> const& weights) {
+ static_assert(
+ std::is_arithmetic_v<T>,
+ "Kokkos Error: partitioning arguments must be integers or floats");
+
+ // We only care about the number of instances to create and ignore weights
+ // otherwise.
+ std::vector<Serial> instances;
+ instances.reserve(weights.size());
+ std::generate_n(std::back_inserter(instances), weights.size(),
+ []() { return Serial{NewInstance{}}; });
+ return instances;
+}
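+
+// Editor's illustration: both overloads return fresh, independent
+// instances; for Serial the arguments only determine the count:
+//
+//   auto v = Kokkos::Experimental::partition_space(Kokkos::Serial{}, 1, 1);
+//   assert(v[0] != v[1]);  // distinct internal instances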
+
+} // namespace Kokkos::Experimental
+
#include <Serial/Kokkos_Serial_Parallel_Range.hpp>
#include <Serial/Kokkos_Serial_Parallel_MDRange.hpp>
#include <Serial/Kokkos_Serial_Parallel_Team.hpp>
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
#include <Serial/Kokkos_Serial_Task.hpp>
+#endif
#include <Serial/Kokkos_Serial_UniqueToken.hpp>
#endif // defined( KOKKOS_ENABLE_SERIAL )
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SERIAL_MDRANGEPOLICY_HPP_
+#define KOKKOS_SERIAL_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Serial, ThreadAndVector>
+ : HostBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
+#define KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Serial> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+ const iterate_type m_iter;
+
+ void exec() const {
+ const typename Policy::member_type e = m_iter.m_rp.m_num_tiles;
+ for (typename Policy::member_type i = 0; i < e; ++i) {
+ m_iter(i);
+ }
+ }
+
+ public:
+ inline void execute() const {
+    // Locking the instance mutex (below) caused a possibly codegen-related
+    // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+    // skipped when atomics are bypassed:
+    // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels are running sequentially even when using multiple
+ // threads
+ auto* internal_instance =
+ m_iter.m_rp.space().impl_internal_space_instance();
+ std::lock_guard<std::mutex> lock(internal_instance->m_instance_mutex);
+#endif
+ this->exec();
+ }
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ /**
+     * 1024 here is just our guess for a reasonable max tile size;
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+ inline ParallelFor(const FunctorType& arg_functor,
+ const MDRangePolicy& arg_policy)
+ : m_iter(arg_policy, arg_functor) {}
+};
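+
+// Editor's illustration (not part of the patch): this specialization
+// serves, e.g., assuming extents m and n and a functor `f`:
+//
+//   Kokkos::parallel_for(
+//       Kokkos::MDRangePolicy<Kokkos::Serial, Kokkos::Rank<2>>({0, 0},
+//                                                              {m, n}),
+//       f);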
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, Kokkos::Serial> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename MDRangePolicy::work_tag;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, CombinedFunctorReducerType, WorkTag, reference_type>;
+ const iterate_type m_iter;
+ const pointer_type m_result_ptr;
+
+ inline void exec(reference_type update) const {
+ const typename Policy::member_type e = m_iter.m_rp.m_num_tiles;
+ for (typename Policy::member_type i = 0; i < e; ++i) {
+ m_iter(i, update);
+ }
+ }
+
+ public:
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy&, const Functor&) {
+ /**
+     * 1024 here is just our guess for a reasonable max tile size;
+ * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+ inline void execute() const {
+ const ReducerType& reducer = m_iter.m_func.get_reducer();
+ const size_t pool_reduce_size = reducer.value_size();
+ const size_t team_reduce_size = 0; // Never shrinks
+ const size_t team_shared_size = 0; // Never shrinks
+ const size_t thread_local_size = 0; // Never shrinks
+
+ auto* internal_instance =
+ m_iter.m_rp.space().impl_internal_space_instance();
+
+    // Locking the instance mutex (below) caused a possibly codegen-related
+    // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+    // skipped when atomics are bypassed:
+    // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels are running sequentially even when using multiple
+ // threads, lock resize_thread_team_data
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
+ internal_instance->resize_thread_team_data(
+ pool_reduce_size, team_reduce_size, team_shared_size,
+ thread_local_size);
+
+ pointer_type ptr =
+ m_result_ptr
+ ? m_result_ptr
+ : pointer_type(
+ internal_instance->m_thread_team_data.pool_reduce_local());
+
+ reference_type update = reducer.init(ptr);
+
+ this->exec(update);
+
+ reducer.final(ptr);
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const MDRangePolicy& arg_policy,
+ const ViewType& arg_result_view)
+ : m_iter(arg_policy, arg_functor_reducer),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(Kokkos::is_view<ViewType>::value,
+ "Kokkos::Serial reduce result must be a View");
+
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Serial reduce result must be a View accessible from "
+ "HostSpace");
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKO_SERIAL_PARALLEL_RANGE_HPP
-#define KOKKO_SERIAL_PARALLEL_RANGE_HPP
+#ifndef KOKKOS_SERIAL_PARALLEL_RANGE_HPP
+#define KOKKOS_SERIAL_PARALLEL_RANGE_HPP
#include <Kokkos_Parallel.hpp>
const Policy m_policy;
template <class TagType>
- std::enable_if_t<std::is_void<TagType>::value> exec() const {
+ std::enable_if_t<std::is_void_v<TagType>> exec() const {
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
m_functor(i);
}
template <class TagType>
- std::enable_if_t<!std::is_void<TagType>::value> exec() const {
+ std::enable_if_t<!std::is_void_v<TagType>> exec() const {
const TagType t{};
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
public:
inline void execute() const {
+    // Locking the instance mutex (below) caused a possibly codegen-related
+    // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+    // skipped when atomics are bypassed:
+    // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels are running sequentially even when using multiple
+ // threads
+ auto* internal_instance = m_policy.space().impl_internal_space_instance();
+ std::lock_guard<std::mutex> lock(internal_instance->m_instance_mutex);
+#endif
this->template exec<typename Policy::work_tag>();
}
/*--------------------------------------------------------------------------*/
-template <class FunctorType, class ReducerType, class... Traits>
-class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
Kokkos::Serial> {
private:
- using Policy = Kokkos::RangePolicy<Traits...>;
- using WorkTag = typename Policy::work_tag;
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
-
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
+ const CombinedFunctorReducerType m_functor_reducer;
const Policy m_policy;
- const ReducerType m_reducer;
const pointer_type m_result_ptr;
template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<std::is_void_v<TagType>> exec(
reference_type update) const {
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(i, update);
+ m_functor_reducer.get_functor()(i, update);
}
}
template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<!std::is_void_v<TagType>> exec(
reference_type update) const {
const TagType t{};
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(t, i, update);
+ m_functor_reducer.get_functor()(t, i, update);
}
}
public:
inline void execute() const {
const size_t pool_reduce_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+ m_functor_reducer.get_reducer().value_size();
const size_t team_reduce_size = 0; // Never shrinks
const size_t team_shared_size = 0; // Never shrinks
const size_t thread_local_size = 0; // Never shrinks
auto* internal_instance = m_policy.space().impl_internal_space_instance();
- // Need to lock resize_thread_team_data
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
+
+ // Acquiring the instance lock below caused a possibly codegen-related
+ // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+ // compiled out when KOKKOS_ENABLE_ATOMICS_BYPASS is defined:
+ // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels run sequentially even when multiple threads use this
+ // instance; the same lock also serializes resize_thread_team_data.
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
internal_instance->resize_thread_team_data(
pool_reduce_size, team_reduce_size, team_shared_size,
thread_local_size);
: pointer_type(
internal_instance->m_thread_team_data.pool_reduce_local());
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- reference_type update = final_reducer.init(ptr);
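+ // Reduce protocol: init() constructs the identity value in the pool
+ // scratch, exec() accumulates into it, and final() post-processes the
+ // result in place.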
+ reference_type update = m_functor_reducer.get_reducer().init(ptr);
this->template exec<WorkTag>(update);
- final_reducer.final(ptr);
+ m_functor_reducer.get_reducer().final(ptr);
}
- template <class HostViewType>
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const HostViewType& arg_result_view,
- std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_functor(arg_functor),
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result_view)
+ : m_functor_reducer(arg_functor_reducer),
m_policy(arg_policy),
- m_reducer(InvalidType()),
m_result_ptr(arg_result_view.data()) {
- static_assert(Kokkos::is_view<HostViewType>::value,
+ static_assert(Kokkos::is_view<ViewType>::value,
"Kokkos::Serial reduce result must be a View");
static_assert(
- Kokkos::Impl::MemorySpaceAccess<typename HostViewType::memory_space,
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
Kokkos::HostSpace>::accessible,
- "Kokkos::Serial reduce result must be a View in HostSpace");
- }
-
- inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()) {
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
+ "Kokkos::Serial reduce result must be a View accessible from "
+ "HostSpace");
}
};
using WorkTag = typename Policy::work_tag;
using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+ FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType, void>;
using pointer_type = typename Analysis::pointer_type;
using reference_type = typename Analysis::reference_type;
- const FunctorType m_functor;
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
const Policy m_policy;
template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<std::is_void_v<TagType>> exec(
reference_type update) const {
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(i, update, true);
+ m_functor_reducer.get_functor()(i, update, true);
}
}
template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<!std::is_void_v<TagType>> exec(
reference_type update) const {
const TagType t{};
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(t, i, update, true);
+ m_functor_reducer.get_functor()(t, i, update, true);
}
}
public:
inline void execute() const {
- const size_t pool_reduce_size = Analysis::value_size(m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
+ const size_t pool_reduce_size = final_reducer.value_size();
const size_t team_reduce_size = 0; // Never shrinks
const size_t team_shared_size = 0; // Never shrinks
const size_t thread_local_size = 0; // Never shrinks
- // Need to lock resize_thread_team_data
auto* internal_instance = m_policy.space().impl_internal_space_instance();
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
+
+ // Acquiring the instance lock below caused a possibly codegen-related
+ // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+ // compiled out when KOKKOS_ENABLE_ATOMICS_BYPASS is defined:
+ // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels run sequentially even when multiple threads use this
+ // instance; the same lock also serializes resize_thread_team_data.
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
+
internal_instance->resize_thread_team_data(
pool_reduce_size, team_reduce_size, team_shared_size,
thread_local_size);
- typename Analysis::Reducer final_reducer(&m_functor);
-
reference_type update = final_reducer.init(pointer_type(
internal_instance->m_thread_team_data.pool_reduce_local()));
}
inline ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
- : m_functor(arg_functor), m_policy(arg_policy) {}
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
+ m_policy(arg_policy) {}
};
/*--------------------------------------------------------------------------*/
using Policy = Kokkos::RangePolicy<Traits...>;
using WorkTag = typename Policy::work_tag;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+ using Analysis = FunctorAnalysis<FunctorPatternInterface::SCAN, Policy,
+ FunctorType, ReturnType>;
+ using value_type = typename Analysis::value_type;
using pointer_type = typename Analysis::pointer_type;
using reference_type = typename Analysis::reference_type;
- const FunctorType m_functor;
+ const CombinedFunctorReducer<FunctorType, typename Analysis::Reducer>
+ m_functor_reducer;
const Policy m_policy;
- ReturnType& m_returnvalue;
+ const pointer_type m_result_ptr;
template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<std::is_void_v<TagType>> exec(
reference_type update) const {
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(i, update, true);
+ m_functor_reducer.get_functor()(i, update, true);
}
}
template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<!std::is_void_v<TagType>> exec(
reference_type update) const {
const TagType t{};
const typename Policy::member_type e = m_policy.end();
for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
- m_functor(t, i, update, true);
+ m_functor_reducer.get_functor()(t, i, update, true);
}
}
public:
inline void execute() {
- const size_t pool_reduce_size = Analysis::value_size(m_functor);
+ const size_t pool_reduce_size =
+ m_functor_reducer.get_reducer().value_size();
const size_t team_reduce_size = 0; // Never shrinks
const size_t team_shared_size = 0; // Never shrinks
const size_t thread_local_size = 0; // Never shrinks
- // Need to lock resize_thread_team_data
auto* internal_instance = m_policy.space().impl_internal_space_instance();
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
+
+ // Acquiring the instance lock below caused a possibly codegen-related
+ // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+ // compiled out when KOKKOS_ENABLE_ATOMICS_BYPASS is defined:
+ // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels run sequentially even when multiple threads use this
+ // instance; the same lock also serializes resize_thread_team_data.
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
+
internal_instance->resize_thread_team_data(
pool_reduce_size, team_reduce_size, team_shared_size,
thread_local_size);
- typename Analysis::Reducer final_reducer(&m_functor);
+ const typename Analysis::Reducer& final_reducer =
+ m_functor_reducer.get_reducer();
reference_type update = final_reducer.init(pointer_type(
internal_instance->m_thread_team_data.pool_reduce_local()));
this->template exec<WorkTag>(update);
- m_returnvalue = update;
+ *m_result_ptr = update;
}
- inline ParallelScanWithTotal(const FunctorType& arg_functor,
- const Policy& arg_policy,
- ReturnType& arg_returnvalue)
- : m_functor(arg_functor),
+ template <class ViewType,
+ class Enable = std::enable_if_t<Kokkos::is_view<ViewType>::value>>
+ ParallelScanWithTotal(const FunctorType& arg_functor,
+ const Policy& arg_policy,
+ const ViewType& arg_result_view)
+ : m_functor_reducer(arg_functor, typename Analysis::Reducer{arg_functor}),
m_policy(arg_policy),
- m_returnvalue(arg_returnvalue) {}
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Serial parallel_scan result must be host-accessible!");
+ }
};
} // namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKO_SERIAL_PARALLEL_TEAM_HPP
-#define KOKKO_SERIAL_PARALLEL_TEAM_HPP
+#ifndef KOKKOS_SERIAL_PARALLEL_TEAM_HPP
+#define KOKKOS_SERIAL_PARALLEL_TEAM_HPP
#include <Kokkos_Parallel.hpp>
int m_league_size;
int m_chunk_size;
+ Kokkos::Serial m_space;
+
public:
//! Tag this class as a kokkos execution policy
using execution_policy = TeamPolicyInternal;
//! Execution space of this execution policy:
using execution_space = Kokkos::Serial;
- const typename traits::execution_space& space() const {
- static typename traits::execution_space m_space;
- return m_space;
- }
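+ // Return the instance stored at construction instead of a function-local
+ // static, so the policy keeps the Serial instance it was created with.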
+ const typename traits::execution_space& space() const { return m_space; }
template <class ExecSpace, class... OtherProperties>
friend class TeamPolicyInternal;
return (level == 0 ? 1024 * 32 : 20 * 1024 * 1024);
}
/** \brief Specify league size, request team size */
- TeamPolicyInternal(const execution_space&, int league_size_request,
+ TeamPolicyInternal(const execution_space& space, int league_size_request,
int team_size_request, int /* vector_length_request */ = 1)
: m_team_scratch_size{0, 0},
m_thread_scratch_size{0, 0},
m_league_size(league_size_request),
- m_chunk_size(32) {
+ m_chunk_size(32),
+ m_space(space) {
if (team_size_request > 1)
Kokkos::abort("Kokkos::abort: Requested Team Size is too large!");
}
const size_t m_shared;
template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<std::is_void_v<TagType>> exec(
HostThreadTeamData& data) const {
for (int ileague = 0; ileague < m_league; ++ileague) {
m_functor(Member(data, ileague, m_league));
}
template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<!std::is_void_v<TagType>> exec(
HostThreadTeamData& data) const {
const TagType t{};
for (int ileague = 0; ileague < m_league; ++ileague) {
const size_t thread_local_size = 0; // Never shrinks
auto* internal_instance = m_policy.space().impl_internal_space_instance();
- // Need to lock resize_thread_team_data
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
+
+ // Acquiring the instance lock below caused a possibly codegen-related
+ // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+ // compiled out when KOKKOS_ENABLE_ATOMICS_BYPASS is defined:
+ // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels run sequentially even when multiple threads use this
+ // instance; the same lock also serializes resize_thread_team_data.
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
+
internal_instance->resize_thread_team_data(
pool_reduce_size, team_reduce_size, team_shared_size,
thread_local_size);
/*--------------------------------------------------------------------------*/
-template <class FunctorType, class ReducerType, class... Properties>
-class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
- ReducerType, Kokkos::Serial> {
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>, Kokkos::Serial> {
private:
- enum { TEAM_REDUCE_SIZE = 512 };
+ static constexpr int TEAM_REDUCE_SIZE = 512;
- using Policy = TeamPolicyInternal<Kokkos::Serial, Properties...>;
+ using Policy = TeamPolicyInternal<Kokkos::Serial, Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
using Member = typename Policy::member_type;
using WorkTag = typename Policy::work_tag;
- using ReducerConditional =
- Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
- FunctorType, ReducerType>;
- using ReducerTypeFwd = typename ReducerConditional::type;
- using WorkTagFwd =
- std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
- void>;
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
- using Analysis =
- FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
-
- using pointer_type = typename Analysis::pointer_type;
- using reference_type = typename Analysis::reference_type;
-
- const FunctorType m_functor;
+ const CombinedFunctorReducerType m_functor_reducer;
const Policy m_policy;
const int m_league;
- const ReducerType m_reducer;
pointer_type m_result_ptr;
size_t m_shared;
template <class TagType>
- inline std::enable_if_t<std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<std::is_void_v<TagType>> exec(
HostThreadTeamData& data, reference_type update) const {
for (int ileague = 0; ileague < m_league; ++ileague) {
- m_functor(Member(data, ileague, m_league), update);
+ m_functor_reducer.get_functor()(Member(data, ileague, m_league), update);
}
}
template <class TagType>
- inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+ inline std::enable_if_t<!std::is_void_v<TagType>> exec(
HostThreadTeamData& data, reference_type update) const {
const TagType t{};
for (int ileague = 0; ileague < m_league; ++ileague) {
- m_functor(t, Member(data, ileague, m_league), update);
+ m_functor_reducer.get_functor()(t, Member(data, ileague, m_league),
+ update);
}
}
public:
inline void execute() const {
const size_t pool_reduce_size =
- Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+ m_functor_reducer.get_reducer().value_size();
const size_t team_reduce_size = TEAM_REDUCE_SIZE;
const size_t team_shared_size = m_shared;
const size_t thread_local_size = 0; // Never shrinks
auto* internal_instance = m_policy.space().impl_internal_space_instance();
- // Need to lock resize_thread_team_data
- std::lock_guard<std::mutex> lock(
- internal_instance->m_thread_team_data_mutex);
+
+ // Acquiring the instance lock below caused a possibly codegen-related
+ // slowdown, especially in GCC 9-11 with KOKKOS_ARCH_NATIVE, so it is
+ // compiled out when KOKKOS_ENABLE_ATOMICS_BYPASS is defined:
+ // https://github.com/kokkos/kokkos/issues/7268
+#ifndef KOKKOS_ENABLE_ATOMICS_BYPASS
+ // Make sure kernels run sequentially even when multiple threads use this
+ // instance; the same lock also serializes resize_thread_team_data.
+ std::lock_guard<std::mutex> instance_lock(
+ internal_instance->m_instance_mutex);
+#endif
+
internal_instance->resize_thread_team_data(
pool_reduce_size, team_reduce_size, team_shared_size,
thread_local_size);
: pointer_type(
internal_instance->m_thread_team_data.pool_reduce_local());
- typename Analysis::Reducer final_reducer(
- &ReducerConditional::select(m_functor, m_reducer));
-
- reference_type update = final_reducer.init(ptr);
+ reference_type update = m_functor_reducer.get_reducer().init(ptr);
this->template exec<WorkTag>(internal_instance->m_thread_team_data, update);
- final_reducer.final(ptr);
+ m_functor_reducer.get_reducer().final(ptr);
}
template <class ViewType>
- ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
- const ViewType& arg_result,
- std::enable_if_t<Kokkos::is_view<ViewType>::value &&
- !Kokkos::is_reducer<ReducerType>::value,
- void*> = nullptr)
- : m_functor(arg_functor),
+ ParallelReduce(const CombinedFunctorReducerType& arg_functor_reducer,
+ const Policy& arg_policy, const ViewType& arg_result)
+ : m_functor_reducer(arg_functor_reducer),
m_policy(arg_policy),
m_league(arg_policy.league_size()),
- m_reducer(InvalidType()),
m_result_ptr(arg_result.data()),
m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(m_functor, 1)) {
+ FunctorTeamShmemSize<FunctorType>::value(
+ m_functor_reducer.get_functor(), 1)) {
static_assert(Kokkos::is_view<ViewType>::value,
"Reduction result on Kokkos::Serial must be a Kokkos::View");
static_assert(
Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
Kokkos::HostSpace>::accessible,
- "Reduction result on Kokkos::Serial must be a Kokkos::View in "
+ "Kokkos::Serial reduce result must be a View accessible from "
"HostSpace");
}
-
- inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
- const ReducerType& reducer)
- : m_functor(arg_functor),
- m_policy(arg_policy),
- m_league(arg_policy.league_size()),
- m_reducer(reducer),
- m_result_ptr(reducer.view().data()),
- m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
- FunctorTeamShmemSize<FunctorType>::value(arg_functor, 1)) {
- /*static_assert( std::is_same< typename ViewType::memory_space
- , Kokkos::HostSpace >::value
- , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
- );*/
- }
};
} // namespace Impl
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <Serial/Kokkos_Serial_Task.hpp>
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::Serial, typename Kokkos::Serial::memory_space>;
+
+} // namespace Impl
+} // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_SERIAL_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_SERIAL_TASK_HPP
#define KOKKOS_IMPL_SERIAL_TASK_HPP
#include <Kokkos_TaskScheduler_fwd.hpp>
-#include <impl/Kokkos_TaskQueue.hpp>
-#include <Kokkos_Serial.hpp>
+#include <Serial/Kokkos_Serial.hpp>
#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_TaskTeamMember.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
template <class Scheduler>
class TaskQueueSpecializationConstrained<
- Scheduler,
- std::enable_if_t<std::is_same<typename Scheduler::execution_space,
- Kokkos::Serial>::value>> {
+ Scheduler, std::enable_if_t<std::is_same_v<
+ typename Scheduler::execution_space, Kokkos::Serial>>> {
public:
// Note: Scheduler may be an incomplete type at class scope (but not inside
// of the methods, obviously)
using task_base_type = TaskBase;
using queue_type = typename scheduler_type::queue_type;
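+ // EndTag is an integral sentinel defined by TaskBase; casting it yields a
+ // pointer value that can never alias a real task.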
- task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+ auto* const end = reinterpret_cast<task_base_type*>(task_base_type::EndTag);
execution_space serial_execution_space;
auto& data = serial_execution_space.impl_internal_space_instance()
using task_base_type = TaskBase;
using queue_type = typename scheduler_type::queue_type;
- task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+ auto* const end = reinterpret_cast<task_base_type*>(task_base_type::EndTag);
execution_space serial_execution_space;
} // namespace Impl
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SERIAL_UNIQUE_TOKEN_HPP
#define KOKKOS_SERIAL_UNIQUE_TOKEN_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
+#define KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+ Kokkos::Serial> {
+ private:
+ using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+ Policy m_policy;
+ FunctorType m_functor;
+
+ template <class TagType>
+ std::enable_if_t<std::is_void_v<TagType>> exec_one(
+ const std::int32_t w) const noexcept {
+ m_functor(w);
+ }
+
+ template <class TagType>
+ std::enable_if_t<!std::is_void_v<TagType>> exec_one(
+ const std::int32_t w) const noexcept {
+ const TagType t{};
+ m_functor(t, w);
+ }
+
+ public:
+ inline void execute() const noexcept {
+ // Spin until COMPLETED_TOKEN.
+ // END_TOKEN indicates no work is currently available.
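+ // pop_work() returns a ready work-item index, END_TOKEN while the queue
+ // is temporarily empty, or COMPLETED_TOKEN once all work has finished.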
+
+ for (std::int32_t w = Policy::END_TOKEN;
+ Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+ if (Policy::END_TOKEN != w) {
+ exec_one<typename Policy::work_tag>(w);
+ m_policy.completed_work(w);
+ }
+ }
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* #define KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SERIAL_ZEROMEMSET_HPP
+#define KOKKOS_SERIAL_ZEROMEMSET_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+#include <Serial/Kokkos_Serial.hpp>
+
+#include <type_traits>
+#include <cstring>
+
+namespace Kokkos {
+namespace Impl {
+
+// We only need to provide a specialization for Serial if there is a host
+// parallel execution space since the specialization for
+// DefaultHostExecutionSpace is defined elsewhere.
+struct DummyExecutionSpace;
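+// When Serial is the DefaultHostExecutionSpace, the conditional selects the
+// never-defined DummyExecutionSpace, so this specialization targets an
+// unused type instead of colliding with the DefaultHostExecutionSpace one.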
+template <>
+struct ZeroMemset<
+ std::conditional_t<!std::is_same_v<Serial, DefaultHostExecutionSpace>,
+ Serial, DummyExecutionSpace>> {
+ ZeroMemset(const Serial&, void* dst, size_t cnt) { std::memset(dst, 0, cnt); }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // !defined(KOKKOS_SERIAL_ZEROMEMSET_HPP)
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
#ifndef KOKKOS_THREADS_HPP
#define KOKKOS_THREADS_HPP
/*--------------------------------------------------------------------------*/
-namespace Kokkos {
-namespace Impl {
-class ThreadsExec;
-enum class fence_is_static { yes, no };
-} // namespace Impl
-} // namespace Kokkos
-
-/*--------------------------------------------------------------------------*/
-
namespace Kokkos {
/** \brief Execution space for a pool of C++11 threads on a CPU. */
/// \brief True if and only if this method is being called in a
/// thread-parallel function.
- static int in_parallel();
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ KOKKOS_DEPRECATED static int in_parallel();
+#endif
/// \brief Print configuration information to the given output stream.
void print_configuration(std::ostream& os, bool verbose = false) const;
"Kokkos::Threads::fence: Unnamed Instance Fence") const;
/** \brief Return the maximum amount of concurrency. */
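+ // concurrency() becomes a const member function; the static signature is
+ // kept only while KOKKOS_ENABLE_DEPRECATED_CODE_4 is active.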
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int concurrency();
+#else
+ int concurrency() const;
+#endif
/// \brief Free any resources being consumed by the device.
///
static const char* name();
//@}
//----------------------------------------
+ private:
+ friend bool operator==(Threads const&, Threads const&) { return true; }
+ friend bool operator!=(Threads const&, Threads const&) { return false; }
};
namespace Tools {
} // namespace Impl
} // namespace Kokkos
-/*--------------------------------------------------------------------------*/
-
-#include <Kokkos_ExecPolicy.hpp>
-#include <Kokkos_Parallel.hpp>
-#include <Threads/Kokkos_ThreadsExec.hpp>
-#include <Threads/Kokkos_ThreadsTeam.hpp>
-#include <Threads/Kokkos_Threads_Parallel_Range.hpp>
-#include <Threads/Kokkos_Threads_Parallel_MDRange.hpp>
-#include <Threads/Kokkos_Threads_Parallel_Team.hpp>
-#include <Threads/Kokkos_Threads_UniqueToken.hpp>
-
-#include <KokkosExp_MDRangePolicy.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
#endif /* #if defined( KOKKOS_ENABLE_THREADS ) */
#endif /* #define KOKKOS_THREADS_HPP */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#include "Threads/Kokkos_Threads_Instance.hpp"
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <utility>
+#include <iostream>
+#include <sstream>
+#include <thread>
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+namespace {
+
+// std::thread compatible driver.
+// Recovery from an exception would require constant intra-thread health
+// verification, which would negatively impact runtime. As such, simply
+// abort the process.
+void internal_cppthread_driver() {
+ try {
+ ThreadsInternal::driver();
+ } catch (const std::exception &x) {
+ std::cerr << "Exception thrown from worker thread: " << x.what()
+ << std::endl;
+ std::cerr.flush();
+ std::abort();
+ } catch (...) {
+ std::cerr << "Exception thrown from worker thread" << std::endl;
+ std::cerr.flush();
+ std::abort();
+ }
+}
+
+ThreadsInternal s_threads_process;
+ThreadsInternal *s_threads_exec[ThreadsInternal::MAX_THREAD_COUNT] = {nullptr};
+std::thread::id s_threads_pid[ThreadsInternal::MAX_THREAD_COUNT];
+std::pair<unsigned, unsigned>
+ s_threads_coord[ThreadsInternal::MAX_THREAD_COUNT];
+
+int s_thread_pool_size[3] = {0, 0, 0};
+
+using s_current_function_type = void (*)(ThreadsInternal &, const void *);
+std::atomic<s_current_function_type> s_current_function;
+std::atomic<const void *> s_current_function_arg = nullptr;
+
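+// Number of fan-in partners of a thread in the binary reduction tree:
+// counting in reversed rank order, partners sit at offsets 1, 2, 4, ... for
+// as long as the corresponding low bit of rank_rev is zero and the partner
+// exists.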
+inline unsigned fan_size(const unsigned rank, const unsigned size) {
+ const unsigned rank_rev = size - (rank + 1);
+ unsigned count = 0;
+ for (unsigned n = 1; (rank_rev + n < size) && !(rank_rev & n); n <<= 1) {
+ ++count;
+ }
+ return count;
+}
+
+void wait_yield(std::atomic<ThreadState> &flag, const ThreadState value) {
+ while (value == flag) {
+ std::this_thread::yield();
+ }
+}
+
+} // namespace
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+bool ThreadsInternal::is_process() {
+ static const std::thread::id master_pid = std::this_thread::get_id();
+
+ return master_pid == std::this_thread::get_id();
+}
+
+//----------------------------------------------------------------------------
+
+void execute_function_noop(ThreadsInternal &, const void *) {}
+
+void ThreadsInternal::driver() {
+ SharedAllocationRecord<void, void>::tracking_enable();
+
+ ThreadsInternal this_thread;
+
+ while (this_thread.m_pool_state == ThreadState::Active) {
+ (*s_current_function)(this_thread, s_current_function_arg);
+
+ // Deactivate thread and wait for reactivation
+ this_thread.m_pool_state = ThreadState::Inactive;
+
+ wait_yield(this_thread.m_pool_state, ThreadState::Inactive);
+ }
+}
+
+ThreadsInternal::ThreadsInternal()
+ : m_pool_base(nullptr),
+ m_scratch(nullptr),
+ m_scratch_reduce_end(0),
+ m_scratch_thread_end(0),
+ m_pool_rank(0),
+ m_pool_size(0),
+ m_pool_fan_size(0),
+ m_pool_state(ThreadState::Terminating) {
+ if (&s_threads_process != this) {
+ // The code in this branch is executed by a spawned thread, not by the
+ // root thread.
+ ThreadsInternal *const nil = nullptr;
+
+ // Which entry in 's_threads_exec', possibly determined from hwloc binding
+ const int entry =
+ reinterpret_cast<size_t>(s_current_function_arg.load()) <
+ size_t(s_thread_pool_size[0])
+ ? reinterpret_cast<size_t>(s_current_function_arg.load())
+ : size_t(Kokkos::hwloc::bind_this_thread(s_thread_pool_size[0],
+ s_threads_coord));
+
+ // Given a good entry set this thread in the 's_threads_exec' array
+ if (entry < s_thread_pool_size[0] &&
+ nil == atomic_compare_exchange(s_threads_exec + entry, nil, this)) {
+ m_pool_base = s_threads_exec;
+ m_pool_rank = s_thread_pool_size[0] - (entry + 1);
+ m_pool_rank_rev = s_thread_pool_size[0] - (pool_rank() + 1);
+ m_pool_size = s_thread_pool_size[0];
+ m_pool_fan_size = fan_size(m_pool_rank, m_pool_size);
+ m_pool_state = ThreadState::Active;
+
+ s_threads_pid[m_pool_rank] = std::this_thread::get_id();
+
+ // Inform spawning process that the threads_exec entry has been set.
+ s_threads_process.m_pool_state = ThreadState::Active;
+ } else {
+ // Inform spawning process that the threads_exec entry could not be set.
+ s_threads_process.m_pool_state = ThreadState::Terminating;
+ }
+ } else {
+ // Enables 'parallel_for' to execute on an uninitialized Threads device
+ m_pool_rank = 0;
+ m_pool_size = 1;
+ m_pool_state = ThreadState::Inactive;
+
+ s_threads_pid[m_pool_rank] = std::this_thread::get_id();
+ }
+}
+
+ThreadsInternal::~ThreadsInternal() {
+ const unsigned entry = m_pool_size - (m_pool_rank + 1);
+
+ if (m_scratch) {
+ Kokkos::kokkos_free<Kokkos::HostSpace>(m_scratch);
+ m_scratch = nullptr;
+ }
+
+ m_pool_base = nullptr;
+ m_scratch_reduce_end = 0;
+ m_scratch_thread_end = 0;
+ m_pool_rank = 0;
+ m_pool_size = 0;
+ m_pool_fan_size = 0;
+
+ m_pool_state = ThreadState::Terminating;
+
+ if (&s_threads_process != this && entry < MAX_THREAD_COUNT) {
+ ThreadsInternal *const nil = nullptr;
+
+ atomic_compare_exchange(s_threads_exec + entry, this, nil);
+
+ s_threads_process.m_pool_state = ThreadState::Terminating;
+ }
+}
+
+ThreadsInternal *ThreadsInternal::get_thread(const int init_thread_rank) {
+ ThreadsInternal *const th =
+ init_thread_rank < s_thread_pool_size[0]
+ ? s_threads_exec[s_thread_pool_size[0] - (init_thread_rank + 1)]
+ : nullptr;
+
+ if (nullptr == th || th->m_pool_rank != init_thread_rank) {
+ std::ostringstream msg;
+ msg << "Kokkos::Impl::ThreadsInternal::get_thread ERROR : "
+ << "thread " << init_thread_rank << " of " << s_thread_pool_size[0];
+ if (nullptr == th) {
+ msg << " does not exist";
+ } else {
+ msg << " has wrong thread_rank " << th->m_pool_rank;
+ }
+ Kokkos::Impl::throw_runtime_exception(msg.str());
+ }
+
+ return th;
+}
+
+} // namespace Impl
+} // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+void ThreadsInternal::verify_is_process(const std::string &name,
+ const bool initialized) {
+ if (!is_process()) {
+ std::string msg(name);
+ msg.append(
+ " FAILED : Called by a worker thread, can only be called by the master "
+ "process.");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+
+ if (initialized && 0 == s_thread_pool_size[0]) {
+ std::string msg(name);
+ msg.append(" FAILED : Threads not initialized.");
+ Kokkos::Impl::throw_runtime_exception(msg);
+ }
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+KOKKOS_DEPRECATED int ThreadsInternal::in_parallel() {
+ // We are in parallel if a thread function is executing, its argument is
+ // not the special process sentinel, and either the master process is a
+ // worker or the caller is not the master process.
+ return s_current_function && (&s_threads_process != s_current_function_arg) &&
+ (s_threads_process.m_pool_base || !is_process());
+}
+#endif
+void ThreadsInternal::fence() {
+ fence("Kokkos::ThreadsInternal::fence: Unnamed Instance Fence");
+}
+void ThreadsInternal::fence(const std::string &name) {
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
+ name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
+ internal_fence);
+}
+
+// Wait for root thread to become inactive
+void ThreadsInternal::internal_fence() {
+ if (s_thread_pool_size[0]) {
+ // Wait for the root thread to complete:
+ Impl::spinwait_while_equal(s_threads_exec[0]->m_pool_state,
+ ThreadState::Active);
+ }
+
+ s_current_function = nullptr;
+ s_current_function_arg = nullptr;
+
+ // Make sure function and arguments are cleared before
+ // potentially re-activating threads with a subsequent launch.
+ memory_fence();
+}
+
+/** \brief Begin execution of the asynchronous functor */
+void ThreadsInternal::start(void (*func)(ThreadsInternal &, const void *),
+ const void *arg) {
+ verify_is_process("ThreadsInternal::start", true);
+
+ if (s_current_function || s_current_function_arg) {
+ Kokkos::Impl::throw_runtime_exception(
+ std::string("ThreadsInternal::start() FAILED : already executing"));
+ }
+
+ s_current_function = func;
+ s_current_function_arg = arg;
+
+ // Make sure function and arguments are written before activating threads.
+ memory_fence();
+
+ // Activate threads. The spawned threads will start working on
+ // s_current_function. The root thread is only set to active; we still need
+ // to call s_current_function for it ourselves.
+ for (int i = s_thread_pool_size[0]; 0 < i--;) {
+ s_threads_exec[i]->m_pool_state = ThreadState::Active;
+ }
+
+ if (s_threads_process.m_pool_size) {
+ // The master process is the root thread, so run it:
+ (*func)(s_threads_process, arg);
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsInternal::execute_resize_scratch_in_serial() {
+ const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
+
+ auto deallocate_scratch_memory = [](ThreadsInternal &exec) {
+ if (exec.m_scratch) {
+ Kokkos::kokkos_free<Kokkos::HostSpace>(exec.m_scratch);
+ exec.m_scratch = nullptr;
+ }
+ };
+ if (s_threads_process.m_pool_base) {
+ for (unsigned i = s_thread_pool_size[0]; begin < i;) {
+ deallocate_scratch_memory(*s_threads_exec[--i]);
+ }
+ }
+
+ s_current_function = &first_touch_allocate_thread_private_scratch;
+ s_current_function_arg = &s_threads_process;
+
+ // Make sure function and arguments are written before activating threads.
+ memory_fence();
+
+ for (unsigned i = s_thread_pool_size[0]; begin < i;) {
+ ThreadsInternal &th = *s_threads_exec[--i];
+
+ th.m_pool_state = ThreadState::Active;
+
+ wait_yield(th.m_pool_state, ThreadState::Active);
+ }
+
+ if (s_threads_process.m_pool_base) {
+ deallocate_scratch_memory(s_threads_process);
+ s_threads_process.m_pool_state = ThreadState::Active;
+ first_touch_allocate_thread_private_scratch(s_threads_process, nullptr);
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+ }
+
+ s_current_function_arg = nullptr;
+ s_current_function = nullptr;
+
+ // Make sure function and arguments are cleared before proceeding.
+ memory_fence();
+}
+
+//----------------------------------------------------------------------------
+
+void *ThreadsInternal::root_reduce_scratch() {
+ return s_threads_process.reduce_memory();
+}
+
+void ThreadsInternal::first_touch_allocate_thread_private_scratch(
+ ThreadsInternal &exec, const void *) {
+ exec.m_scratch_reduce_end = s_threads_process.m_scratch_reduce_end;
+ exec.m_scratch_thread_end = s_threads_process.m_scratch_thread_end;
+
+ if (s_threads_process.m_scratch_thread_end) {
+ // Allocate tracked memory:
+ {
+ exec.m_scratch = Kokkos::kokkos_malloc<Kokkos::HostSpace>(
+ "Kokkos::thread_scratch", s_threads_process.m_scratch_thread_end);
+ }
+
+ unsigned *ptr = reinterpret_cast<unsigned *>(exec.m_scratch);
+
+ unsigned *const end =
+ ptr + s_threads_process.m_scratch_thread_end / sizeof(unsigned);
+
+ // touch on this thread
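+ // (first-touch: zero-filling from the owning thread places the pages on
+ // that thread's NUMA node)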
+ while (ptr < end) *ptr++ = 0;
+ }
+}
+
+void *ThreadsInternal::resize_scratch(size_t reduce_size, size_t thread_size) {
+ enum { ALIGN_MASK = Kokkos::Impl::MEMORY_ALIGNMENT - 1 };
+
+ fence();
+
+ const size_t old_reduce_size = s_threads_process.m_scratch_reduce_end;
+ const size_t old_thread_size = s_threads_process.m_scratch_thread_end -
+ s_threads_process.m_scratch_reduce_end;
+
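+ // Round both requests up to a multiple of MEMORY_ALIGNMENT; ALIGN_MASK has
+ // all bits below the alignment set.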
+ reduce_size = (reduce_size + ALIGN_MASK) & ~ALIGN_MASK;
+ thread_size = (thread_size + ALIGN_MASK) & ~ALIGN_MASK;
+
+ // Increase size or deallocate completely.
+
+ if ((old_reduce_size < reduce_size) || (old_thread_size < thread_size) ||
+ ((reduce_size == 0 && thread_size == 0) &&
+ (old_reduce_size != 0 || old_thread_size != 0))) {
+ verify_is_process("ThreadsInternal::resize_scratch", true);
+
+ s_threads_process.m_scratch_reduce_end = reduce_size;
+ s_threads_process.m_scratch_thread_end = reduce_size + thread_size;
+
+ execute_resize_scratch_in_serial();
+
+ s_threads_process.m_scratch = s_threads_exec[0]->m_scratch;
+ }
+
+ return s_threads_process.m_scratch;
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsInternal::print_configuration(std::ostream &s, const bool detail) {
+ verify_is_process("ThreadsInternal::print_configuration", false);
+
+ fence();
+
+ s << "Kokkos::Threads";
+
+#if defined(KOKKOS_ENABLE_THREADS)
+ s << " KOKKOS_ENABLE_THREADS";
+#endif
+#if defined(KOKKOS_ENABLE_HWLOC)
+ const unsigned numa_count = Kokkos::hwloc::get_available_numa_count();
+ const unsigned cores_per_numa = Kokkos::hwloc::get_available_cores_per_numa();
+ const unsigned threads_per_core =
+ Kokkos::hwloc::get_available_threads_per_core();
+
+ s << " hwloc[" << numa_count << "x" << cores_per_numa << "x"
+ << threads_per_core << "]";
+#endif
+
+ if (s_thread_pool_size[0]) {
+ s << " threads[" << s_thread_pool_size[0] << "]"
+ << " threads_per_numa[" << s_thread_pool_size[1] << "]"
+ << " threads_per_core[" << s_thread_pool_size[2] << "]";
+ if (nullptr == s_threads_process.m_pool_base) {
+ s << " Asynchronous";
+ }
+ s << std::endl;
+
+ if (detail) {
+ for (int i = 0; i < s_thread_pool_size[0]; ++i) {
+ ThreadsInternal *const th = s_threads_exec[i];
+
+ if (th) {
+ const int rank_rev = th->m_pool_size - (th->m_pool_rank + 1);
+
+ s << " Thread[ " << th->m_pool_rank << " ]";
+
+ s << " Fan{";
+ for (int j = 0; j < th->m_pool_fan_size; ++j) {
+ ThreadsInternal *const thfan = th->m_pool_base[rank_rev + (1 << j)];
+ s << " [ " << thfan->m_pool_rank << " ]";
+ }
+ s << " }";
+
+ if (th == &s_threads_process) {
+ s << " is_process";
+ }
+ }
+ s << std::endl;
+ }
+ }
+ } else {
+ s << " not initialized" << std::endl;
+ }
+}
+
+//----------------------------------------------------------------------------
+
+int ThreadsInternal::is_initialized() { return nullptr != s_threads_exec[0]; }
+
+void ThreadsInternal::initialize(int thread_count_arg) {
+ unsigned thread_count = thread_count_arg == -1 ? 0 : thread_count_arg;
+
+ const bool is_initialized = 0 != s_thread_pool_size[0];
+
+ unsigned thread_spawn_failed = 0;
+
+ for (int i = 0; i < ThreadsInternal::MAX_THREAD_COUNT; i++)
+ s_threads_exec[i] = nullptr;
+
+ if (!is_initialized) {
+ // If thread_count is zero, it is given a default value based upon hwloc
+ // detection.
+ const bool hwloc_avail = Kokkos::hwloc::available();
+ const bool hwloc_can_bind =
+ hwloc_avail && Kokkos::hwloc::can_bind_threads();
+
+ if (thread_count == 0) {
+ thread_count = hwloc_avail
+ ? Kokkos::hwloc::get_available_numa_count() *
+ Kokkos::hwloc::get_available_cores_per_numa() *
+ Kokkos::hwloc::get_available_threads_per_core()
+ : 1;
+ }
+
+ const bool allow_asynchronous_threadpool = false;
+ unsigned use_numa_count = 0;
+ unsigned use_cores_per_numa = 0;
+ hwloc::thread_mapping("Kokkos::Threads::initialize",
+ allow_asynchronous_threadpool, thread_count,
+ use_numa_count, use_cores_per_numa, s_threads_coord);
+
+ const std::pair<unsigned, unsigned> proc_coord = s_threads_coord[0];
+
+ // Synchronous with s_threads_coord[0] as the process core
+ // Claim entry #0 for binding the process core.
+ s_threads_coord[0] = std::pair<unsigned, unsigned>(~0u, ~0u);
+
+ s_thread_pool_size[0] = thread_count;
+ s_thread_pool_size[1] = s_thread_pool_size[0] / use_numa_count;
+ s_thread_pool_size[2] = s_thread_pool_size[1] / use_cores_per_numa;
+ s_current_function =
+ &execute_function_noop; // Initialization work function
+
+ for (unsigned ith = 1; ith < thread_count; ++ith) {
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+
+ // If hwloc is available, the spawned thread will choose its own entry
+ // in 's_threads_coord'; otherwise, specify the entry.
+ s_current_function_arg =
+ reinterpret_cast<void *>(hwloc_can_bind ? ~0u : ith);
+
+ // Make sure all outstanding memory writes are complete
+ // before spawning the new thread.
+ memory_fence();
+
+ // Spawn thread executing the 'driver()' function.
+ // Wait until spawned thread has attempted to initialize.
+ // If spawning and initialization is successful then
+ // an entry in 's_threads_exec' will be assigned.
+ std::thread t(internal_cppthread_driver);
+ t.detach();
+ wait_yield(s_threads_process.m_pool_state, ThreadState::Inactive);
+ if (s_threads_process.m_pool_state == ThreadState::Terminating) break;
+ }
+
+ // Wait for all spawned threads to deactivate before zeroing the function.
+
+ for (unsigned ith = 1; ith < thread_count; ++ith) {
+ // Try to protect against cache coherency failure by casting to volatile.
+ ThreadsInternal *const th =
+ ((ThreadsInternal *volatile *)s_threads_exec)[ith];
+ if (th) {
+ wait_yield(th->m_pool_state, ThreadState::Active);
+ } else {
+ ++thread_spawn_failed;
+ }
+ }
+
+ s_current_function = nullptr;
+ s_current_function_arg = nullptr;
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+
+ memory_fence();
+
+ if (!thread_spawn_failed) {
+ // Bind process to the core on which it was located before spawning
+ // occurred
+ if (hwloc_can_bind) {
+ Kokkos::hwloc::bind_this_thread(proc_coord);
+ }
+
+ s_threads_exec[0] = &s_threads_process;
+ s_threads_process.m_pool_base = s_threads_exec;
+ s_threads_process.m_pool_rank =
+ thread_count - 1; // Reversed for scan-compatible reductions
+ s_threads_process.m_pool_size = thread_count;
+ s_threads_process.m_pool_fan_size = fan_size(
+ s_threads_process.m_pool_rank, s_threads_process.m_pool_size);
+ s_threads_pid[s_threads_process.m_pool_rank] = std::this_thread::get_id();
+
+ // Initial allocations:
+ ThreadsInternal::resize_scratch(1024, 1024);
+ } else {
+ s_thread_pool_size[0] = 0;
+ s_thread_pool_size[1] = 0;
+ s_thread_pool_size[2] = 0;
+ }
+ }
+
+ if (is_initialized || thread_spawn_failed) {
+ std::ostringstream msg;
+
+ msg << "Kokkos::Threads::initialize ERROR";
+
+ if (is_initialized) {
+ msg << " : already initialized";
+ }
+ if (thread_spawn_failed) {
+ msg << " : failed to spawn " << thread_spawn_failed << " threads";
+ }
+
+ Kokkos::Impl::throw_runtime_exception(msg.str());
+ }
+
+ // Check for over-subscription
+ auto const reported_ranks = mpi_ranks_per_node();
+ auto const mpi_local_size = reported_ranks < 0 ? 1 : reported_ranks;
+ int const procs_per_node = std::thread::hardware_concurrency();
+ if (Kokkos::show_warnings() &&
+ (mpi_local_size * long(thread_count) > procs_per_node)) {
+ std::cerr << "Kokkos::Threads::initialize WARNING: You are likely "
+ "oversubscribing your CPU cores."
+ << std::endl;
+ std::cerr << " Detected: "
+ << procs_per_node << " cores per node." << std::endl;
+ std::cerr << " Detected: "
+ << mpi_local_size << " MPI_ranks per node." << std::endl;
+ std::cerr << " Requested: "
+ << thread_count << " threads per process." << std::endl;
+ }
+
+ Impl::SharedAllocationRecord<void, void>::tracking_enable();
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsInternal::finalize() {
+ verify_is_process("ThreadsInternal::finalize", false);
+
+ fence();
+
+ resize_scratch(0, 0);
+
+ const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
+
+ for (unsigned i = s_thread_pool_size[0]; begin < i--;) {
+ if (s_threads_exec[i]) {
+ s_threads_exec[i]->m_pool_state = ThreadState::Terminating;
+
+ wait_yield(s_threads_process.m_pool_state, ThreadState::Inactive);
+
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+ }
+
+ s_threads_pid[i] = std::thread::id();
+ }
+
+ if (s_threads_process.m_pool_base) {
+ (&s_threads_process)->~ThreadsInternal();
+ s_threads_exec[0] = nullptr;
+ }
+
+ if (Kokkos::hwloc::can_bind_threads()) {
+ Kokkos::hwloc::unbind_this_thread();
+ }
+
+ s_thread_pool_size[0] = 0;
+ s_thread_pool_size[1] = 0;
+ s_thread_pool_size[2] = 0;
+
+ // Reset master thread to run solo.
+ s_threads_process.m_pool_base = nullptr;
+ s_threads_process.m_pool_rank = 0;
+ s_threads_process.m_pool_size = 1;
+ s_threads_process.m_pool_fan_size = 0;
+ s_threads_process.m_pool_state = ThreadState::Inactive;
+}
+
+//----------------------------------------------------------------------------
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+int Threads::concurrency() { return impl_thread_pool_size(0); }
+#else
+int Threads::concurrency() const { return impl_thread_pool_size(0); }
+#endif
+
+void Threads::fence(const std::string &name) const {
+ Impl::ThreadsInternal::fence(name);
+}
+
+Threads &Threads::impl_instance(int) {
+ static Threads t;
+ return t;
+}
+
+int Threads::impl_thread_pool_rank_host() {
+ const std::thread::id pid = std::this_thread::get_id();
+ int i = 0;
+ while ((i < Impl::s_thread_pool_size[0]) && (pid != Impl::s_threads_pid[i])) {
+ ++i;
+ }
+ return i;
+}
+
+int Threads::impl_thread_pool_size(int depth) {
+ return Impl::s_thread_pool_size[depth];
+}
+
+const char *Threads::name() { return "Threads"; }
+
+namespace Impl {
+
+int g_threads_space_factory_initialized =
+ initialize_space_factory<Threads>("050_Threads");
+
+} // namespace Impl
+
+} /* namespace Kokkos */
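
For orientation, a minimal usage sketch of the public surface defined above, assuming a build with KOKKOS_ENABLE_THREADS (standard Kokkos API; not part of the patch):

#include <Kokkos_Core.hpp>
#include <iostream>

int main() {
  // Spawns the pool via ThreadsInternal::initialize().
  Kokkos::initialize(Kokkos::InitializationSettings().set_num_threads(4));
  {
    Kokkos::Threads exec;
    std::cout << exec.name() << " concurrency: " << exec.concurrency() << "\n";
    exec.fence("example fence");  // Forwards to ThreadsInternal::fence(name).
  }
  Kokkos::finalize();  // Joins the pool via ThreadsInternal::finalize().
}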
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_THREADSEXEC_HPP
-#define KOKKOS_THREADSEXEC_HPP
+#ifndef KOKKOS_THREADS_INSTANCE_HPP
+#define KOKKOS_THREADS_INSTANCE_HPP
#include <Kokkos_Macros.hpp>
#include <cstdio>
-
+#include <ostream>
#include <utility>
-#include <impl/Kokkos_Spinwait.hpp>
#include <Kokkos_Atomic.hpp>
+#include <Kokkos_Pair.hpp>
#include <impl/Kokkos_ConcurrentBitset.hpp>
+#include <Threads/Kokkos_Threads.hpp>
+#include <Threads/Kokkos_Threads_Spinwait.hpp>
+#include <Threads/Kokkos_Threads_State.hpp>
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
-class ThreadsExec {
+class ThreadsInternal {
public:
// Fan array has log_2(NT) reduction threads plus 2 scan threads
// Currently limited to 16k threads.
- enum { MAX_FAN_COUNT = 16 };
- enum { MAX_THREAD_COUNT = 1 << (MAX_FAN_COUNT - 2) };
- enum { VECTOR_LENGTH = 8 };
-
- /** \brief States of a worker thread */
- enum {
- Terminating ///< Termination in progress
- ,
- Inactive ///< Exists, waiting for work
- ,
- Active ///< Exists, performing work
- ,
- Rendezvous ///< Exists, waiting in a barrier or reduce
-
- ,
- ScanCompleted,
- ScanAvailable,
- ReductionAvailable
- };
+ static constexpr int MAX_FAN_COUNT = 16;
+ static constexpr int MAX_THREAD_COUNT = 1 << (MAX_FAN_COUNT - 2);
+ static constexpr int VECTOR_LENGTH = 8;
private:
friend class Kokkos::Threads;
// the threads that need them.
// For a simple reduction the thread location is arbitrary.
- ThreadsExec *const *m_pool_base; ///< Base for pool fan-in
+ ThreadsInternal *const *m_pool_base; ///< Base for pool fan-in
void *m_scratch;
int m_scratch_reduce_end;
size_t m_scratch_thread_end;
- int m_numa_rank;
- int m_numa_core_rank;
int m_pool_rank;
int m_pool_rank_rev;
int m_pool_size;
int m_pool_fan_size;
- int volatile m_pool_state; ///< State for global synchronizations
+ std::atomic<ThreadState> m_pool_state; ///< State for global synchronizations
// Members for dynamic scheduling
// Which thread am I stealing from currently
int m_current_steal_target;
// This thread's owned work_range
- Kokkos::pair<long, long> m_work_range __attribute__((aligned(16)));
+ alignas(16) Kokkos::pair<long, long> m_work_range;
// Team Offset if one thread determines work_range for others
long m_team_work_index;
static void global_lock();
static void global_unlock();
- static void spawn();
- static void first_touch_allocate_thread_private_scratch(ThreadsExec &,
+ static void first_touch_allocate_thread_private_scratch(ThreadsInternal &,
const void *);
- static void execute_sleep(ThreadsExec &, const void *);
- ThreadsExec(const ThreadsExec &);
- ThreadsExec &operator=(const ThreadsExec &);
+ ThreadsInternal(const ThreadsInternal &);
+ ThreadsInternal &operator=(const ThreadsInternal &);
static void execute_resize_scratch_in_serial();
public:
KOKKOS_INLINE_FUNCTION int pool_size() const { return m_pool_size; }
KOKKOS_INLINE_FUNCTION int pool_rank() const { return m_pool_rank; }
- KOKKOS_INLINE_FUNCTION int numa_rank() const { return m_numa_rank; }
- KOKKOS_INLINE_FUNCTION int numa_core_rank() const { return m_numa_core_rank; }
inline long team_work_index() const { return m_team_work_index; }
- static int get_thread_count();
- static ThreadsExec *get_thread(const int init_thread_rank);
+ static ThreadsInternal *get_thread(const int init_thread_rank);
inline void *reduce_memory() const { return m_scratch; }
KOKKOS_INLINE_FUNCTION void *scratch_memory() const {
return reinterpret_cast<unsigned char *>(m_scratch) + m_scratch_reduce_end;
}
- KOKKOS_INLINE_FUNCTION int volatile &state() { return m_pool_state; }
- KOKKOS_INLINE_FUNCTION ThreadsExec *const *pool_base() const {
+ KOKKOS_INLINE_FUNCTION auto &state() { return m_pool_state; }
+ KOKKOS_INLINE_FUNCTION ThreadsInternal *const *pool_base() const {
return m_pool_base;
}
static void driver(void);
- ~ThreadsExec();
- ThreadsExec();
+ ~ThreadsInternal();
+ ThreadsInternal();
static void *resize_scratch(size_t reduce_size, size_t thread_size);
static void finalize();
- /* Given a requested team size, return valid team size */
- static unsigned team_size_valid(unsigned);
-
static void print_configuration(std::ostream &, const bool detail = false);
- //------------------------------------
-
- static void wait_yield(volatile int &, const int);
-
//------------------------------------
// All-thread functions:
// Fan-in reduction with highest ranking thread as the root
for (int i = 0; i < m_pool_fan_size; ++i) {
// Wait: Active -> Rendezvous
- Impl::spinwait_while_equal<int>(
- m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+ ThreadState::Active);
}
if (rev_rank) {
- m_pool_state = ThreadsExec::Rendezvous;
+ m_pool_state = ThreadState::Rendezvous;
// Wait: Rendezvous -> Active
- Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+ spinwait_while_equal(m_pool_state, ThreadState::Rendezvous);
} else {
// Root thread does the reduction and broadcast
memory_fence();
for (int rank = 0; rank < m_pool_size; ++rank) {
- get_thread(rank)->m_pool_state = ThreadsExec::Active;
+ get_thread(rank)->m_pool_state = ThreadState::Active;
}
}
// Fan-in reduction with highest ranking thread as the root
for (int i = 0; i < m_pool_fan_size; ++i) {
// Wait: Active -> Rendezvous
- Impl::spinwait_while_equal<int>(
- m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+ ThreadState::Active);
}
if (rev_rank) {
- m_pool_state = ThreadsExec::Rendezvous;
+ m_pool_state = ThreadState::Rendezvous;
// Wait: Rendezvous -> Active
- Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+ spinwait_while_equal(m_pool_state, ThreadState::Rendezvous);
} else {
// Root thread does the reduction and broadcast
memory_fence();
for (int rank = 0; rank < m_pool_size; ++rank) {
- get_thread(rank)->m_pool_state = ThreadsExec::Active;
+ get_thread(rank)->m_pool_state = ThreadState::Active;
}
}
}
const int rev_rank = m_pool_size - (m_pool_rank + 1);
for (int i = 0; i < m_pool_fan_size; ++i) {
- ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+ ThreadsInternal &fan = *m_pool_base[rev_rank + (1 << i)];
- Impl::spinwait_while_equal<int>(fan.m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(fan.m_pool_state, ThreadState::Active);
f.join(
reinterpret_cast<typename FunctorType::value_type *>(reduce_memory()),
// to inactive triggers another thread to exit a spinwait
// and read the 'reduce_memory'.
// Must 'memory_fence()' to guarantee that storing the update to
- // 'reduce_memory()' will complete before storing the the update to
+ // 'reduce_memory()' will complete before storing the update to
// 'm_pool_state'.
memory_fence();
const int rev_rank = m_pool_size - (m_pool_rank + 1);
for (int i = 0; i < m_pool_fan_size; ++i) {
- Impl::spinwait_while_equal<int>(
- m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+ ThreadState::Active);
}
}
//--------------------------------
// Fan-in reduction with highest ranking thread as the root
for (int i = 0; i < m_pool_fan_size; ++i) {
- ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+ ThreadsInternal &fan = *m_pool_base[rev_rank + (1 << i)];
// Wait: Active -> ReductionAvailable (or ScanAvailable)
- Impl::spinwait_while_equal<int>(fan.m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(fan.m_pool_state, ThreadState::Active);
f.join(work_value, fan.reduce_memory());
}
if (rev_rank) {
// Set: Active -> ReductionAvailable
- m_pool_state = ThreadsExec::ReductionAvailable;
+ m_pool_state = ThreadState::ReductionAvailable;
// Wait for contributing threads' scan value to be available.
if ((1 << m_pool_fan_size) < (m_pool_rank + 1)) {
- ThreadsExec &th = *m_pool_base[rev_rank + (1 << m_pool_fan_size)];
+ ThreadsInternal &th = *m_pool_base[rev_rank + (1 << m_pool_fan_size)];
// Wait: Active -> ReductionAvailable
// Wait: ReductionAvailable -> ScanAvailable
- Impl::spinwait_while_equal<int>(th.m_pool_state, ThreadsExec::Active);
- Impl::spinwait_while_equal<int>(th.m_pool_state,
- ThreadsExec::ReductionAvailable);
+ spinwait_while_equal(th.m_pool_state, ThreadState::Active);
+ spinwait_while_equal(th.m_pool_state, ThreadState::ReductionAvailable);
f.join(work_value + count, ((scalar_type *)th.reduce_memory()) + count);
}
// This thread has completed inclusive scan
// Set: ReductionAvailable -> ScanAvailable
- m_pool_state = ThreadsExec::ScanAvailable;
+ m_pool_state = ThreadState::ScanAvailable;
// Wait for all threads to complete inclusive scan
// Wait: ScanAvailable -> Rendezvous
- Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::ScanAvailable);
+ spinwait_while_equal(m_pool_state, ThreadState::ScanAvailable);
}
//--------------------------------
for (int i = 0; i < m_pool_fan_size; ++i) {
- ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+ ThreadsInternal &fan = *m_pool_base[rev_rank + (1 << i)];
// Wait: ReductionAvailable -> ScanAvailable
- Impl::spinwait_while_equal<int>(fan.m_pool_state,
- ThreadsExec::ReductionAvailable);
+ spinwait_while_equal(fan.m_pool_state, ThreadState::ReductionAvailable);
// Set: ScanAvailable -> Rendezvous
- fan.m_pool_state = ThreadsExec::Rendezvous;
+ fan.m_pool_state = ThreadState::Rendezvous;
}
// All threads have completed the inclusive scan.
if ((rev_rank + 1) < m_pool_size) {
// Exclusive scan: copy the previous thread's inclusive scan value
- ThreadsExec &th = *m_pool_base[rev_rank + 1]; // Not the root thread
+ ThreadsInternal &th = *m_pool_base[rev_rank + 1]; // Not the root thread
const scalar_type *const src_value =
((scalar_type *)th.reduce_memory()) + count;
// Wait for all threads to copy previous thread's inclusive scan value
// Wait for all threads: Rendezvous -> ScanCompleted
for (int i = 0; i < m_pool_fan_size; ++i) {
- Impl::spinwait_while_equal<int>(
- m_pool_base[rev_rank + (1 << i)]->m_pool_state,
- ThreadsExec::Rendezvous);
+ spinwait_while_equal(m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+ ThreadState::Rendezvous);
}
if (rev_rank) {
// Set: ScanAvailable -> ScanCompleted
- m_pool_state = ThreadsExec::ScanCompleted;
+ m_pool_state = ThreadState::ScanCompleted;
// Wait: ScanCompleted -> Active
- Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::ScanCompleted);
+ spinwait_while_equal(m_pool_state, ThreadState::ScanCompleted);
}
// Set: ScanCompleted -> Active
for (int i = 0; i < m_pool_fan_size; ++i) {
- m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadsExec::Active;
+ m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadState::Active;
}
}
// Fan-in reduction with highest ranking thread as the root
for (int i = 0; i < m_pool_fan_size; ++i) {
// Wait: Active -> Rendezvous
- Impl::spinwait_while_equal<int>(
- m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+ spinwait_while_equal(m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+ ThreadState::Active);
}
for (unsigned i = 0; i < count; ++i) {
}
if (rev_rank) {
- m_pool_state = ThreadsExec::Rendezvous;
+ m_pool_state = ThreadState::Rendezvous;
// Wait: Rendezvous -> Active
- Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+ spinwait_while_equal(m_pool_state, ThreadState::Rendezvous);
} else {
// Root thread does the thread-scan before releasing threads
}
for (int i = 0; i < m_pool_fan_size; ++i) {
- m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadsExec::Active;
+ m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadState::Active;
}
}
* complete and release the Threads device.
* Acquire the Threads device and start this functor.
*/
- static void start(void (*)(ThreadsExec &, const void *), const void *);
+ static void start(void (*)(ThreadsInternal &, const void *), const void *);
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
static int in_parallel();
+#endif
static void fence();
static void fence(const std::string &);
- static void internal_fence(
- Impl::fence_is_static is_static = Impl::fence_is_static::yes);
- static void internal_fence(
- const std::string &,
- Impl::fence_is_static is_static = Impl::fence_is_static::yes);
- static bool sleep();
- static bool wake();
+ static void internal_fence();
/* Dynamic Scheduling related functionality */
// Initialize the work range for this thread
namespace Kokkos {
-inline int Threads::in_parallel() { return Impl::ThreadsExec::in_parallel(); }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+KOKKOS_DEPRECATED inline int Threads::in_parallel() {
+ return Impl::ThreadsInternal::in_parallel();
+}
+#endif
inline int Threads::impl_is_initialized() {
- return Impl::ThreadsExec::is_initialized();
+ return Impl::ThreadsInternal::is_initialized();
}
inline void Threads::impl_initialize(InitializationSettings const &settings) {
- Impl::ThreadsExec::initialize(
+ Impl::ThreadsInternal::initialize(
settings.has_num_threads() ? settings.get_num_threads() : -1);
}
-inline void Threads::impl_finalize() { Impl::ThreadsExec::finalize(); }
+inline void Threads::impl_finalize() { Impl::ThreadsInternal::finalize(); }
inline void Threads::print_configuration(std::ostream &os, bool verbose) const {
os << "Host Parallel Execution Space:\n";
os << " KOKKOS_ENABLE_THREADS: yes\n";
os << "\nThreads Runtime Configuration:\n";
- Impl::ThreadsExec::print_configuration(os, verbose);
+ Impl::ThreadsInternal::print_configuration(os, verbose);
}
inline void Threads::impl_static_fence(const std::string &name) {
- Impl::ThreadsExec::internal_fence(name, Impl::fence_is_static::yes);
+ Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
+ name,
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+ GlobalDeviceSynchronization,
+ Impl::ThreadsInternal::internal_fence);
}
} /* namespace Kokkos */
-#endif /* #define KOKKOS_THREADSEXEC_HPP */
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_MDRANGEPOLICY_HPP_
+#define KOKKOS_THREADS_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// Settings for TeamMDRangePolicy
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct ThreadAndVectorNestLevel<Rank, Threads, ThreadAndVector>
+ : HostBasedNestLevel<Rank, ThreadAndVector> {};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_FOR_MDRANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_FOR_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+ Kokkos::Threads> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+
+ using WorkTag = typename MDRangePolicy::work_tag;
+
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+ const iterate_type m_iter;
+
+ inline void exec_range(const Member ibeg, const Member iend) const {
+ for (Member i = ibeg; i < iend; ++i) {
+ m_iter(i);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ exec_schedule<typename Policy::schedule_type::type>(instance, arg);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Static>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelFor &self = *((const ParallelFor *)arg);
+
+ auto const num_tiles = self.m_iter.m_rp.m_num_tiles;
+ WorkRange range(Policy(0, num_tiles).set_chunk_size(1),
+ instance.pool_rank(), instance.pool_size());
+
+ self.exec_range(range.begin(), range.end());
+
+ instance.fan_in();
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelFor &self = *((const ParallelFor *)arg);
+
+ auto const num_tiles = self.m_iter.m_rp.m_num_tiles;
+ WorkRange range(Policy(0, num_tiles).set_chunk_size(1),
+ instance.pool_rank(), instance.pool_size());
+
+ instance.set_work_range(range.begin(), range.end(), 1);
+ instance.reset_steal_target();
+ instance.barrier();
+
+ long work_index = instance.get_work_index();
+
+ while (work_index != -1) {
+ const Member begin = static_cast<Member>(work_index);
+ const Member end = begin + 1 < num_tiles ? begin + 1 : num_tiles;
+
+ self.exec_range(begin, end);
+ work_index = instance.get_work_index();
+ }
+
+ instance.fan_in();
+ }
+
+ public:
+ inline void execute() const {
+ ThreadsInternal::start(&ParallelFor::exec, this);
+ ThreadsInternal::fence();
+ }
+
+ ParallelFor(const FunctorType &arg_functor, const MDRangePolicy &arg_policy)
+ : m_iter(arg_policy, arg_functor) {}
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy &, const Functor &) {
+ /**
+     * 1024 here is just our guess for a reasonable max tile size;
+     * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
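
A minimal dispatch that this specialization would service, sketched with the standard Kokkos API (the function and view names are illustrative):

#include <Kokkos_Core.hpp>

void scale(Kokkos::View<double **, Kokkos::HostSpace> a) {
  const int n0 = a.extent(0), n1 = a.extent(1);
  using policy_t = Kokkos::MDRangePolicy<Kokkos::Threads, Kokkos::Rank<2>>;
  // The 2-D iteration space is tiled behind HostIterateTile; each tile
  // becomes one work item of the Policy(0, num_tiles) range seen above.
  Kokkos::parallel_for("scale", policy_t({0, 0}, {n0, n1}),
                       KOKKOS_LAMBDA(int i, int j) { a(i, j) *= 2.0; });
}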
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_FOR_RANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_FOR_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Threads> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member ibeg, const Member iend) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(i);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member ibeg, const Member iend) {
+ const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(t, i);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ exec_schedule<typename Policy::schedule_type::type>(instance, arg);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Static>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelFor &self = *((const ParallelFor *)arg);
+
+ WorkRange range(self.m_policy, instance.pool_rank(), instance.pool_size());
+
+ ParallelFor::template exec_range<WorkTag>(self.m_functor, range.begin(),
+ range.end());
+
+ instance.fan_in();
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelFor &self = *((const ParallelFor *)arg);
+
+ WorkRange range(self.m_policy, instance.pool_rank(), instance.pool_size());
+
+ instance.set_work_range(range.begin() - self.m_policy.begin(),
+ range.end() - self.m_policy.begin(),
+ self.m_policy.chunk_size());
+ instance.reset_steal_target();
+ instance.barrier();
+
+ long work_index = instance.get_work_index();
+
+ while (work_index != -1) {
+ const Member begin =
+ static_cast<Member>(work_index) * self.m_policy.chunk_size() +
+ self.m_policy.begin();
+ const Member end =
+ begin + self.m_policy.chunk_size() < self.m_policy.end()
+ ? begin + self.m_policy.chunk_size()
+ : self.m_policy.end();
+ ParallelFor::template exec_range<WorkTag>(self.m_functor, begin, end);
+ work_index = instance.get_work_index();
+ }
+
+ instance.fan_in();
+ }
+
+ public:
+ inline void execute() const {
+ ThreadsInternal::start(&ParallelFor::exec, this);
+ ThreadsInternal::fence();
+ }
+
+ ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
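
A sketch of the two scheduling paths above from the caller's side; names are illustrative, the policies are standard Kokkos API:

#include <Kokkos_Core.hpp>

void fill(Kokkos::View<double *, Kokkos::HostSpace> v) {
  const int n = v.extent(0);

  // Static: each thread receives one contiguous WorkRange.
  Kokkos::parallel_for("fill_static",
                       Kokkos::RangePolicy<Kokkos::Threads>(0, n),
                       KOKKOS_LAMBDA(int i) { v(i) = i; });

  // Dynamic: threads pull chunk_size()-sized work items and may steal,
  // matching the get_work_index() loop above.
  using dyn_policy =
      Kokkos::RangePolicy<Kokkos::Threads, Kokkos::Schedule<Kokkos::Dynamic>>;
  Kokkos::parallel_for("fill_dynamic", dyn_policy(0, n),
                       KOKKOS_LAMBDA(int i) { v(i) = 2.0 * i; });
}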
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_FOR_TEAM_HPP
+#define KOKKOS_THREADS_PARALLEL_FOR_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+ Kokkos::Threads> {
+ private:
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+ const size_t m_shared;
+
+ template <class TagType, class Schedule>
+ inline static std::enable_if_t<std::is_void_v<TagType> &&
+ std::is_same_v<Schedule, Kokkos::Static>>
+ exec_team(const FunctorType &functor, Member member) {
+ for (; member.valid_static(); member.next_static()) {
+ functor(member);
+ }
+ }
+
+ template <class TagType, class Schedule>
+ inline static std::enable_if_t<!std::is_void_v<TagType> &&
+ std::is_same_v<Schedule, Kokkos::Static>>
+ exec_team(const FunctorType &functor, Member member) {
+ const TagType t{};
+ for (; member.valid_static(); member.next_static()) {
+ functor(t, member);
+ }
+ }
+
+ template <class TagType, class Schedule>
+ inline static std::enable_if_t<std::is_void_v<TagType> &&
+ std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_team(const FunctorType &functor, Member member) {
+ for (; member.valid_dynamic(); member.next_dynamic()) {
+ functor(member);
+ }
+ }
+
+ template <class TagType, class Schedule>
+ inline static std::enable_if_t<!std::is_void_v<TagType> &&
+ std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_team(const FunctorType &functor, Member member) {
+ const TagType t{};
+ for (; member.valid_dynamic(); member.next_dynamic()) {
+ functor(t, member);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ const ParallelFor &self = *((const ParallelFor *)arg);
+
+ ParallelFor::exec_team<WorkTag, typename Policy::schedule_type::type>(
+ self.m_functor, Member(&instance, self.m_policy, self.m_shared));
+
+ instance.barrier();
+ instance.fan_in();
+ }
+ template <typename Policy>
+ Policy fix_policy(Policy policy) {
+ if (policy.impl_vector_length() < 0) {
+ policy.impl_set_vector_length(1);
+ }
+ if (policy.team_size() < 0) {
+ int team_size = policy.team_size_recommended(m_functor, ParallelForTag{});
+ if (team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelFor<Threads, TeamPolicy> could not find a "
+ "valid execution configuration.");
+ policy.impl_set_team_size(team_size);
+ }
+ return policy;
+ }
+
+ public:
+ inline void execute() const {
+ ThreadsInternal::resize_scratch(
+ 0, Policy::member_type::team_reduce_size() + m_shared);
+
+ ThreadsInternal::start(&ParallelFor::exec, this);
+
+ ThreadsInternal::fence();
+ }
+
+ ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_functor(arg_functor),
+ m_policy(fix_policy(arg_policy)),
+ m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor, m_policy.team_size())) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
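
A hedged sketch of a TeamPolicy dispatch handled by this class (illustrative names); Kokkos::AUTO leaves the team size to fix_policy()/team_size_recommended():

#include <Kokkos_Core.hpp>

void team_fill(Kokkos::View<double **, Kokkos::HostSpace> a) {
  using policy_t = Kokkos::TeamPolicy<Kokkos::Threads>;
  using member_t = policy_t::member_type;  // ThreadsExecTeamMember
  Kokkos::parallel_for(
      "team_fill", policy_t(a.extent(0), Kokkos::AUTO),
      KOKKOS_LAMBDA(const member_t &team) {
        const int row = team.league_rank();
        Kokkos::parallel_for(Kokkos::TeamThreadRange(team, int(a.extent(1))),
                             [&](int col) { a(row, col) = row + col; });
      });
}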
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_REDUCE_MDRANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_REDUCE_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::MDRangePolicy<Traits...>, Kokkos::Threads> {
+ private:
+ using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+ using Policy = typename MDRangePolicy::impl_range_policy;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename MDRangePolicy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using value_type = typename ReducerType::value_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ using iterate_type = typename Kokkos::Impl::HostIterateTile<
+ MDRangePolicy, CombinedFunctorReducerType, WorkTag, reference_type>;
+
+ const iterate_type m_iter;
+ const pointer_type m_result_ptr;
+
+ inline void exec_range(const Member &ibeg, const Member &iend,
+ reference_type update) const {
+ for (Member i = ibeg; i < iend; ++i) {
+ m_iter(i, update);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ exec_schedule<typename Policy::schedule_type::type>(instance, arg);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Static>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelReduce &self = *((const ParallelReduce *)arg);
+
+ const auto num_tiles = self.m_iter.m_rp.m_num_tiles;
+ const WorkRange range(Policy(0, num_tiles).set_chunk_size(1),
+ instance.pool_rank(), instance.pool_size());
+
+ const ReducerType &reducer = self.m_iter.m_func.get_reducer();
+ self.exec_range(
+ range.begin(), range.end(),
+ reducer.init(static_cast<pointer_type>(instance.reduce_memory())));
+
+ instance.fan_in_reduce(reducer);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelReduce &self = *((const ParallelReduce *)arg);
+
+ const auto num_tiles = self.m_iter.m_rp.m_num_tiles;
+ const WorkRange range(Policy(0, num_tiles).set_chunk_size(1),
+ instance.pool_rank(), instance.pool_size());
+
+ instance.set_work_range(range.begin(), range.end(), 1);
+ instance.reset_steal_target();
+ instance.barrier();
+
+ long work_index = instance.get_work_index();
+
+ const ReducerType &reducer = self.m_iter.m_func.get_reducer();
+ reference_type update =
+ reducer.init(static_cast<pointer_type>(instance.reduce_memory()));
+ while (work_index != -1) {
+ const Member begin = static_cast<Member>(work_index);
+ const Member end = begin + 1 < num_tiles ? begin + 1 : num_tiles;
+ self.exec_range(begin, end, update);
+ work_index = instance.get_work_index();
+ }
+
+ instance.fan_in_reduce(reducer);
+ }
+
+ public:
+ inline void execute() const {
+ const ReducerType &reducer = m_iter.m_func.get_reducer();
+ ThreadsInternal::resize_scratch(reducer.value_size(), 0);
+
+ ThreadsInternal::start(&ParallelReduce::exec, this);
+
+ ThreadsInternal::fence();
+
+ if (m_result_ptr) {
+ const pointer_type data =
+ (pointer_type)ThreadsInternal::root_reduce_scratch();
+
+ const unsigned n = reducer.value_count();
+ for (unsigned i = 0; i < n; ++i) {
+ m_result_ptr[i] = data[i];
+ }
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ const MDRangePolicy &arg_policy,
+ const ViewType &arg_result_view)
+ : m_iter(arg_policy, arg_functor_reducer),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(Kokkos::is_view<ViewType>::value,
+ "Kokkos::Threads reduce result must be a View");
+
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Threads reduce result must be a View accessible from "
+ "HostSpace");
+ }
+
+ template <typename Policy, typename Functor>
+ static int max_tile_size_product(const Policy &, const Functor &) {
+ /**
+     * 1024 here is just our guess for a reasonable max tile size;
+     * it isn't a hardware constraint. If people see a use for larger
+ * tile size products, we're happy to change this.
+ */
+ return 1024;
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_REDUCE_RANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_REDUCE_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Traits>
+class ParallelReduce<CombinedFunctorReducerType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Threads> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+
+ using WorkTag = typename Policy::work_tag;
+ using WorkRange = typename Policy::WorkRange;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(i, update);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update) {
+ const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(t, i, update);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ exec_schedule<typename Policy::schedule_type::type>(instance, arg);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Static>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelReduce &self = *((const ParallelReduce *)arg);
+ const WorkRange range(self.m_policy, instance.pool_rank(),
+ instance.pool_size());
+
+ const ReducerType &reducer = self.m_functor_reducer.get_reducer();
+
+ ParallelReduce::template exec_range<WorkTag>(
+ self.m_functor_reducer.get_functor(), range.begin(), range.end(),
+ reducer.init(static_cast<pointer_type>(instance.reduce_memory())));
+
+ instance.fan_in_reduce(reducer);
+ }
+
+ template <class Schedule>
+ static std::enable_if_t<std::is_same_v<Schedule, Kokkos::Dynamic>>
+ exec_schedule(ThreadsInternal &instance, const void *arg) {
+ const ParallelReduce &self = *((const ParallelReduce *)arg);
+ const WorkRange range(self.m_policy, instance.pool_rank(),
+ instance.pool_size());
+
+ instance.set_work_range(range.begin() - self.m_policy.begin(),
+ range.end() - self.m_policy.begin(),
+ self.m_policy.chunk_size());
+ instance.reset_steal_target();
+ instance.barrier();
+
+ long work_index = instance.get_work_index();
+ const ReducerType &reducer = self.m_functor_reducer.get_reducer();
+
+ reference_type update =
+ reducer.init(static_cast<pointer_type>(instance.reduce_memory()));
+ while (work_index != -1) {
+ const Member begin =
+ static_cast<Member>(work_index) * self.m_policy.chunk_size() +
+ self.m_policy.begin();
+ const Member end =
+ begin + self.m_policy.chunk_size() < self.m_policy.end()
+ ? begin + self.m_policy.chunk_size()
+ : self.m_policy.end();
+ ParallelReduce::template exec_range<WorkTag>(
+ self.m_functor_reducer.get_functor(), begin, end, update);
+ work_index = instance.get_work_index();
+ }
+
+ instance.fan_in_reduce(reducer);
+ }
+
+ public:
+ inline void execute() const {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+
+ if (m_policy.end() <= m_policy.begin()) {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ } else {
+ ThreadsInternal::resize_scratch(reducer.value_size(), 0);
+
+ ThreadsInternal::start(&ParallelReduce::exec, this);
+
+ ThreadsInternal::fence();
+
+ if (m_result_ptr) {
+ const pointer_type data =
+ (pointer_type)ThreadsInternal::root_reduce_scratch();
+
+ const unsigned n = reducer.value_count();
+ for (unsigned i = 0; i < n; ++i) {
+ m_result_ptr[i] = data[i];
+ }
+ }
+ }
+ }
+
+ template <class ViewType>
+ ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ const Policy &arg_policy, const ViewType &arg_result_view)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(Kokkos::is_view<ViewType>::value,
+ "Kokkos::Threads reduce result must be a View");
+
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Threads reduce result must be a View accessible from "
+ "HostSpace");
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
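
A minimal caller for this specialization (illustrative names; the scalar overload of parallel_reduce wraps 'sum' in an unmanaged host View, which satisfies the static_asserts above):

#include <Kokkos_Core.hpp>

double sum_of_squares(Kokkos::View<const double *, Kokkos::HostSpace> v) {
  double sum = 0.0;
  // Per-thread partials live in the instance's reduce_memory(); execute()
  // copies the root scratch into 'sum' after fan_in_reduce().
  Kokkos::parallel_reduce(
      "sum_sq", Kokkos::RangePolicy<Kokkos::Threads>(0, v.extent(0)),
      KOKKOS_LAMBDA(int i, double &partial) { partial += v(i) * v(i); }, sum);
  return sum;
}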
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_REDUCE_TEAM_HPP
+#define KOKKOS_THREADS_PARALLEL_REDUCE_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class CombinedFunctorReducerType, class... Properties>
+class ParallelReduce<CombinedFunctorReducerType,
+ Kokkos::TeamPolicy<Properties...>, Kokkos::Threads> {
+ private:
+ using Policy =
+ Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
+ using FunctorType = typename CombinedFunctorReducerType::functor_type;
+ using ReducerType = typename CombinedFunctorReducerType::reducer_type;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using pointer_type = typename ReducerType::pointer_type;
+ using reference_type = typename ReducerType::reference_type;
+
+ const CombinedFunctorReducerType m_functor_reducer;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+ const size_t m_shared;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void_v<TagType>> exec_team(
+ const FunctorType &functor, Member member, reference_type update) {
+ for (; member.valid_static(); member.next_static()) {
+ functor(member, update);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void_v<TagType>> exec_team(
+ const FunctorType &functor, Member member, reference_type update) {
+ const TagType t{};
+ for (; member.valid_static(); member.next_static()) {
+ functor(t, member, update);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ const ParallelReduce &self = *((const ParallelReduce *)arg);
+
+ ParallelReduce::template exec_team<WorkTag>(
+ self.m_functor_reducer.get_functor(),
+ Member(&instance, self.m_policy, self.m_shared),
+ self.m_functor_reducer.get_reducer().init(
+ static_cast<pointer_type>(instance.reduce_memory())));
+
+ instance.fan_in_reduce(self.m_functor_reducer.get_reducer());
+ }
+
+ public:
+ inline void execute() const {
+ const ReducerType &reducer = m_functor_reducer.get_reducer();
+
+ if (m_policy.league_size() * m_policy.team_size() == 0) {
+ if (m_result_ptr) {
+ reducer.init(m_result_ptr);
+ reducer.final(m_result_ptr);
+ }
+ } else {
+ ThreadsInternal::resize_scratch(
+ reducer.value_size(),
+ Policy::member_type::team_reduce_size() + m_shared);
+
+ ThreadsInternal::start(&ParallelReduce::exec, this);
+
+ ThreadsInternal::fence();
+
+ if (m_result_ptr) {
+ const pointer_type data =
+ (pointer_type)ThreadsInternal::root_reduce_scratch();
+
+ const unsigned n = reducer.value_count();
+ for (unsigned i = 0; i < n; ++i) {
+ m_result_ptr[i] = data[i];
+ }
+ }
+ }
+ }
+
+ template <typename Policy>
+ Policy fix_policy(Policy policy) {
+ if (policy.impl_vector_length() < 0) {
+ policy.impl_set_vector_length(1);
+ }
+ if (policy.team_size() < 0) {
+ int team_size = policy.team_size_recommended(
+ m_functor_reducer.get_functor(), m_functor_reducer.get_reducer(),
+ ParallelReduceTag{});
+ if (team_size <= 0)
+ Kokkos::Impl::throw_runtime_exception(
+ "Kokkos::Impl::ParallelReduce<Threads, TeamPolicy> could not find "
+ "a valid execution configuration.");
+ policy.impl_set_team_size(team_size);
+ }
+ return policy;
+ }
+
+ template <class ViewType>
+ inline ParallelReduce(const CombinedFunctorReducerType &arg_functor_reducer,
+ const Policy &arg_policy, const ViewType &arg_result)
+ : m_functor_reducer(arg_functor_reducer),
+ m_policy(fix_policy(arg_policy)),
+ m_result_ptr(arg_result.data()),
+ m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
+ FunctorTeamShmemSize<FunctorType>::value(
+ arg_functor_reducer.get_functor(), m_policy.team_size())) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Threads reduce result must be a View accessible from "
+ "HostSpace");
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_PARALLEL_SCAN_RANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_SCAN_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+ Kokkos::Threads> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkRange = typename Policy::WorkRange;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ Policy, FunctorType, void>;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update, const bool final) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(i, update, final);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update, const bool final) {
+ const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(t, i, update, final);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ const ParallelScan &self = *((const ParallelScan *)arg);
+
+ const WorkRange range(self.m_policy, instance.pool_rank(),
+ instance.pool_size());
+
+ typename Analysis::Reducer final_reducer(self.m_functor);
+
+ reference_type update =
+ final_reducer.init(static_cast<pointer_type>(instance.reduce_memory()));
+
+ ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
+ range.end(), update, false);
+
+ instance.scan_small(final_reducer);
+
+ ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
+ range.end(), update, true);
+
+ instance.fan_in();
+ }
+
+ public:
+ inline void execute() const {
+ ThreadsInternal::resize_scratch(2 * Analysis::value_size(m_functor), 0);
+ ThreadsInternal::start(&ParallelScan::exec, this);
+ ThreadsInternal::fence();
+ }
+
+ ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
+ : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+ ReturnType, Kokkos::Threads> {
+ private:
+ using Policy = Kokkos::RangePolicy<Traits...>;
+ using WorkRange = typename Policy::WorkRange;
+ using WorkTag = typename Policy::work_tag;
+ using Member = typename Policy::member_type;
+
+ using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ Policy, FunctorType, ReturnType>;
+
+ using value_type = typename Analysis::value_type;
+ using pointer_type = typename Analysis::pointer_type;
+ using reference_type = typename Analysis::reference_type;
+
+ const FunctorType m_functor;
+ const Policy m_policy;
+ const pointer_type m_result_ptr;
+
+ template <class TagType>
+ inline static std::enable_if_t<std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update, const bool final) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(i, update, final);
+ }
+ }
+
+ template <class TagType>
+ inline static std::enable_if_t<!std::is_void_v<TagType>> exec_range(
+ const FunctorType &functor, const Member &ibeg, const Member &iend,
+ reference_type update, const bool final) {
+ const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+ defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+ for (Member i = ibeg; i < iend; ++i) {
+ functor(t, i, update, final);
+ }
+ }
+
+ static void exec(ThreadsInternal &instance, const void *arg) {
+ const ParallelScanWithTotal &self = *((const ParallelScanWithTotal *)arg);
+
+ const WorkRange range(self.m_policy, instance.pool_rank(),
+ instance.pool_size());
+
+ typename Analysis::Reducer final_reducer(self.m_functor);
+
+ reference_type update =
+ final_reducer.init(static_cast<pointer_type>(instance.reduce_memory()));
+
+ ParallelScanWithTotal::template exec_range<WorkTag>(
+ self.m_functor, range.begin(), range.end(), update, false);
+
+ instance.scan_small(final_reducer);
+
+ ParallelScanWithTotal::template exec_range<WorkTag>(
+ self.m_functor, range.begin(), range.end(), update, true);
+
+ instance.fan_in();
+
+ if (instance.pool_rank() == instance.pool_size() - 1) {
+ *self.m_result_ptr = update;
+ }
+ }
+
+ public:
+ inline void execute() const {
+ ThreadsInternal::resize_scratch(2 * Analysis::value_size(m_functor), 0);
+ ThreadsInternal::start(&ParallelScanWithTotal::exec, this);
+ ThreadsInternal::fence();
+ }
+
+ template <class ViewType>
+ ParallelScanWithTotal(const FunctorType &arg_functor,
+ const Policy &arg_policy,
+ const ViewType &arg_result_view)
+ : m_functor(arg_functor),
+ m_policy(arg_policy),
+ m_result_ptr(arg_result_view.data()) {
+ static_assert(
+ Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+ Kokkos::HostSpace>::accessible,
+ "Kokkos::Threads parallel_scan result must be host-accessible!");
+ }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
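
A sketch of an exclusive prefix sum exercising the two-pass protocol above (illustrative names; the trailing 'total' argument routes the dispatch to ParallelScanWithTotal):

#include <Kokkos_Core.hpp>

long exclusive_prefix_sum(Kokkos::View<const int *, Kokkos::HostSpace> in,
                          Kokkos::View<long *, Kokkos::HostSpace> out) {
  long total = 0;
  // The functor body runs twice: once with final == false to accumulate
  // per-thread partials, then again with final == true after scan_small()
  // has stitched the partials together across threads.
  Kokkos::parallel_scan(
      "prefix", Kokkos::RangePolicy<Kokkos::Threads>(0, in.extent(0)),
      KOKKOS_LAMBDA(int i, long &update, const bool final) {
        if (final) out(i) = update;  // exclusive: record before adding
        update += in(i);
      },
      total);
  return total;
}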
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
#include <Kokkos_Atomic.hpp>
-#include <impl/Kokkos_Spinwait.hpp>
+#include <Threads/Kokkos_Threads_Spinwait.hpp>
#include <impl/Kokkos_BitOps.hpp>
#include <thread>
#endif /* defined( KOKKOS_ENABLE_ASM ) */
}
+// Publish this thread's prior writes, then back off actively until another
+// thread moves 'flag' away from 'value'; the trailing load fence makes the
+// releasing thread's writes visible before the caller proceeds.
+void spinwait_while_equal(std::atomic<ThreadState> const& flag,
+                          ThreadState const value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value == flag) {
+    host_thread_yield(++i, WaitMode::ACTIVE);
+  }
+  Kokkos::load_fence();
+}
+
} // namespace Impl
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_SPINWAIT_HPP
+#define KOKKOS_THREADS_SPINWAIT_HPP
+
+#include <Threads/Kokkos_Threads_State.hpp>
+
+#include <cstdint>
+#include <atomic>
+
+namespace Kokkos {
+namespace Impl {
+
+enum class WaitMode : int {
+  ACTIVE  // Used for tight loops to keep threads active as long as possible
+ ,
+  PASSIVE  // Used to quickly yield the thread to quiet down the system
+ ,
+ ROOT // Never sleep or yield the root thread
+};
+
+void host_thread_yield(const uint32_t i, const WaitMode mode);
+
+void spinwait_while_equal(std::atomic<ThreadState> const& flag,
+ ThreadState const value);
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
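
A small usage sketch of the helper declared above (illustrative function name, assuming the include layout of this directory):

#include <Threads/Kokkos_Threads_Spinwait.hpp>
#include <atomic>

void wait_until_released(std::atomic<Kokkos::Impl::ThreadState> &state) {
  // Blocks with ACTIVE backoff while 'state' stays Rendezvous; returns as
  // soon as a peer stores a different ThreadState, e.g. Active.
  Kokkos::Impl::spinwait_while_equal(state,
                                     Kokkos::Impl::ThreadState::Rendezvous);
}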
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_STATE_HPP
+#define KOKKOS_THREADS_STATE_HPP
+
+namespace Kokkos {
+namespace Impl {
+/** \brief States of a worker thread */
+enum class ThreadState {
+ Terminating ///< Termination in progress
+ ,
+ Inactive ///< Exists, waiting for work
+ ,
+ Active ///< Exists, performing work
+ ,
+ Rendezvous ///< Exists, waiting in a barrier or reduce
+ ,
+ ScanCompleted,
+ ScanAvailable,
+ ReductionAvailable
+};
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_THREADSTEAM_HPP
#define KOKKOS_THREADSTEAM_HPP
#include <cstdio>
#include <utility>
-#include <impl/Kokkos_Spinwait.hpp>
#include <impl/Kokkos_HostThreadTeam.hpp>
#include <Kokkos_Atomic.hpp>
+#include <Threads/Kokkos_Threads_Spinwait.hpp>
+#include <Threads/Kokkos_Threads_State.hpp>
//----------------------------------------------------------------------------
public:
using execution_space = Kokkos::Threads;
using scratch_memory_space = execution_space::scratch_memory_space;
+ using team_handle = ThreadsExecTeamMember;
private:
using space = execution_space::scratch_memory_space;
- ThreadsExec* const m_exec;
- ThreadsExec* const* m_team_base; ///< Base for team fan-in
+ ThreadsInternal* const m_instance;
+ ThreadsInternal* const* m_team_base; ///< Base for team fan-in
space m_team_shared;
size_t m_team_shared_size;
int m_team_size;
for (n = 1;
(!(m_team_rank_rev & n)) && ((j = m_team_rank_rev + n) < m_team_size);
n <<= 1) {
- Impl::spinwait_while_equal<int>(m_team_base[j]->state(),
- ThreadsExec::Active);
+ spinwait_while_equal(m_team_base[j]->state(), ThreadState::Active);
}
// If not root then wait for release
if (m_team_rank_rev) {
- m_exec->state() = ThreadsExec::Rendezvous;
- Impl::spinwait_while_equal<int>(m_exec->state(), ThreadsExec::Rendezvous);
+ m_instance->state() = ThreadState::Rendezvous;
+ spinwait_while_equal(m_instance->state(), ThreadState::Rendezvous);
}
return !m_team_rank_rev;
for (n = 1;
(!(m_team_rank_rev & n)) && ((j = m_team_rank_rev + n) < m_team_size);
n <<= 1) {
- m_team_base[j]->state() = ThreadsExec::Active;
+ m_team_base[j]->state() = ThreadState::Active;
}
}
KOKKOS_IF_ON_HOST((
// Make sure there is enough scratch space:
- using type = typename if_c<sizeof(ValueType) < TEAM_REDUCE_SIZE,
- ValueType, void>::type;
+ using type = std::conditional_t<sizeof(ValueType) < TEAM_REDUCE_SIZE,
+ ValueType, void>;
if (m_team_base) {
type* const local_value = ((type*)m_team_base[0]->scratch_memory());
KOKKOS_IF_ON_HOST((
// Make sure there is enough scratch space:
- using type = typename if_c<sizeof(ValueType) < TEAM_REDUCE_SIZE,
- ValueType, void>::type;
+ using type = std::conditional_t<sizeof(ValueType) < TEAM_REDUCE_SIZE,
+ ValueType, void>;
f(value); if (m_team_base) {
type* const local_value = ((type*)m_team_base[0]->scratch_memory());
memory_fence();
KOKKOS_IF_ON_HOST((
// Make sure there is enough scratch space:
using type =
- typename if_c<sizeof(Type) < TEAM_REDUCE_SIZE, Type, void>::type;
-
- if (nullptr == m_exec) return value;
+ std::conditional_t<sizeof(Type) < TEAM_REDUCE_SIZE, Type, void>;
if (team_rank() != team_size() - 1) *
- ((volatile type*)m_exec->scratch_memory()) = value;
+ ((volatile type*)m_instance->scratch_memory()) = value;
memory_fence();
}
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION
- std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
- team_reduce(const ReducerType& reducer,
- const typename ReducerType::value_type contribution) const {
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ team_reduce(const ReducerType& reducer,
+ typename ReducerType::value_type& contribution) const {
KOKKOS_IF_ON_DEVICE(((void)reducer; (void)contribution;))
- KOKKOS_IF_ON_HOST((
- using value_type = typename ReducerType::value_type;
- // Make sure there is enough scratch space:
- using type = typename if_c<sizeof(value_type) < TEAM_REDUCE_SIZE,
- value_type, void>::type;
-
- if (nullptr == m_exec) return;
-
- type* const local_value = ((type*)m_exec->scratch_memory());
+ KOKKOS_IF_ON_HOST(
+ (using value_type = typename ReducerType::value_type;
+ using wrapped_reducer_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE, TeamPolicy<Threads>,
+ ReducerType, value_type>::Reducer;
+ impl_team_reduce(wrapped_reducer_type(reducer), contribution);
+ reducer.reference() = contribution;))
+ }
- // Set this thread's contribution
- if (team_rank() != team_size() - 1) { *local_value = contribution; }
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<WrappedReducerType>::value>
+ impl_team_reduce(
+ const WrappedReducerType& wrapped_reducer,
+ typename WrappedReducerType::value_type& contribution) const {
+ using value_type = typename WrappedReducerType::value_type;
+ // Make sure there is enough scratch space:
+ using type = std::conditional_t<sizeof(value_type) < TEAM_REDUCE_SIZE,
+ value_type, void>;
+
+ type* const local_value = ((type*)m_instance->scratch_memory());
+
+ // Set this thread's contribution
+ if (team_rank() != team_size() - 1) {
+ *local_value = contribution;
+ }
- // Fence to make sure the base team member has access:
- memory_fence();
+ // Fence to make sure the base team member has access:
+ memory_fence();
- if (team_fan_in()) {
- // The last thread to synchronize returns true, all other threads
- // wait for team_fan_out()
- type* const team_value = ((type*)m_team_base[0]->scratch_memory());
+ if (team_fan_in()) {
+ // The last thread to synchronize returns true, all other threads
+ // wait for team_fan_out()
+ type* const team_value = ((type*)m_team_base[0]->scratch_memory());
- *team_value = contribution;
- // Join to the team value:
- for (int i = 1; i < m_team_size; ++i) {
- reducer.join(*team_value,
- *((type*)m_team_base[i]->scratch_memory()));
- }
+ *team_value = contribution;
+ // Join to the team value:
+ for (int i = 1; i < m_team_size; ++i) {
+ wrapped_reducer.join(team_value,
+ ((type*)m_team_base[i]->scratch_memory()));
+ }
- // Team base thread may "lap" member threads so copy out to their
- // local value.
- for (int i = 1; i < m_team_size; ++i) {
- *((type*)m_team_base[i]->scratch_memory()) = *team_value;
- }
+ // Team base thread may "lap" member threads so copy out to their
+ // local value.
+ for (int i = 1; i < m_team_size; ++i) {
+ *((type*)m_team_base[i]->scratch_memory()) = *team_value;
+ }
- // Fence to make sure all team members have access
- memory_fence();
- }
+ // Fence to make sure all team members have access
+ memory_fence();
+ }
- team_fan_out();
+ team_fan_out();
- // Value was changed by the team base
- reducer.reference() = *local_value;))
+ contribution = *local_value;
}
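+  // Illustrative summary (comment only, not upstream code) of the reduction
+  // above: every non-base member publishes its contribution to its scratch
+  // slot and enters team_fan_in(); the base member then joins all slots into
+  // the team value, copies the result back into every member's slot, and
+  // team_fan_out() releases the waiting members, which read the result from
+  // their own slot.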
/** \brief Intra-team exclusive prefix sum with team_rank() ordering
KOKKOS_IF_ON_DEVICE(((void)global_accum; return value;))
KOKKOS_IF_ON_HOST(( // Make sure there is enough scratch space:
- using type = typename if_c<sizeof(ArgType) < TEAM_REDUCE_SIZE, ArgType,
- void>::type;
+ using type = std::conditional_t<sizeof(ArgType) < TEAM_REDUCE_SIZE,
+ ArgType, void>;
- if (nullptr == m_exec) return type(0);
-
- volatile type* const work_value = ((type*)m_exec->scratch_memory());
+ volatile type* const work_value = ((type*)m_instance->scratch_memory());
*work_value = value;
template <class... Properties>
ThreadsExecTeamMember(
- Impl::ThreadsExec* exec,
+ Impl::ThreadsInternal* instance,
const TeamPolicyInternal<Kokkos::Threads, Properties...>& team,
const size_t shared_size)
- : m_exec(exec),
+ : m_instance(instance),
m_team_base(nullptr),
m_team_shared(nullptr, 0),
m_team_shared_size(shared_size),
m_chunk_size(team.chunk_size()),
m_league_chunk_end(0),
m_team_alloc(team.team_alloc()) {
+ KOKKOS_ASSERT(m_instance != nullptr);
if (team.league_size()) {
// Execution is using device-team interface:
- const int pool_rank_rev = m_exec->pool_size() - (m_exec->pool_rank() + 1);
+ const int pool_rank_rev =
+ m_instance->pool_size() - (m_instance->pool_rank() + 1);
const int team_rank_rev = pool_rank_rev % team.team_alloc();
- const size_t pool_league_size = m_exec->pool_size() / team.team_alloc();
+ const size_t pool_league_size =
+ m_instance->pool_size() / team.team_alloc();
const size_t pool_league_rank_rev = pool_rank_rev / team.team_alloc();
if (pool_league_rank_rev >= pool_league_size) {
m_invalid_thread = 1;
const size_t pool_league_rank =
pool_league_size - (pool_league_rank_rev + 1);
- const int pool_num_teams = m_exec->pool_size() / team.team_alloc();
+ const int pool_num_teams = m_instance->pool_size() / team.team_alloc();
const int chunk_size =
team.chunk_size() > 0 ? team.chunk_size() : team.team_iter();
const int chunks_per_team =
if ((team.team_alloc() > size_t(m_team_size))
? (team_rank_rev >= m_team_size)
- : (m_exec->pool_size() - pool_num_teams * m_team_size >
- m_exec->pool_rank()))
+ : (m_instance->pool_size() - pool_num_teams * m_team_size >
+ m_instance->pool_rank()))
m_invalid_thread = 1;
else
m_invalid_thread = 0;
if (team_rank_rev < team.team_size() && !m_invalid_thread) {
m_team_base =
- m_exec->pool_base() + team.team_alloc() * pool_league_rank_rev;
+ m_instance->pool_base() + team.team_alloc() * pool_league_rank_rev;
m_team_size = team.team_size();
m_team_rank = team.team_size() - (team_rank_rev + 1);
m_team_rank_rev = team_rank_rev;
}
if ((m_team_rank_rev == 0) && (m_invalid_thread == 0)) {
- m_exec->set_work_range(m_league_rank, m_league_end, m_chunk_size);
- m_exec->reset_steal_target(m_team_size);
+ m_instance->set_work_range(m_league_rank, m_league_end, m_chunk_size);
+ m_instance->reset_steal_target(m_team_size);
}
if (std::is_same<typename TeamPolicyInternal<
Kokkos::Threads, Properties...>::schedule_type::type,
Kokkos::Dynamic>::value) {
- m_exec->barrier();
+ m_instance->barrier();
}
} else {
m_invalid_thread = 1;
}
ThreadsExecTeamMember()
- : m_exec(nullptr),
+ : m_instance(nullptr),
m_team_base(nullptr),
m_team_shared(nullptr, 0),
m_team_shared_size(0),
m_invalid_thread(0),
m_team_alloc(0) {}
- inline ThreadsExec& threads_exec_team_base() const {
- return m_team_base ? **m_team_base : *m_exec;
+ inline ThreadsInternal& threads_exec_team_base() const {
+ return m_team_base ? **m_team_base : *m_instance;
}
bool valid_static() const { return m_league_rank < m_league_end; }
private:
/** \brief finalize chunk_size if it was set to AUTO*/
inline void set_auto_chunk_size() {
- int64_t concurrency = traits::execution_space::concurrency() / m_team_alloc;
+ int64_t concurrency = space().concurrency() / m_team_alloc;
if (concurrency == 0) concurrency = 1;
if (m_chunk_size > 0) {
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
const Lambda& lambda, ValueType& result) {
- ValueType intermediate;
- Sum<ValueType> sum(intermediate);
- sum.init(intermediate);
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::ThreadsExecTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- ValueType tmp = ValueType();
- lambda(i, tmp);
- intermediate += tmp;
+ lambda(i, value);
}
- loop_boundaries.thread.team_reduce(sum, intermediate);
- result = sum.reference();
+ loop_boundaries.thread.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ result = value;
}
template <typename iType, class Lambda, typename ReducerType>
parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
const Lambda& lambda, const ReducerType& reducer) {
- typename ReducerType::value_type value;
- reducer.init(value);
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::ThreadsExecTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
lambda(i, value);
}
- loop_boundaries.thread.team_reduce(reducer, value);
+ loop_boundaries.thread.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
} // namespace Kokkos
parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
const Lambda& lambda, ValueType& result) {
- result = ValueType();
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::ThreadsExecTeamMember::execution_space>, Lambda,
+ ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type value;
+ wrapped_reducer.init(&value);
+
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- lambda(i, result);
+ lambda(i, value);
}
+
+ wrapped_reducer.final(&value);
+ result = value;
}
template <typename iType, class Lambda, typename ReducerType>
parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
const Lambda& lambda, const ReducerType& reducer) {
- reducer.init(reducer.reference());
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<typename Impl::ThreadsExecTeamMember::execution_space>,
+ ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
+
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- lambda(i, reducer.reference());
+ lambda(i, value);
}
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
/** \brief Inter-thread parallel exclusive prefix sum. Executes
* lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
*
*/
-template <typename iType, class FunctorType>
+template <typename iType, class FunctorType, typename ValueType>
KOKKOS_INLINE_FUNCTION void parallel_scan(
const Impl::TeamThreadRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_bounds,
- const FunctorType& lambda) {
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void,
- FunctorType>::value_type;
+ const FunctorType& lambda, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same_v<closure_value_type, ValueType>,
+ "Non-matching value types of closure and return type");
- auto scan_val = value_type{};
+ auto scan_val = ValueType{};
// Intra-member scan
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
lambda(i, scan_val, false);
}
+ auto& team_member = loop_bounds.thread;
+
// 'scan_val' output is the exclusive prefix sum
- scan_val = loop_bounds.thread.team_scan(scan_val);
+ scan_val = team_member.team_scan(scan_val);
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
#pragma ivdep
i += loop_bounds.increment) {
lambda(i, scan_val, true);
}
+
+ team_member.team_broadcast(scan_val, team_member.team_size() - 1);
+
+ return_val = scan_val;
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::TeamThreadRangeBoundariesStruct<
+ iType, Impl::ThreadsExecTeamMember>& loop_bounds,
+ const FunctorType& lambda) {
+ using value_type = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, FunctorType,
+ void>::value_type;
+
+ value_type scan_val;
+ parallel_scan(loop_bounds, lambda, scan_val);
}
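+
+// Illustrative usage sketch (not upstream code): with the new overload the
+// total of the scan can be obtained directly. The names `member`, `n`,
+// `prefix`, and `values` are assumptions for the example.
+//
+//   int total = 0;
+//   Kokkos::parallel_scan(Kokkos::TeamThreadRange(member, n),
+//                         [=](int i, int& partial, bool final) {
+//                           if (final) prefix(i) = partial;
+//                           partial += values(i);
+//                         },
+//                         total);  // receives the broadcast overall sum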
/** \brief Intra-thread vector parallel exclusive prefix sum. Executes
* final==true. Scan_val will be set to the final sum value over all vector
* lanes.
*/
-template <typename iType, class FunctorType>
+template <typename iType, class FunctorType, typename ValueType>
KOKKOS_INLINE_FUNCTION void parallel_scan(
const Impl::ThreadVectorRangeBoundariesStruct<
iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
- const FunctorType& lambda) {
- using value_type =
+ const FunctorType& lambda, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using closure_value_type =
typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
- TeamPolicy<Threads>,
- FunctorType>::value_type;
+ TeamPolicy<Threads>, FunctorType,
+ void>::value_type;
+ static_assert(std::is_same_v<closure_value_type, ValueType>,
+ "Non-matching value types of closure and return type");
- value_type scan_val = value_type();
+ ValueType scan_val = ValueType();
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
#pragma ivdep
i += loop_boundaries.increment) {
lambda(i, scan_val, true);
}
+
+ return_val = scan_val;
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+ const Impl::ThreadVectorRangeBoundariesStruct<
+ iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+ const FunctorType& lambda) {
+ using value_type =
+ typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+ TeamPolicy<Threads>, FunctorType,
+ void>::value_type;
+
+ value_type scan_val;
+ parallel_scan(loop_boundaries, lambda, scan_val);
}
/** \brief Intra-thread vector parallel scan with reducer
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_THREADS_UNIQUETOKEN_HPP
#define KOKKOS_THREADS_UNIQUETOKEN_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
+#define KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Threads/Kokkos_Threads_Instance.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+ Kokkos::Threads> {
+ private:
+ using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+ using Self = ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+ Kokkos::Threads>;
+
+ Policy m_policy;
+ FunctorType m_functor;
+
+ template <class TagType>
+ std::enable_if_t<std::is_void_v<TagType>> exec_one(
+ const std::int32_t w) const noexcept {
+ m_functor(w);
+ }
+
+ template <class TagType>
+ std::enable_if_t<!std::is_void_v<TagType>> exec_one(
+ const std::int32_t w) const noexcept {
+ const TagType t{};
+ m_functor(t, w);
+ }
+
+ inline void exec_one_thread() const noexcept {
+ // Spin until COMPLETED_TOKEN.
+ // END_TOKEN indicates no work is currently available.
+
+ for (std::int32_t w = Policy::END_TOKEN;
+ Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+ if (Policy::END_TOKEN != w) {
+ exec_one<typename Policy::work_tag>(w);
+ m_policy.completed_work(w);
+ }
+ }
+ }
+
+ static inline void thread_main(ThreadsInternal& instance,
+ const void* arg) noexcept {
+ const Self& self = *(static_cast<const Self*>(arg));
+ self.exec_one_thread();
+ instance.fan_in();
+ }
+
+ public:
+ inline void execute() {
+ ThreadsInternal::start(&Self::thread_main, this);
+ ThreadsInternal::fence();
+ }
+
+ inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+ : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif /* #define KOKKOS_THREADS_WORKGRAPHPOLICY_HPP */
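+
+// Illustrative usage sketch (not upstream code): dispatching a work graph on
+// Threads would look roughly like the following; the policy construction
+// details are assumptions for the example.
+//
+//   Kokkos::WorkGraphPolicy<Kokkos::Threads> graph_policy(/* crs graph */);
+//   Kokkos::parallel_for("work_graph", graph_policy, KOKKOS_LAMBDA(int w) {
+//     /* process node w; pop_work()/completed_work() handle dependencies */
+//   });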
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EXPERIMENTAL_VIEWHOOKS_HPP
#define KOKKOS_EXPERIMENTAL_VIEWHOOKS_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_BASIC_VIEW_HPP
+#define KOKKOS_BASIC_VIEW_HPP
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <View/Kokkos_ViewAlloc.hpp>
+#include <View/Kokkos_ViewCtor.hpp>
+#include <View/Kokkos_ViewTraits.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Header.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Accessor.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Layout.hpp>
+
+#include <optional>
+#include <type_traits>
+
+// FIXME: we need to make this work when not using our own mdspan impl
+#define KOKKOS_IMPL_NO_UNIQUE_ADDRESS _MDSPAN_NO_UNIQUE_ADDRESS
+namespace Kokkos::Impl {
+
+constexpr inline struct SubViewCtorTag {
+ explicit SubViewCtorTag() = default;
+} subview_ctor_tag{};
+
+template <class T>
+struct KokkosSliceToMDSpanSliceImpl {
+ using type = T;
+ KOKKOS_FUNCTION
+ static constexpr decltype(auto) transform(const T &s) { return s; }
+};
+
+template <>
+struct KokkosSliceToMDSpanSliceImpl<Kokkos::ALL_t> {
+ using type = full_extent_t;
+ KOKKOS_FUNCTION
+ static constexpr decltype(auto) transform(Kokkos::ALL_t) {
+ return full_extent;
+ }
+};
+
+template <class T>
+using kokkos_slice_to_mdspan_slice =
+ typename KokkosSliceToMDSpanSliceImpl<T>::type;
+
+template <class T>
+KOKKOS_INLINE_FUNCTION constexpr decltype(auto)
+transform_kokkos_slice_to_mdspan_slice(const T &s) {
+ return KokkosSliceToMDSpanSliceImpl<T>::transform(s);
+}
+
+// We do have implementation-detail versions of these in our mdspan impl;
+// however, they are not part of the public standard interface.
+template <class T>
+struct is_layout_right_padded : public std::false_type {};
+
+template <size_t Pad>
+struct is_layout_right_padded<Kokkos::Experimental::layout_right_padded<Pad>>
+ : public std::true_type {};
+
+template <class T>
+struct is_layout_left_padded : public std::false_type {};
+
+template <size_t Pad>
+struct is_layout_left_padded<Kokkos::Experimental::layout_left_padded<Pad>>
+ : public std::true_type {};
+
+template <class ElementType, class Extents, class LayoutPolicy,
+ class AccessorPolicy>
+class BasicView {
+ public:
+ using mdspan_type =
+ mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
+ using extents_type = typename mdspan_type::extents_type;
+ using layout_type = typename mdspan_type::layout_type;
+ using accessor_type = typename mdspan_type::accessor_type;
+ using mapping_type = typename mdspan_type::mapping_type;
+ using element_type = typename mdspan_type::element_type;
+ using value_type = typename mdspan_type::value_type;
+ using index_type = typename mdspan_type::index_type;
+ using size_type = typename mdspan_type::size_type;
+ using rank_type = typename mdspan_type::rank_type;
+ using data_handle_type = typename mdspan_type::data_handle_type;
+ using reference = typename mdspan_type::reference;
+ using memory_space = typename accessor_type::memory_space;
+ using execution_space = typename memory_space::execution_space;
+
+ // For now View and BasicView will have a restriction that the data handle
+ // needs to be convertible to element_type* and vice versa
+ static_assert(std::is_constructible_v<element_type *, data_handle_type>);
+ static_assert(std::is_constructible_v<data_handle_type, element_type *>);
+
+ KOKKOS_FUNCTION static constexpr rank_type rank() noexcept {
+ return extents_type::rank();
+ }
+ KOKKOS_FUNCTION static constexpr rank_type rank_dynamic() noexcept {
+ return extents_type::rank_dynamic();
+ }
+ KOKKOS_FUNCTION static constexpr size_t static_extent(rank_type r) noexcept {
+ return extents_type::static_extent(r);
+ }
+ KOKKOS_FUNCTION constexpr index_type extent(rank_type r) const noexcept {
+ return m_map.extents().extent(r);
+ };
+
+ protected:
+ // These are precondition checks which are unconditionally enabled (i.e.
+ // even in release mode) in Kokkos::View 4.4
+ template <class OtherMapping>
+ KOKKOS_FUNCTION static constexpr void check_basic_view_constructibility(
+ [[maybe_unused]] const OtherMapping &rhs) {
+ using src_t = typename OtherMapping::layout_type;
+ using dst_t = layout_type;
+ constexpr size_t rnk = mdspan_type::rank();
+ if constexpr (!std::is_same_v<src_t, dst_t>) {
+ if constexpr (Impl::is_layout_left_padded<dst_t>::value) {
+ if constexpr (std::is_same_v<src_t, layout_stride>) {
+ index_type stride = 1;
+ for (size_t r = 0; r < rnk; r++) {
+ if (rhs.stride(r) != stride)
+ Kokkos::abort("View assignment must have compatible layouts");
+ if constexpr (rnk > 1)
+ stride *= (r == 0 ? rhs.stride(1) : rhs.extents().extent(r));
+ }
+ }
+ }
+ if constexpr (Impl::is_layout_right_padded<dst_t>::value) {
+ if constexpr (std::is_same_v<src_t, layout_stride>) {
+ index_type stride = 1;
+ if constexpr (rnk > 0) {
+ for (size_t r = rnk; r > 0; r--) {
+ if (rhs.stride(r - 1) != stride)
+ Kokkos::abort("View assignment must have compatible layouts");
+ if constexpr (rnk > 1)
+ stride *= (r == rnk ? rhs.stride(r - 2)
+ : rhs.extents().extent(r - 1));
+ }
+ }
+ }
+ }
+ if constexpr (std::is_same_v<dst_t, layout_left>) {
+ if constexpr (std::is_same_v<src_t, layout_stride>) {
+ index_type stride = 1;
+ for (size_t r = 0; r < rnk; r++) {
+ if (rhs.stride(r) != stride)
+ Kokkos::abort("View assignment must have compatible layouts");
+ stride *= rhs.extents().extent(r);
+ }
+ } else if constexpr (Impl::is_layout_left_padded<src_t>::value &&
+ rnk > 1) {
+ if (rhs.stride(1) != rhs.extents().extent(0))
+ Kokkos::abort("View assignment must have compatible layouts");
+ }
+ }
+ if constexpr (std::is_same_v<dst_t, layout_right>) {
+ if constexpr (std::is_same_v<src_t, layout_stride>) {
+ index_type stride = 1;
+ if constexpr (rnk > 0) {
+ for (size_t r = rnk; r > 0; r--) {
+ if (rhs.stride(r - 1) != stride)
+ Kokkos::abort("View assignment must have compatible layouts");
+ stride *= rhs.extents().extent(r - 1);
+ }
+ }
+ } else if constexpr (Impl::is_layout_right_padded<src_t>::value &&
+ rnk > 1) {
+ if (rhs.stride(rnk - 2) != rhs.extents().extent(rnk - 1))
+ Kokkos::abort("View assignment must have compatible layouts");
+ }
+ }
+ }
+ }
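+
+  // Illustrative examples (comment only, not upstream code) of what the
+  // checks above accept and reject:
+  //
+  //   * layout_stride with strides {1, ext0} -> layout_left : accepted,
+  //     the source is effectively column-major contiguous
+  //   * layout_stride with strides {ext1, 1} -> layout_left : aborts,
+  //     a row-major source cannot alias a column-major destination
+  //   * layout_left_padded -> layout_left : accepted only when the padded
+  //     stride(1) equals extent(0), i.e. there is no actual padding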
+
+ public:
+ KOKKOS_DEFAULTED_FUNCTION constexpr BasicView() = default;
+
+ KOKKOS_FUNCTION constexpr BasicView(const mdspan_type &other)
+ : m_ptr(other.data_handle()),
+ m_map(other.mapping()),
+ m_acc(other.accessor()){};
+ KOKKOS_FUNCTION constexpr BasicView(mdspan_type &&other)
+ : m_ptr(std::move(other.data_handle())),
+ m_map(std::move(other.mapping())),
+ m_acc(std::move(other.accessor())){};
+
+ template <class... OtherIndexTypes>
+ // requires(std::is_constructible_v<mdspan_type, data_handle_type,
+ // OtherIndexTypes...>)
+ KOKKOS_FUNCTION explicit constexpr BasicView(
+ std::enable_if_t<std::is_constructible_v<mdspan_type, data_handle_type,
+ OtherIndexTypes...>,
+ data_handle_type>
+ p,
+ OtherIndexTypes... exts)
+ : m_ptr(std::move(p)),
+ m_map(extents_type(static_cast<index_type>(std::move(exts))...)),
+ m_acc{} {}
+
+ template <class OtherIndexType, size_t Size>
+ // When moving to C++20 we should switch to this; the conditional explicit
+ // cannot be expressed in C++17
+ // requires(std::is_constructible_v<mdspan_type, data_handle_type,
+ // std::array<OtherIndexType, Size>>)
+ // explicit(Size != rank_dynamic())
+ KOKKOS_FUNCTION constexpr BasicView(
+ std::enable_if_t<
+ std::is_constructible_v<mdspan_type, data_handle_type,
+ std::array<OtherIndexType, Size>>,
+ data_handle_type>
+ p,
+ const Array<OtherIndexType, Size> &exts)
+ : m_ptr(std::move(p)), m_map(extents_type(exts)), m_acc{} {}
+
+ KOKKOS_FUNCTION constexpr BasicView(data_handle_type p,
+ const extents_type &exts)
+// Compilation will simply fail in C++17, and the overload set should not be an issue
+#ifndef KOKKOS_ENABLE_CXX17
+ requires(std::is_default_constructible_v<accessor_type> &&
+ std::is_constructible_v<mapping_type, const extents_type &>)
+#endif
+ : m_ptr(std::move(p)), m_map(exts), m_acc{} {
+ }
+
+ KOKKOS_FUNCTION constexpr BasicView(data_handle_type p, const mapping_type &m)
+// Compilation will simply fail in C++17, and the overload set should not be an issue
+#ifndef KOKKOS_ENABLE_CXX17
+ requires(std::is_default_constructible_v<accessor_type>)
+#endif
+ : m_ptr(std::move(p)), m_map(m), m_acc{} {
+ }
+
+ KOKKOS_FUNCTION constexpr BasicView(data_handle_type p, const mapping_type &m,
+ const accessor_type &a)
+ : m_ptr(std::move(p)), m_map(m), m_acc(a) {}
+
+ template <class OtherT, class OtherE, class OtherL, class OtherA>
+// requires(std::is_constructible_v<mdspan_type,
+// typename BasicView<OtherT, OtherE,
+// OtherL,
+// OtherA>::mdspan_type>)
+#ifndef KOKKOS_ENABLE_CXX17
+ explicit(
+ !std::is_convertible_v<const typename OtherL::template mapping<OtherE> &,
+ mapping_type> ||
+ !std::is_convertible_v<const OtherA &, accessor_type>)
+#endif
+ KOKKOS_INLINE_FUNCTION
+ BasicView(const BasicView<OtherT, OtherE, OtherL, OtherA> &other,
+ std::enable_if_t<
+ std::is_constructible_v<
+ mdspan_type, typename BasicView<OtherT, OtherE, OtherL,
+ OtherA>::mdspan_type>,
+ void *> = nullptr)
+ : m_ptr(other.m_ptr), m_map(other.m_map), m_acc(other.m_acc) {
+ // Kokkos View precondition checks happen in release builds
+ check_basic_view_constructibility(other.mapping());
+
+ static_assert(
+ std::is_constructible_v<data_handle_type,
+ const typename OtherA::data_handle_type &>,
+ "Kokkos::View: incompatible data_handle_type for View construction");
+ static_assert(std::is_constructible_v<extents_type, OtherE>,
+ "Kokkos::View: incompatible extents for View construction");
+ }
+
+ template <class OtherT, class OtherE, class OtherL, class OtherA>
+// requires(std::is_constructible_v<mdspan_type,
+// mdspan<OtherT, OtherE, OtherL, OtherA>>)
+#ifndef KOKKOS_ENABLE_CXX17
+ explicit(
+ !std::is_convertible_v<const typename OtherL::template mapping<OtherE> &,
+ mapping_type> ||
+ !std::is_convertible_v<const OtherA &, accessor_type>)
+#endif
+ KOKKOS_INLINE_FUNCTION
+ BasicView(const mdspan<OtherT, OtherE, OtherL, OtherA> &other,
+ std::enable_if_t<
+ std::is_constructible_v<
+ mdspan_type, mdspan<OtherT, OtherE, OtherL, OtherA>>,
+ void *> = nullptr)
+ : m_ptr(other.data_handle()),
+ m_map(other.mapping()),
+ m_acc(other.accessor()) {
+ // Kokkos View precondition checks happen in release builds
+ check_basic_view_constructibility(other.mapping());
+
+ static_assert(
+ std::is_constructible_v<data_handle_type,
+ const typename OtherA::data_handle_type &>,
+ "Kokkos::View: incompatible data_handle_type for View construction");
+ static_assert(std::is_constructible_v<extents_type, OtherE>,
+ "Kokkos::View: incompatible extents for View construction");
+ }
+
+ // Allocating constructors specific to BasicView
+ ///
+ /// Construct from a given mapping
+ ///
+ explicit constexpr BasicView(const std::string &label,
+ const mapping_type &mapping)
+ : BasicView(view_alloc(label), mapping) {}
+
+ ///
+ /// Construct from a given extents
+ ///
+ explicit constexpr BasicView(const std::string &label,
+ const extents_type &ext)
+ : BasicView(view_alloc(label), mapping_type{ext}) {}
+
+ private:
+ template <class... P>
+ data_handle_type create_data_handle(
+ const Impl::ViewCtorProp<P...> &arg_prop,
+ const typename mdspan_type::mapping_type &arg_mapping) {
+ constexpr bool has_exec = Impl::ViewCtorProp<P...>::has_execution_space;
+ // Copy the input allocation properties with possibly defaulted properties
+ // We need to split it in two to avoid MSVC compiler errors
+ auto prop_copy_tmp =
+ Impl::with_properties_if_unset(arg_prop, std::string{});
+ auto prop_copy = Impl::with_properties_if_unset(
+ prop_copy_tmp, memory_space{}, execution_space{});
+ using alloc_prop = decltype(prop_copy);
+
+ if (alloc_prop::initialize &&
+ !alloc_prop::execution_space::impl_is_initialized()) {
+ // If initializing view data then
+ // the execution space must be initialized.
+ Kokkos::Impl::throw_runtime_exception(
+ "Constructing View and initializing data with uninitialized "
+ "execution space");
+ }
+ return data_handle_type(Impl::make_shared_allocation_record<ElementType>(
+ arg_mapping.required_span_size(),
+ Impl::get_property<Impl::LabelTag>(prop_copy),
+ Impl::get_property<Impl::MemorySpaceTag>(prop_copy),
+ has_exec ? std::optional<execution_space>{Impl::get_property<
+ Impl::ExecutionSpaceTag>(prop_copy)}
+ : std::optional<execution_space>{std::nullopt},
+ std::integral_constant<bool, alloc_prop::initialize>(),
+ std::integral_constant<bool, alloc_prop::sequential_host_init>()));
+ }
+
+ public:
+ template <class... P>
+ // requires(!Impl::ViewCtorProp<P...>::has_pointer)
+ explicit inline BasicView(
+ const Impl::ViewCtorProp<P...> &arg_prop,
+ std::enable_if_t<!Impl::ViewCtorProp<P...>::has_pointer,
+ typename mdspan_type::mapping_type> const &arg_mapping)
+ : BasicView(create_data_handle(arg_prop, arg_mapping), arg_mapping) {}
+
+ template <class... P>
+ // requires(Impl::ViewCtorProp<P...>::has_pointer)
+ KOKKOS_FUNCTION explicit inline BasicView(
+ const Impl::ViewCtorProp<P...> &arg_prop,
+ std::enable_if_t<Impl::ViewCtorProp<P...>::has_pointer,
+ typename mdspan_type::mapping_type> const &arg_mapping)
+ : BasicView(
+ data_handle_type(Impl::get_property<Impl::PointerTag>(arg_prop)),
+ arg_mapping) {}
+
+ protected:
+ template <class OtherElementType, class OtherExtents, class OtherLayoutPolicy,
+ class OtherAccessorPolicy, class... SliceSpecifiers>
+ KOKKOS_INLINE_FUNCTION BasicView(
+ Impl::SubViewCtorTag,
+ const BasicView<OtherElementType, OtherExtents, OtherLayoutPolicy,
+ OtherAccessorPolicy> &src_view,
+ SliceSpecifiers... slices)
+ : BasicView(submdspan(
+ src_view.to_mdspan(),
+ Impl::transform_kokkos_slice_to_mdspan_slice(slices)...)) {}
+
+ public:
+ //----------------------------------------
+ // Conversion to MDSpan
+ template <class OtherElementType, class OtherExtents, class OtherLayoutPolicy,
+ class OtherAccessor,
+ typename = std::enable_if_t<
+ std::is_assignable_v<mdspan<OtherElementType, OtherExtents,
+ OtherLayoutPolicy, OtherAccessor>,
+ mdspan_type>>>
+ KOKKOS_INLINE_FUNCTION constexpr
+ operator mdspan<OtherElementType, OtherExtents, OtherLayoutPolicy,
+ OtherAccessor>() const {
+ return mdspan_type(m_ptr, m_map, m_acc);
+ }
+
+ // Here we use an overload instead of a default parameter as a workaround
+ // for a potential compiler bug in clang 17. It may be present in other
+ // compilers as well.
+ template <class OtherAccessorType = AccessorPolicy,
+ typename = std::enable_if_t<std::is_constructible_v<
+ typename mdspan_type::data_handle_type,
+ typename OtherAccessorType::data_handle_type>>>
+ KOKKOS_INLINE_FUNCTION constexpr auto to_mdspan() const {
+ using ret_mdspan_type =
+ mdspan<typename mdspan_type::element_type,
+ typename mdspan_type::extents_type,
+ typename mdspan_type::layout_type, OtherAccessorType>;
+ return ret_mdspan_type(
+ static_cast<typename OtherAccessorType::data_handle_type>(
+ data_handle()),
+ mapping(), static_cast<OtherAccessorType>(accessor()));
+ }
+
+ template <
+ class OtherAccessorType = AccessorPolicy,
+ typename = std::enable_if_t<std::is_assignable_v<
+ data_handle_type, typename OtherAccessorType::data_handle_type>>>
+ KOKKOS_INLINE_FUNCTION constexpr auto to_mdspan(
+ const OtherAccessorType &other_accessor) const {
+ using ret_mdspan_type =
+ mdspan<element_type, extents_type, layout_type, OtherAccessorType>;
+ return ret_mdspan_type(
+ static_cast<typename OtherAccessorType::data_handle_type>(
+ data_handle()),
+ mapping(), other_accessor);
+ }
+
+ KOKKOS_FUNCTION void assign_data(element_type *ptr) { m_ptr = ptr; }
+
+ // ========================= mdspan =================================
+
+ // [mdspan.mdspan.members], members
+
+// Introduce the C++20 and C++23 variants of the operators already
+#ifndef KOKKOS_ENABLE_CXX17
+#ifndef KOKKOS_ENABLE_CXX20
+ // C++23 only operator[]
+ template <class... OtherIndexTypes>
+ requires((std::is_convertible_v<OtherIndexTypes, index_type> && ...) &&
+ (std::is_nothrow_constructible_v<index_type, OtherIndexTypes> &&
+ ...) &&
+ (sizeof...(OtherIndexTypes) == rank()))
+ KOKKOS_FUNCTION constexpr reference operator[](
+ OtherIndexTypes... indices) const {
+ return m_acc.access(m_ptr,
+ m_map(static_cast<index_type>(std::move(indices))...));
+ }
+
+ template <class OtherIndexType>
+ requires(
+ std::is_convertible_v<const OtherIndexType &, index_type> &&
+ std::is_nothrow_constructible_v<index_type, const OtherIndexType &>)
+ KOKKOS_FUNCTION constexpr reference operator[](
+ const Array<OtherIndexType, rank()> &indices) const {
+ return m_acc.access(m_ptr,
+ [&]<size_t... Idxs>(std::index_sequence<Idxs...>) {
+ return m_map(indices[Idxs]...);
+ }(std::make_index_sequence<rank()>()));
+ }
+
+ template <class OtherIndexType>
+ requires(
+ std::is_convertible_v<const OtherIndexType &, index_type> &&
+ std::is_nothrow_constructible_v<index_type, const OtherIndexType &>)
+ KOKKOS_FUNCTION constexpr reference operator[](
+ std::span<OtherIndexType, rank()> indices) const {
+ return m_acc.access(m_ptr,
+ [&]<size_t... Idxs>(std::index_sequence<Idxs...>) {
+ return m_map(indices[Idxs]...);
+ }(std::make_index_sequence<rank()>()));
+ }
+#endif
+
+ // C++20 operator()
+ template <class... OtherIndexTypes>
+ requires((std::is_convertible_v<OtherIndexTypes, index_type> && ...) &&
+ (std::is_nothrow_constructible_v<index_type, OtherIndexTypes> &&
+ ...) &&
+ (sizeof...(OtherIndexTypes) == rank()))
+ KOKKOS_FUNCTION constexpr reference operator()(
+ OtherIndexTypes... indices) const {
+ return m_acc.access(m_ptr,
+ m_map(static_cast<index_type>(std::move(indices))...));
+ }
+
+ template <class OtherIndexType>
+ requires(
+ std::is_convertible_v<const OtherIndexType &, index_type> &&
+ std::is_nothrow_constructible_v<index_type, const OtherIndexType &>)
+ KOKKOS_FUNCTION constexpr reference operator()(
+ const Array<OtherIndexType, rank()> &indices) const {
+ return m_acc.access(m_ptr,
+ [&]<size_t... Idxs>(std::index_sequence<Idxs...>) {
+ return m_map(indices[Idxs]...);
+ }(std::make_index_sequence<rank()>()));
+ }
+
+ template <class OtherIndexType>
+ requires(
+ std::is_convertible_v<const OtherIndexType &, index_type> &&
+ std::is_nothrow_constructible_v<index_type, const OtherIndexType &>)
+ KOKKOS_FUNCTION constexpr reference operator()(
+ std::span<OtherIndexType, rank()> indices) const {
+ return m_acc.access(m_ptr,
+ [&]<size_t... Idxs>(std::index_sequence<Idxs...>) {
+ return m_map(indices[Idxs]...);
+ }(std::make_index_sequence<rank()>()));
+ }
+#else
+ // C++17 variant of operator()
+
+ // Some weird unexplained issue compiling the SFINAE version with CUDA/MSVC,
+ // so we just use a post-facto check with static_assert here
+#if defined(KOKKOS_ENABLE_CUDA) && defined(_WIN32)
+ template <class... OtherIndexTypes>
+ KOKKOS_FUNCTION constexpr reference operator()(
+ OtherIndexTypes... indices) const {
+ static_assert((std::is_convertible_v<OtherIndexTypes, index_type> && ...));
+ static_assert(
+ (std::is_nothrow_constructible_v<index_type, OtherIndexTypes> && ...));
+ static_assert((sizeof...(OtherIndexTypes)) == rank());
+ return m_acc.access(m_ptr,
+ m_map(static_cast<index_type>(std::move(indices))...));
+ }
+#else
+ template <class... OtherIndexTypes>
+ KOKKOS_FUNCTION constexpr std::enable_if_t<
+ ((std::is_convertible_v<OtherIndexTypes, index_type> && ...)) &&
+ ((std::is_nothrow_constructible_v<index_type, OtherIndexTypes> &&
+ ...)) &&
+ ((sizeof...(OtherIndexTypes)) == rank()),
+ reference>
+ operator()(OtherIndexTypes... indices) const {
+ return m_acc.access(m_ptr,
+ m_map(static_cast<index_type>(std::move(indices))...));
+ }
+#endif
+#endif
+
+ private:
+ // FIXME_CXX20: could use inline templated lambda in C++20 mode inside size()
+ template <size_t... Idxs>
+ KOKKOS_FUNCTION constexpr size_type size_impl(
+ std::index_sequence<Idxs...>) const noexcept {
+ // Note we restrict data_handle to be convertible to element_type* for now.
+ // This is also different from mdspan: mdspan can NOT be legally in a state
+ // where m_ptr is nullptr and the product of extents is non-zero
+ // The default constructor of mdspan is constrained to dynamic_rank > 0
+ // For View we do not have that constraint today
+ if (data_handle() == nullptr) return 0u;
+ return ((static_cast<size_type>(m_map.extents().extent(Idxs))) * ... *
+ size_type(1));
+ }
+
+ public:
+ KOKKOS_FUNCTION constexpr size_type size() const noexcept {
+ return size_impl(std::make_index_sequence<rank()>());
+ }
+
+ private:
+ // FIXME_CXX20: could use inline templated lambda in C++20 mode inside empty()
+ template <size_t... Idxs>
+ KOKKOS_FUNCTION constexpr bool empty_impl(
+ std::index_sequence<Idxs...>) const noexcept {
+ // Note we restrict data_handle to be convertible to element_type* for now.
+ // This is also different from mdspan: mdspan can NOT be legally in a state
+ // where m_ptr is nullptr and the product of extents is non-zero
+ // The default constructor of mdspan is constrained to dynamic_rank > 0
+ // For View we do not have that constraint today
+ if (data_handle() == nullptr) return true;
+ return (rank() > 0) &&
+ ((m_map.extents().extent(Idxs) == index_type(0)) || ... || false);
+ }
+
+ public:
+ [[nodiscard]] KOKKOS_FUNCTION constexpr bool empty() const noexcept {
+ return empty_impl(std::make_index_sequence<rank()>());
+ }
+
+ KOKKOS_FUNCTION friend constexpr void swap(BasicView &x,
+ BasicView &y) noexcept {
+ kokkos_swap(x.m_ptr, y.m_ptr);
+ kokkos_swap(x.m_map, y.m_map);
+ kokkos_swap(x.m_acc, y.m_acc);
+ }
+
+ KOKKOS_FUNCTION constexpr const extents_type &extents() const noexcept {
+ return m_map.extents();
+ };
+ KOKKOS_FUNCTION constexpr const data_handle_type &data_handle()
+ const noexcept {
+ return m_ptr;
+ };
+ KOKKOS_FUNCTION constexpr const mapping_type &mapping() const noexcept {
+ return m_map;
+ };
+ KOKKOS_FUNCTION constexpr const accessor_type &accessor() const noexcept {
+ return m_acc;
+ };
+
+ KOKKOS_FUNCTION static constexpr bool is_always_unique() noexcept {
+ return mapping_type::is_always_unique();
+ };
+ KOKKOS_FUNCTION static constexpr bool is_always_exhaustive() noexcept {
+ return mapping_type::is_always_exhaustive();
+ };
+ KOKKOS_FUNCTION static constexpr bool is_always_strided() noexcept {
+ return mapping_type::is_always_strided();
+ };
+
+ KOKKOS_FUNCTION constexpr bool is_unique() const {
+ return m_map.is_unique();
+ };
+ KOKKOS_FUNCTION constexpr bool is_exhaustive() const {
+ return m_map.is_exhaustive();
+ };
+ KOKKOS_FUNCTION constexpr bool is_strided() const {
+ return m_map.is_strided();
+ };
+ KOKKOS_FUNCTION constexpr index_type stride(rank_type r) const {
+ return m_map.stride(r);
+ };
+
+ protected:
+#ifndef __NVCC__
+ KOKKOS_IMPL_NO_UNIQUE_ADDRESS data_handle_type m_ptr{};
+ KOKKOS_IMPL_NO_UNIQUE_ADDRESS mapping_type m_map{};
+ KOKKOS_IMPL_NO_UNIQUE_ADDRESS accessor_type m_acc{};
+#else
+ data_handle_type m_ptr{};
+ mapping_type m_map{};
+ accessor_type m_acc{};
+#endif
+
+ template <class, class, class, class>
+ friend class BasicView;
+};
+} // namespace Kokkos::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_VIEW_ALLOC_HPP
+#define KOKKOS_VIEW_ALLOC_HPP
+
+#include <cstring>
+#include <type_traits>
+#include <string>
+#include <optional>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+
+namespace Kokkos::Impl {
+
+template <typename T>
+bool is_zero_byte(const T& x) {
+ constexpr std::byte all_zeroes[sizeof(T)] = {};
+ return std::memcmp(&x, all_zeroes, sizeof(T)) == 0;
+}
+
+template <class DeviceType, class ValueType>
+struct ViewValueFunctor {
+ using ExecSpace = typename DeviceType::execution_space;
+
+ struct DestroyTag {};
+ struct ConstructTag {};
+
+ ExecSpace space;
+ ValueType* ptr;
+ size_t n;
+ std::string name;
+ bool default_exec_space;
+
+ template <class SameValueType = ValueType>
+ KOKKOS_FUNCTION
+ std::enable_if_t<std::is_default_constructible_v<SameValueType>>
+ operator()(ConstructTag, const size_t i) const {
+ new (ptr + i) ValueType();
+ }
+
+ KOKKOS_FUNCTION void operator()(DestroyTag, const size_t i) const {
+ // When instantiating a View on a host execution space with a host-only
+ // destructor, the workaround for CUDA device symbol instantiation still
+ // tries to compile a destruction kernel for the device, and warns about
+ // calling a host function from a host-device function
+#ifdef KOKKOS_ENABLE_CUDA
+ if constexpr (std::is_same_v<ExecSpace, Cuda>) {
+ KOKKOS_IF_ON_DEVICE(((ptr + i)->~ValueType();))
+ } else {
+ KOKKOS_IF_ON_HOST(((ptr + i)->~ValueType();))
+ }
+#else
+ (ptr + i)->~ValueType();
+#endif
+ }
+
+ ViewValueFunctor() = default;
+ ViewValueFunctor(const ViewValueFunctor&) = default;
+ ViewValueFunctor& operator=(const ViewValueFunctor&) = default;
+
+ ViewValueFunctor(ExecSpace const& arg_space, ValueType* const arg_ptr,
+ size_t const arg_n, std::string arg_name)
+ : space(arg_space),
+ ptr(arg_ptr),
+ n(arg_n),
+ name(std::move(arg_name)),
+ default_exec_space(false) {
+ functor_instantiate_workaround();
+ }
+
+ ViewValueFunctor(ValueType* const arg_ptr, size_t const arg_n,
+ std::string arg_name)
+ : space(ExecSpace{}),
+ ptr(arg_ptr),
+ n(arg_n),
+ name(std::move(arg_name)),
+ default_exec_space(true) {
+ functor_instantiate_workaround();
+ }
+
+ template <typename Tag>
+ void parallel_for_implementation() {
+ using PolicyType =
+ Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<int64_t>, Tag>;
+ PolicyType policy(space, 0, n);
+ uint64_t kpID = 0;
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ const std::string functor_name =
+ (std::is_same_v<Tag, DestroyTag>
+ ? "Kokkos::View::destruction [" + name + "]"
+ : "Kokkos::View::initialization [" + name + "]");
+ Kokkos::Profiling::beginParallelFor(
+ functor_name, Kokkos::Profiling::Experimental::device_id(space),
+ &kpID);
+ }
+
+#ifdef KOKKOS_ENABLE_CUDA
+ if (std::is_same<ExecSpace, Kokkos::Cuda>::value) {
+ Kokkos::Impl::cuda_prefetch_pointer(space, ptr, sizeof(ValueType) * n,
+ true);
+ }
+#endif
+ const Kokkos::Impl::ParallelFor<ViewValueFunctor, PolicyType> closure(
+ *this, policy);
+ closure.execute();
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ Kokkos::Profiling::endParallelFor(kpID);
+ }
+ if (default_exec_space || std::is_same_v<Tag, DestroyTag>) {
+ space.fence(std::is_same_v<Tag, DestroyTag>
+ ? "Kokkos::View::destruction before deallocate"
+ : "Kokkos::View::initialization");
+ }
+ }
+
+ // Shortcut for zero initialization
+ void zero_memset_implementation() {
+ uint64_t kpID = 0;
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ // We are not really using parallel_for here but using beginParallelFor
+ // instead of begin_parallel_for (and adding "via memset") is the best
+ // we can do to indicate that this is not supposed to be tunable (and
+ // doesn't really execute a parallel_for).
+ Kokkos::Profiling::beginParallelFor(
+ "Kokkos::View::initialization [" + name + "] via memset",
+ Kokkos::Profiling::Experimental::device_id(space), &kpID);
+ }
+
+ (void)ZeroMemset(space, ptr, n * sizeof(ValueType));
+
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ Kokkos::Profiling::endParallelFor(kpID);
+ }
+ if (default_exec_space) {
+ space.fence("Kokkos::View::initialization via memset");
+ }
+ }
+
+ void construct_shared_allocation() {
+// On A64FX, memset seems to do the wrong thing with regard to first touch,
+// leading to significant performance issues
+#ifndef KOKKOS_ARCH_A64FX
+ if constexpr (std::is_trivial_v<ValueType>) {
+ // value-initialization is equivalent to filling with zeros
+ zero_memset_implementation();
+ } else
+#endif
+ parallel_for_implementation<ConstructTag>();
+ }
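+
+  // Illustrative note (not upstream code): for trivial element types such as
+  // double the branch above reduces initialization to a single ZeroMemset
+  // over the allocation; an element type with a user-provided default
+  // constructor instead takes the ConstructTag parallel_for path, which
+  // placement-news each of the n elements.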
+
+ void destroy_shared_allocation() {
+ if constexpr (std::is_trivially_destructible_v<ValueType>) {
+ // do nothing, don't bother calling the destructor
+ } else {
+#ifdef KOKKOS_ENABLE_IMPL_VIEW_OF_VIEWS_DESTRUCTOR_PRECONDITION_VIOLATION_WORKAROUND
+ if constexpr (std::is_same_v<typename ExecSpace::memory_space,
+ Kokkos::HostSpace>)
+ for (size_t i = 0; i < n; ++i) (ptr + i)->~ValueType();
+ else
+#endif
+ parallel_for_implementation<DestroyTag>();
+ }
+ }
+
+ // This function ensures that the functor with DestroyTag is instantiated.
+ // It is a workaround to avoid a "cudaErrorInvalidDeviceFunction" error later
+ // when the function is queried with cudaFuncGetAttributes
+ void functor_instantiate_workaround() {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
+ defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_OPENMPTARGET)
+ if (false) {
+ parallel_for_implementation<DestroyTag>();
+ }
+#endif
+ }
+};
+
+template <class DeviceType, class ValueType>
+struct ViewValueFunctorSequentialHostInit {
+ using ExecSpace = typename DeviceType::execution_space;
+ using MemSpace = typename DeviceType::memory_space;
+ static_assert(SpaceAccessibility<HostSpace, MemSpace>::accessible);
+
+ ValueType* ptr;
+ size_t n;
+
+ ViewValueFunctorSequentialHostInit() = default;
+
+ ViewValueFunctorSequentialHostInit(ExecSpace const& /*arg_space*/,
+ ValueType* const arg_ptr,
+ size_t const arg_n,
+ std::string /*arg_name*/)
+ : ptr(arg_ptr), n(arg_n) {}
+
+ ViewValueFunctorSequentialHostInit(ValueType* const arg_ptr,
+ size_t const arg_n,
+ std::string /*arg_name*/)
+ : ptr(arg_ptr), n(arg_n) {}
+
+ void construct_shared_allocation() {
+ if constexpr (std::is_trivial_v<ValueType>) {
+ // value-initialization is equivalent to filling with zeros
+ std::memset(static_cast<void*>(ptr), 0, n * sizeof(ValueType));
+ } else {
+ for (size_t i = 0; i < n; ++i) {
+ new (ptr + i) ValueType();
+ }
+ }
+ }
+
+ void destroy_shared_allocation() {
+ if constexpr (std::is_trivially_destructible_v<ValueType>) {
+ // do nothing, don't bother calling the destructor
+ } else {
+ for (size_t i = 0; i < n; ++i) {
+ (ptr + i)->~ValueType();
+ }
+ }
+ }
+};
+
+template <class ElementType, class MemorySpace, class ExecutionSpace,
+ bool Initialize, bool SequentialInit>
+Kokkos::Impl::SharedAllocationRecord<void, void>* make_shared_allocation_record(
+ const size_t& required_span_size, std::string_view label,
+ const MemorySpace& memory_space,
+ const std::optional<ExecutionSpace> exec_space,
+ std::bool_constant<Initialize>, std::bool_constant<SequentialInit>) {
+ static_assert(SpaceAccessibility<ExecutionSpace, MemorySpace>::accessible);
+
+ // Use this for constructing and destroying the view
+ using device_type = Kokkos::Device<ExecutionSpace, MemorySpace>;
+ using functor_type = std::conditional_t<
+ SequentialInit,
+ ViewValueFunctorSequentialHostInit<device_type, ElementType>,
+ ViewValueFunctor<device_type, ElementType>>;
+ using record_type =
+ Kokkos::Impl::SharedAllocationRecord<MemorySpace, functor_type>;
+
+ /* Force alignment of allocations on 8-byte boundaries even for
+ * element types smaller than 8 bytes */
+ static constexpr std::size_t align_mask = 0x7;
+
+  // Calculate the total size of the memory, in bytes, and round it up to a
+  // multiple of 8 bytes
+ const std::size_t alloc_size =
+ (required_span_size * sizeof(ElementType) + align_mask) & ~align_mask;
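+  // For example, required_span_size == 13 with sizeof(ElementType) == 1
+  // yields (13 + 7) & ~7 == 16, i.e. the request is rounded up to the next
+  // multiple of 8.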
+
+ auto* record =
+ exec_space
+ ? record_type::allocate(*exec_space, memory_space, std::string{label},
+ alloc_size)
+ : record_type::allocate(memory_space, std::string{label}, alloc_size);
+
+ auto ptr = static_cast<ElementType*>(record->data());
+
+ auto functor =
+ exec_space ? functor_type(*exec_space, ptr, required_span_size,
+ std::string{label})
+ : functor_type(ptr, required_span_size, std::string{label});
+
+ // Only initialize if the allocation is non-zero.
+ // May be zero if one of the dimensions is zero.
+ if constexpr (Initialize) {
+ if (alloc_size) {
+ // Assume destruction is only required when construction is requested.
+ // The ViewValueFunctor has both value construction and destruction
+ // operators.
+ record->m_destroy = std::move(functor);
+
+ // Construct values
+ record->m_destroy.construct_shared_allocation();
+ }
+ }
+
+ return record;
+}
+
+} // namespace Kokkos::Impl
+
+#endif // KOKKOS_VIEW_ALLOC_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#ifndef KOKKOS_VIEWATOMIC_HPP
+#define KOKKOS_VIEWATOMIC_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// The following tag is used to prevent an implicit call of the constructor
+// when trying to assign a literal 0 to an element, as in `x = 0;`.
+struct AtomicViewConstTag {};
+
+template <class ViewTraits>
+class AtomicDataElement {
+ public:
+ using value_type = typename ViewTraits::value_type;
+ using const_value_type = typename ViewTraits::const_value_type;
+ using non_const_value_type = typename ViewTraits::non_const_value_type;
+ value_type* const ptr;
+
+ KOKKOS_INLINE_FUNCTION
+ AtomicDataElement(value_type* ptr_, AtomicViewConstTag) : ptr(ptr_) {}
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator=(const_value_type& val) const {
+ Kokkos::atomic_store(ptr, val);
+ return val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ void inc() const { Kokkos::atomic_inc(ptr); }
+
+ KOKKOS_INLINE_FUNCTION
+ void dec() const { Kokkos::atomic_dec(ptr); }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator++() const {
+ const_value_type tmp =
+ Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
+ return tmp + 1;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator--() const {
+ const_value_type tmp =
+ Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
+ return tmp - 1;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator++(int) const {
+ return Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator--(int) const {
+ return Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator+=(const_value_type& val) const {
+ const_value_type tmp = Kokkos::atomic_fetch_add(ptr, val);
+ return tmp + val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator-=(const_value_type& val) const {
+ const_value_type tmp = Kokkos::atomic_fetch_sub(ptr, val);
+ return tmp - val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator*=(const_value_type& val) const {
+ return Kokkos::atomic_mul_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator/=(const_value_type& val) const {
+ return Kokkos::atomic_div_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator%=(const_value_type& val) const {
+ return Kokkos::atomic_mod_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator&=(const_value_type& val) const {
+ return Kokkos::atomic_and_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator^=(const_value_type& val) const {
+ return Kokkos::atomic_xor_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator|=(const_value_type& val) const {
+ return Kokkos::atomic_or_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator<<=(const_value_type& val) const {
+ return Kokkos::atomic_lshift_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator>>=(const_value_type& val) const {
+ return Kokkos::atomic_rshift_fetch(ptr, val);
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator+(const_value_type& val) const { return *ptr + val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator-(const_value_type& val) const { return *ptr - val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator*(const_value_type& val) const { return *ptr * val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator/(const_value_type& val) const { return *ptr / val; }
+
+ KOKKOS_INLINE_FUNCTION
+  const_value_type operator%(const_value_type& val) const { return *ptr % val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator!() const { return !*ptr; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator&&(const_value_type& val) const {
+ return *ptr && val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator||(const_value_type& val) const {
+    return *ptr || val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator&(const_value_type& val) const { return *ptr & val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator|(const_value_type& val) const { return *ptr | val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator^(const_value_type& val) const { return *ptr ^ val; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator~() const { return ~*ptr; }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator<<(const unsigned int& val) const {
+ return *ptr << val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ const_value_type operator>>(const unsigned int& val) const {
+ return *ptr >> val;
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator==(const AtomicDataElement& val) const { return *ptr == val; }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator!=(const AtomicDataElement& val) const { return *ptr != val; }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator>=(const_value_type& val) const { return *ptr >= val; }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator<=(const_value_type& val) const { return *ptr <= val; }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator<(const_value_type& val) const { return *ptr < val; }
+
+ KOKKOS_INLINE_FUNCTION
+ bool operator>(const_value_type& val) const { return *ptr > val; }
+
+ KOKKOS_INLINE_FUNCTION
+ operator value_type() const { return Kokkos::atomic_load(ptr); }
+};
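+
+// Minimal usage sketch (illustrative): a View declared with the Atomic memory
+// trait hands out AtomicDataElement proxies, so ordinary-looking element
+// updates compile to the atomic operations above:
+//
+//   Kokkos::View<int*, Kokkos::MemoryTraits<Kokkos::Atomic>> counts("c", n);
+//   counts(i) += 1;           // Kokkos::atomic_fetch_add(&counts(i), 1) + 1
+//   int snapshot = counts(i); // Kokkos::atomic_load via operator value_type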
+
+template <class ViewTraits>
+class AtomicViewDataHandle {
+ public:
+ typename ViewTraits::value_type* ptr;
+
+ KOKKOS_INLINE_FUNCTION
+ AtomicViewDataHandle() : ptr(nullptr) {}
+
+ KOKKOS_INLINE_FUNCTION
+ AtomicViewDataHandle(typename ViewTraits::value_type* ptr_) : ptr(ptr_) {}
+
+ template <class iType>
+ KOKKOS_INLINE_FUNCTION AtomicDataElement<ViewTraits> operator[](
+ const iType& i) const {
+ return AtomicDataElement<ViewTraits>(ptr + i, AtomicViewConstTag());
+ }
+
+ KOKKOS_INLINE_FUNCTION
+ operator typename ViewTraits::value_type *() const { return ptr; }
+};
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
+#define KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct SequentialHostInit_t {};
+struct WithoutInitializing_t {};
+struct AllowPadding_t {};
+
+template <typename>
+struct is_view_ctor_property : public std::false_type {};
+
+template <>
+struct is_view_ctor_property<SequentialHostInit_t> : public std::true_type {};
+
+template <>
+struct is_view_ctor_property<WithoutInitializing_t> : public std::true_type {};
+
+template <>
+struct is_view_ctor_property<AllowPadding_t> : public std::true_type {};
+
+//----------------------------------------------------------------------------
+/**\brief Whether a type can be used for a view label */
+
+template <typename>
+struct is_view_label : public std::false_type {};
+
+template <>
+struct is_view_label<std::string> : public std::true_type {};
+
+template <unsigned N>
+struct is_view_label<char[N]> : public std::true_type {};
+
+template <unsigned N>
+struct is_view_label<const char[N]> : public std::true_type {};
+
+//----------------------------------------------------------------------------
+
+template <typename... P>
+struct ViewCtorProp;
+
+// Forward declare
+template <typename Specialize, typename T>
+struct CommonViewAllocProp;
+
+/* Dummy to allow for empty ViewCtorProp object
+ */
+template <>
+struct ViewCtorProp<void> {};
+
+/* Common value_type stored as ViewCtorProp
+ */
+template <typename Specialize, typename T>
+struct ViewCtorProp<void, CommonViewAllocProp<Specialize, T>> {
+ ViewCtorProp() = default;
+ ViewCtorProp(const ViewCtorProp &) = default;
+ ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+ using type = CommonViewAllocProp<Specialize, T>;
+
+ KOKKOS_FUNCTION
+ ViewCtorProp(const type &arg) : value(arg) {}
+ KOKKOS_FUNCTION
+ ViewCtorProp(type &&arg) : value(arg) {}
+
+ type value;
+};
+
+/* Property flags have constexpr value */
+template <typename P>
+struct ViewCtorProp<std::enable_if_t<std::is_same_v<P, AllowPadding_t> ||
+ std::is_same_v<P, WithoutInitializing_t> ||
+ std::is_same_v<P, SequentialHostInit_t>>,
+ P> {
+ ViewCtorProp() = default;
+ ViewCtorProp(const ViewCtorProp &) = default;
+ ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+ using type = P;
+
+ ViewCtorProp(const type &) {}
+
+ type value = type();
+};
+
+/* Map input label type to std::string */
+template <typename Label>
+struct ViewCtorProp<std::enable_if_t<is_view_label<Label>::value>, Label> {
+ ViewCtorProp() = default;
+ ViewCtorProp(const ViewCtorProp &) = default;
+ ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+ using type = std::string;
+
+ ViewCtorProp(const type &arg) : value(arg) {}
+ ViewCtorProp(type &&arg) : value(std::move(arg)) {}
+
+ type value;
+};
+
+template <typename Space>
+struct ViewCtorProp<std::enable_if_t<Kokkos::is_memory_space<Space>::value ||
+ Kokkos::is_execution_space<Space>::value>,
+ Space> {
+ ViewCtorProp() = default;
+ ViewCtorProp(const ViewCtorProp &) = default;
+ ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+ using type = Space;
+
+ ViewCtorProp(const type &arg) : value(arg) {}
+
+ type value;
+};
+
+template <typename T>
+struct ViewCtorProp<void, T *> {
+ ViewCtorProp() = default;
+ ViewCtorProp(const ViewCtorProp &) = default;
+ ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+ using type = T *;
+
+ KOKKOS_FUNCTION
+ ViewCtorProp(const type arg) : value(arg) {}
+
+ type value;
+};
+
+// For some reason I don't understand, this specialization is needed
+// explicitly for NVCC/MSVC.
+template <typename T>
+struct ViewCtorProp<T *> : public ViewCtorProp<void, T *> {
+ static constexpr bool has_memory_space = false;
+ static constexpr bool has_execution_space = false;
+ static constexpr bool has_pointer = true;
+ static constexpr bool has_label = false;
+ static constexpr bool allow_padding = false;
+ static constexpr bool initialize = true;
+
+ using memory_space = void;
+ using execution_space = void;
+ using pointer_type = T *;
+
+ KOKKOS_FUNCTION ViewCtorProp(const pointer_type arg)
+ : ViewCtorProp<void, pointer_type>(arg) {}
+};
+
+// If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` directly
+// in the parameter lists and base class initializers, respectively, as far as
+// we can tell MSVC 16.5.5+CUDA 10.2 thinks that `ViewCtorProp` refers to the
+// current instantiation, not the template itself, and gets all kinds of
+// confused. To work around this, we just use a couple of alias templates that
+// amount to the same thing.
+template <typename... Args>
+using view_ctor_prop_args = ViewCtorProp<Args...>;
+
+template <typename Arg>
+using view_ctor_prop_base = ViewCtorProp<void, Arg>;
+
+template <typename... P>
+struct ViewCtorProp : public ViewCtorProp<void, P>... {
+ private:
+ using var_memory_space =
+ Kokkos::Impl::has_condition<void, Kokkos::is_memory_space, P...>;
+
+ using var_execution_space =
+ Kokkos::Impl::has_condition<void, Kokkos::is_execution_space, P...>;
+
+ struct VOIDDUMMY {};
+
+ using var_pointer =
+ Kokkos::Impl::has_condition<VOIDDUMMY, std::is_pointer, P...>;
+
+ public:
+ /* Flags for the common properties */
+ static constexpr bool has_memory_space = var_memory_space::value;
+ static constexpr bool has_execution_space = var_execution_space::value;
+ static constexpr bool has_pointer = var_pointer::value;
+ static constexpr bool has_label =
+ Kokkos::Impl::has_type<std::string, P...>::value;
+ static constexpr bool allow_padding =
+ Kokkos::Impl::has_type<AllowPadding_t, P...>::value;
+ static constexpr bool initialize =
+ !Kokkos::Impl::has_type<WithoutInitializing_t, P...>::value;
+ static constexpr bool sequential_host_init =
+ Kokkos::Impl::has_type<SequentialHostInit_t, P...>::value;
+ static_assert(initialize || !sequential_host_init,
+ "Incompatible WithoutInitializing and SequentialHostInit view "
+ "alloc properties");
+
+ using memory_space = typename var_memory_space::type;
+ using execution_space = typename var_execution_space::type;
+ using pointer_type = typename var_pointer::type;
+
+ // Construct from a matching argument list.
+ //
+ // Note that if P is empty, this constructor is the default constructor.
+ // On the other hand, if P is not empty, the constraint implies that
+ // there is no default constructor.
+ template <typename... Args,
+ typename = std::enable_if_t<std::conjunction_v<
+ std::is_constructible<view_ctor_prop_base<P>, Args &&>...>>>
+ ViewCtorProp(Args &&...args)
+ : ViewCtorProp<void, P>(std::forward<Args>(args))... {}
+
+ template <typename... Args>
+ KOKKOS_FUNCTION ViewCtorProp(pointer_type arg0, Args const &...args)
+ : ViewCtorProp<void, pointer_type>(arg0),
+ ViewCtorProp<void, typename ViewCtorProp<void, Args>::type>(args)... {}
+
+ /* Copy from a matching property subset */
+ KOKKOS_FUNCTION ViewCtorProp(pointer_type arg0)
+ : ViewCtorProp<void, pointer_type>(arg0) {}
+
+ // If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` here
+ // directly, MSVC 16.5.5+CUDA 10.2 appears to think that `ViewCtorProp` refers
+ // to the current instantiation, not the template itself, and gets all kinds
+ // of confused. To work around this, we just use a couple of alias templates
+ // that amount to the same thing.
+ template <typename... Args>
+ ViewCtorProp(view_ctor_prop_args<Args...> const &arg)
+ : view_ctor_prop_base<Args>(
+ static_cast<view_ctor_prop_base<Args> const &>(arg))... {
+ // Suppress an unused argument warning that (at least at one point) would
+ // show up if sizeof...(Args) == 0
+ (void)arg;
+ }
+};
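+
+// For example (illustrative):
+// Kokkos::view_alloc("A", Kokkos::WithoutInitializing) produces a
+// ViewCtorProp<std::string, WithoutInitializing_t> with has_label == true and
+// initialize == false; all other flags keep their default values.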
+
+#if !defined(KOKKOS_COMPILER_MSVC) || !defined(KOKKOS_COMPILER_NVCC)
+template <typename... P>
+auto with_properties_if_unset(const ViewCtorProp<P...> &view_ctor_prop) {
+ return view_ctor_prop;
+}
+
+template <typename... P, typename Property, typename... Properties>
+auto with_properties_if_unset(const ViewCtorProp<P...> &view_ctor_prop,
+ [[maybe_unused]] const Property &property,
+ const Properties &...properties) {
+ if constexpr ((is_execution_space<Property>::value &&
+ !ViewCtorProp<P...>::has_execution_space) ||
+ (is_memory_space<Property>::value &&
+ !ViewCtorProp<P...>::has_memory_space) ||
+ (is_view_label<Property>::value &&
+ !ViewCtorProp<P...>::has_label) ||
+ (std::is_same_v<Property, WithoutInitializing_t> &&
+ ViewCtorProp<P...>::initialize) ||
+ (std::is_same_v<Property, SequentialHostInit_t> &&
+ !ViewCtorProp<P...>::sequential_host_init)) {
+ using NewViewCtorProp = ViewCtorProp<P..., Property>;
+ NewViewCtorProp new_view_ctor_prop(view_ctor_prop);
+ static_cast<ViewCtorProp<void, Property> &>(new_view_ctor_prop).value =
+ property;
+ return with_properties_if_unset(new_view_ctor_prop, properties...);
+ } else
+ return with_properties_if_unset(view_ctor_prop, properties...);
+
+// A workaround to prevent spurious "missing return statement at the end of
+// non-void function" warnings from CUDA builds (issue #5470). Because
+// KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK removes the [[noreturn]] attribute from
+// cuda_abort(), an unreachable while(true); is placed as a fallback.
+#if (defined(KOKKOS_COMPILER_NVCC) && (KOKKOS_COMPILER_NVCC < 1150)) || \
+ (defined(KOKKOS_COMPILER_INTEL) && (KOKKOS_COMPILER_INTEL <= 2100))
+ Kokkos::abort(
+ "Prevents an incorrect warning: missing return statement at end of "
+ "non-void function");
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ while (true)
+ ;
+#endif
+#endif
+}
+#else
+
+template <class ViewCtorP, class... Properties>
+struct WithPropertiesIfUnset;
+
+template <class ViewCtorP>
+struct WithPropertiesIfUnset<ViewCtorP> {
+ static constexpr auto apply_prop(const ViewCtorP &view_ctor_prop) {
+ return view_ctor_prop;
+ }
+};
+
+template <class... P, class Property, class... Properties>
+struct WithPropertiesIfUnset<ViewCtorProp<P...>, Property, Properties...> {
+ static constexpr auto apply_prop(const ViewCtorProp<P...> &view_ctor_prop,
+ const Property &prop,
+ const Properties &...properties) {
+ if constexpr ((is_execution_space<Property>::value &&
+ !ViewCtorProp<P...>::has_execution_space) ||
+ (is_memory_space<Property>::value &&
+ !ViewCtorProp<P...>::has_memory_space) ||
+ (is_view_label<Property>::value &&
+ !ViewCtorProp<P...>::has_label) ||
+ (std::is_same_v<Property, WithoutInitializing_t> &&
+ ViewCtorProp<P...>::initialize) ||
+ (std::is_same_v<Property, SequentialHostInit_t> &&
+ !ViewCtorProp<P...>::sequential_host_init)) {
+ using NewViewCtorProp = ViewCtorProp<P..., Property>;
+ NewViewCtorProp new_view_ctor_prop(view_ctor_prop);
+ static_cast<ViewCtorProp<void, Property> &>(new_view_ctor_prop).value =
+ prop;
+ return WithPropertiesIfUnset<NewViewCtorProp, Properties...>::apply_prop(
+ new_view_ctor_prop, properties...);
+ } else
+ return WithPropertiesIfUnset<ViewCtorProp<P...>,
+ Properties...>::apply_prop(view_ctor_prop,
+ properties...);
+ }
+};
+
+template <typename... P, class... Properties>
+auto with_properties_if_unset(const ViewCtorProp<P...> &view_ctor_prop,
+ const Properties &...properties) {
+ return WithPropertiesIfUnset<ViewCtorProp<P...>, Properties...>::apply_prop(
+ view_ctor_prop, properties...);
+}
+
+#endif
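+
+// Usage sketch (illustrative): with_properties_if_unset fills in defaults
+// without overriding anything the caller already chose, e.g.
+//
+//   auto full = with_properties_if_unset(
+//       prop, Kokkos::DefaultExecutionSpace{}, std::string("unlabeled"));
+//
+// adds an execution space and/or label only if `prop` does not already carry
+// one of its own.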
+
+struct ExecutionSpaceTag {};
+struct MemorySpaceTag {};
+struct LabelTag {};
+struct PointerTag {};
+
+template <typename Tag, typename... P>
+KOKKOS_FUNCTION const auto &get_property(
+ const ViewCtorProp<P...> &view_ctor_prop) {
+ if constexpr (std::is_same_v<Tag, ExecutionSpaceTag>) {
+ static_assert(ViewCtorProp<P...>::has_execution_space);
+ using execution_space_type = typename ViewCtorProp<P...>::execution_space;
+ return static_cast<const ViewCtorProp<void, execution_space_type> &>(
+ view_ctor_prop)
+ .value;
+ } else if constexpr (std::is_same_v<Tag, MemorySpaceTag>) {
+ static_assert(ViewCtorProp<P...>::has_memory_space);
+ using memory_space_type = typename ViewCtorProp<P...>::memory_space;
+ return static_cast<const ViewCtorProp<void, memory_space_type> &>(
+ view_ctor_prop)
+ .value;
+ } else if constexpr (std::is_same_v<Tag, LabelTag>) {
+ static_assert(ViewCtorProp<P...>::has_label);
+ return static_cast<const ViewCtorProp<void, std::string> &>(view_ctor_prop)
+ .value;
+ } else if constexpr (std::is_same_v<Tag, PointerTag>) {
+ static_assert(ViewCtorProp<P...>::has_pointer);
+ using pointer_type = typename ViewCtorProp<P...>::pointer_type;
+ return static_cast<const ViewCtorProp<void, pointer_type> &>(view_ctor_prop)
+ .value;
+ } else {
+ static_assert(std::is_same_v<Tag, void>, "Invalid property tag!");
+ return view_ctor_prop;
+ }
+
+// A workaround to prevent spurious "missing return statement at the end of
+// non-void function" warnings from CUDA builds (issue #5470). Because
+// KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK removes the [[noreturn]] attribute from
+// cuda_abort(), an unreachable while(true); is placed as a fallback.
+#if (defined(KOKKOS_COMPILER_NVCC) && (KOKKOS_COMPILER_NVCC < 1150)) || \
+ (defined(KOKKOS_COMPILER_INTEL) && (KOKKOS_COMPILER_INTEL <= 2100))
+ Kokkos::abort(
+ "Prevents an incorrect warning: missing return statement at end of "
+ "non-void function");
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ while (true)
+ ;
+#endif
+#endif
+}
+#if defined(KOKKOS_COMPILER_NVCC) && (KOKKOS_COMPILER_NVCC < 1150)
+// The pragma pop triggers an unknown-pragma warning from the underlying GCC
+// when -pedantic is used.
+#ifdef __CUDA_ARCH__
+#pragma pop
+#endif
+#endif
+#ifdef KOKKOS_IMPL_INTEL_BOGUS_MISSING_RETURN_STATEMENT_AT_END_OF_NON_VOID_FUNCTION
+#pragma warning(pop)
+#undef KOKKOS_IMPL_INTEL_BOGUS_MISSING_RETURN_STATEMENT_AT_END_OF_NON_VOID_FUNCTION
+#endif
+
+template <typename Tag, typename... P>
+KOKKOS_FUNCTION auto &get_property(ViewCtorProp<P...> &view_ctor_prop) {
+ // Avoid code duplication by deferring to the const-qualified overload and
+ // casting the const away from the return type
+ const auto &tmp = get_property<Tag>(
+ static_cast<const ViewCtorProp<P...> &>(view_ctor_prop));
+ return const_cast<std::decay_t<decltype(tmp)> &>(tmp);
+}
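+
+// For example (illustrative):
+//
+//   auto prop = Kokkos::view_alloc("A", Kokkos::HostSpace{});
+//   const std::string& label = get_property<LabelTag>(prop);
+//   auto& space = get_property<MemorySpaceTag>(prop);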
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+struct ViewAllocateWithoutInitializingBackwardCompat {};
+
+template <>
+struct ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {};
+
+// NOTE This specialization is meant to be used as the
+// ViewAllocateWithoutInitializing alias below. All it does is add a
+// constructor that takes the label as a single argument.
+template <>
+struct ViewCtorProp<WithoutInitializing_t, std::string,
+ ViewAllocateWithoutInitializingBackwardCompat>
+ : ViewCtorProp<WithoutInitializing_t, std::string>,
+ ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {
+ ViewCtorProp(std::string label)
+ : ViewCtorProp<WithoutInitializing_t, std::string>(
+ WithoutInitializing_t(), std::move(label)) {}
+};
+} /* namespace Impl */
+
+using ViewAllocateWithoutInitializing =
+ Impl::ViewCtorProp<Impl::WithoutInitializing_t, std::string,
+ Impl::ViewAllocateWithoutInitializingBackwardCompat>;
+
+inline constexpr Kokkos::Impl::SequentialHostInit_t SequentialHostInit{};
+
+inline constexpr Kokkos::Impl::WithoutInitializing_t WithoutInitializing{};
+
+inline constexpr Kokkos::Impl::AllowPadding_t AllowPadding{};
+
+/** \brief Create View allocation parameter bundle from argument list.
+ *
+ * Valid argument list members are:
+ * 1) label as a "string" or std::string
+ * 2) memory space instance of the View::memory_space type
+ * 3) execution space instance compatible with the View::memory_space
+ * 4) Kokkos::WithoutInitializing to bypass initialization
+ * 5) Kokkos::AllowPadding to allow allocation to pad dimensions for memory
+ * alignment
+ * 6) Kokkos::SequentialHostInit to force the initialization to run
+ * sequentially on the host
+ */
+template <class... Args>
+auto view_alloc(Args &&...args) {
+ using return_type = Impl::ViewCtorProp<typename Impl::ViewCtorProp<
+ void, Kokkos::Impl::remove_cvref_t<Args>>::type...>;
+
+ static_assert(!return_type::has_pointer,
+ "Cannot give pointer-to-memory for view allocation");
+
+ return return_type(std::forward<Args>(args)...);
+}
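+
+// For example (illustrative):
+//
+//   auto prop = Kokkos::view_alloc("matrix", Kokkos::WithoutInitializing);
+//   Kokkos::View<double**> a(prop, m, n);  // allocated but not initialized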
+
+template <class... Args>
+KOKKOS_INLINE_FUNCTION
+ Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>
+ view_wrap(Args const &...args) {
+ using return_type =
+ Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>;
+
+ static_assert(!return_type::has_memory_space &&
+ !return_type::has_execution_space &&
+ !return_type::has_label && return_type::has_pointer,
+ "Must only give pointer-to-memory for view wrapping");
+
+ return return_type(args...);
+}
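+
+// For example (illustrative): adopting caller-owned memory as an unmanaged
+// view, assuming `raw` points to at least m * n doubles:
+//
+//   Kokkos::View<double**, Kokkos::MemoryTraits<Kokkos::Unmanaged>> a(
+//       Kokkos::view_wrap(raw), m, n);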
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_VIEW_DATA_ANALYSIS_HPP
+#define KOKKOS_VIEW_DATA_ANALYSIS_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos::Impl {
+
+template <unsigned I, size_t... Args>
+struct variadic_size_t {
+ enum : size_t { value = KOKKOS_INVALID_INDEX };
+};
+
+template <size_t Val, size_t... Args>
+struct variadic_size_t<0, Val, Args...> {
+ enum : size_t { value = Val };
+};
+
+template <unsigned I, size_t Val, size_t... Args>
+struct variadic_size_t<I, Val, Args...> {
+ enum : size_t { value = variadic_size_t<I - 1, Args...>::value };
+};
+
+template <size_t... Args>
+struct rank_dynamic;
+
+template <>
+struct rank_dynamic<> {
+ enum : unsigned { value = 0 };
+};
+
+template <size_t Val, size_t... Args>
+struct rank_dynamic<Val, Args...> {
+ enum : unsigned { value = (Val == 0 ? 1 : 0) + rank_dynamic<Args...>::value };
+};
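+
+// For example: rank_dynamic<0, 0, 4>::value == 2 (every extent given as 0
+// marks a runtime dimension), and variadic_size_t<2, 0, 0, 4>::value == 4
+// picks out the extent at position 2.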
+
+#define KOKKOS_IMPL_VIEW_DIMENSION(R) \
+ template <size_t V, unsigned> \
+ struct ViewDimension##R { \
+ static constexpr size_t ArgN##R = (V != KOKKOS_INVALID_INDEX ? V : 1); \
+ static constexpr size_t N##R = (V != KOKKOS_INVALID_INDEX ? V : 1); \
+ KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t) {} \
+ ViewDimension##R() = default; \
+ ViewDimension##R(const ViewDimension##R&) = default; \
+ ViewDimension##R& operator=(const ViewDimension##R&) = default; \
+ }; \
+ template <size_t V, unsigned RD> \
+ constexpr size_t ViewDimension##R<V, RD>::ArgN##R; \
+ template <size_t V, unsigned RD> \
+ constexpr size_t ViewDimension##R<V, RD>::N##R; \
+ template <unsigned RD> \
+ struct ViewDimension##R<0u, RD> { \
+ static constexpr size_t ArgN##R = 0; \
+ std::conditional_t<(RD < 3), size_t, unsigned> N##R; \
+ ViewDimension##R() = default; \
+ ViewDimension##R(const ViewDimension##R&) = default; \
+ ViewDimension##R& operator=(const ViewDimension##R&) = default; \
+ KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t V) : N##R(V) {} \
+ }; \
+ template <unsigned RD> \
+ constexpr size_t ViewDimension##R<0u, RD>::ArgN##R;
+
+KOKKOS_IMPL_VIEW_DIMENSION(0)
+KOKKOS_IMPL_VIEW_DIMENSION(1)
+KOKKOS_IMPL_VIEW_DIMENSION(2)
+KOKKOS_IMPL_VIEW_DIMENSION(3)
+KOKKOS_IMPL_VIEW_DIMENSION(4)
+KOKKOS_IMPL_VIEW_DIMENSION(5)
+KOKKOS_IMPL_VIEW_DIMENSION(6)
+KOKKOS_IMPL_VIEW_DIMENSION(7)
+
+#undef KOKKOS_IMPL_VIEW_DIMENSION
+
+// MSVC does not do empty base class optimization by default.
+// Per the standard, it is required for standard-layout types.
+template <size_t... Vals>
+struct KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION ViewDimension
+ : public ViewDimension0<variadic_size_t<0u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension1<variadic_size_t<1u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension2<variadic_size_t<2u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension3<variadic_size_t<3u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension4<variadic_size_t<4u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension5<variadic_size_t<5u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension6<variadic_size_t<6u, Vals...>::value,
+ rank_dynamic<Vals...>::value>,
+ public ViewDimension7<variadic_size_t<7u, Vals...>::value,
+ rank_dynamic<Vals...>::value> {
+ using D0 = ViewDimension0<variadic_size_t<0U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D1 = ViewDimension1<variadic_size_t<1U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D2 = ViewDimension2<variadic_size_t<2U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D3 = ViewDimension3<variadic_size_t<3U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D4 = ViewDimension4<variadic_size_t<4U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D5 = ViewDimension5<variadic_size_t<5U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D6 = ViewDimension6<variadic_size_t<6U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+ using D7 = ViewDimension7<variadic_size_t<7U, Vals...>::value,
+ rank_dynamic<Vals...>::value>;
+
+ using D0::ArgN0;
+ using D1::ArgN1;
+ using D2::ArgN2;
+ using D3::ArgN3;
+ using D4::ArgN4;
+ using D5::ArgN5;
+ using D6::ArgN6;
+ using D7::ArgN7;
+
+ using D0::N0;
+ using D1::N1;
+ using D2::N2;
+ using D3::N3;
+ using D4::N4;
+ using D5::N5;
+ using D6::N6;
+ using D7::N7;
+
+ static constexpr unsigned rank = sizeof...(Vals);
+ static constexpr unsigned rank_dynamic = Impl::rank_dynamic<Vals...>::value;
+
+ ViewDimension() = default;
+ ViewDimension(const ViewDimension&) = default;
+ ViewDimension& operator=(const ViewDimension&) = default;
+
+ KOKKOS_INLINE_FUNCTION
+ constexpr ViewDimension(size_t n0, size_t n1, size_t n2, size_t n3, size_t n4,
+ size_t n5, size_t n6, size_t n7)
+ : D0(n0 == KOKKOS_INVALID_INDEX ? 1 : n0),
+ D1(n1 == KOKKOS_INVALID_INDEX ? 1 : n1),
+ D2(n2 == KOKKOS_INVALID_INDEX ? 1 : n2),
+ D3(n3 == KOKKOS_INVALID_INDEX ? 1 : n3),
+ D4(n4 == KOKKOS_INVALID_INDEX ? 1 : n4),
+ D5(n5 == KOKKOS_INVALID_INDEX ? 1 : n5),
+ D6(n6 == KOKKOS_INVALID_INDEX ? 1 : n6),
+ D7(n7 == KOKKOS_INVALID_INDEX ? 1 : n7) {}
+
+ KOKKOS_INLINE_FUNCTION
+ constexpr size_t extent(const unsigned r) const noexcept {
+ return r == 0
+ ? N0
+ : (r == 1
+ ? N1
+ : (r == 2
+ ? N2
+ : (r == 3
+ ? N3
+ : (r == 4
+ ? N4
+ : (r == 5
+ ? N5
+ : (r == 6
+ ? N6
+ : (r == 7 ? N7
+ : 0)))))));
+ }
+
+ static KOKKOS_INLINE_FUNCTION constexpr size_t static_extent(
+ const unsigned r) noexcept {
+ return r == 0
+ ? ArgN0
+ : (r == 1
+ ? ArgN1
+ : (r == 2
+ ? ArgN2
+ : (r == 3
+ ? ArgN3
+ : (r == 4
+ ? ArgN4
+ : (r == 5
+ ? ArgN5
+ : (r == 6
+ ? ArgN6
+ : (r == 7 ? ArgN7
+ : 0)))))));
+ }
+
+ template <size_t N>
+ struct prepend {
+ using type = ViewDimension<N, Vals...>;
+ };
+
+ template <size_t N>
+ struct append {
+ using type = ViewDimension<Vals..., N>;
+ };
+};
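+
+// For example (illustrative): ViewDimension<0, 3> describes one runtime
+// extent followed by a static extent of 3; rank == 2, rank_dynamic == 1,
+// and prepend<5>::type is ViewDimension<5, 0, 3>.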
+
+template <class A, class B>
+struct ViewDimensionJoin;
+
+template <size_t... A, size_t... B>
+struct ViewDimensionJoin<ViewDimension<A...>, ViewDimension<B...>> {
+ using type = ViewDimension<A..., B...>;
+};
+
+//----------------------------------------------------------------------------
+
+template <class DstDim, class SrcDim>
+struct ViewDimensionAssignable;
+
+template <size_t... DstArgs, size_t... SrcArgs>
+struct ViewDimensionAssignable<ViewDimension<DstArgs...>,
+ ViewDimension<SrcArgs...>> {
+ using dst = ViewDimension<DstArgs...>;
+ using src = ViewDimension<SrcArgs...>;
+
+ enum {
+ value = unsigned(dst::rank) == unsigned(src::rank) &&
+ (
+ // Compile time check that potential static dimensions match
+ ((1 > dst::rank_dynamic && 1 > src::rank_dynamic)
+ ? (size_t(dst::ArgN0) == size_t(src::ArgN0))
+ : true) &&
+ ((2 > dst::rank_dynamic && 2 > src::rank_dynamic)
+ ? (size_t(dst::ArgN1) == size_t(src::ArgN1))
+ : true) &&
+ ((3 > dst::rank_dynamic && 3 > src::rank_dynamic)
+ ? (size_t(dst::ArgN2) == size_t(src::ArgN2))
+ : true) &&
+ ((4 > dst::rank_dynamic && 4 > src::rank_dynamic)
+ ? (size_t(dst::ArgN3) == size_t(src::ArgN3))
+ : true) &&
+ ((5 > dst::rank_dynamic && 5 > src::rank_dynamic)
+ ? (size_t(dst::ArgN4) == size_t(src::ArgN4))
+ : true) &&
+ ((6 > dst::rank_dynamic && 6 > src::rank_dynamic)
+ ? (size_t(dst::ArgN5) == size_t(src::ArgN5))
+ : true) &&
+ ((7 > dst::rank_dynamic && 7 > src::rank_dynamic)
+ ? (size_t(dst::ArgN6) == size_t(src::ArgN6))
+ : true) &&
+ ((8 > dst::rank_dynamic && 8 > src::rank_dynamic)
+ ? (size_t(dst::ArgN7) == size_t(src::ArgN7))
+ : true))
+ };
+};
+
+/** \brief Given a value type and dimension generate the View data type */
+template <class T, class Dim>
+struct ViewDataType;
+
+template <class T>
+struct ViewDataType<T, ViewDimension<>> {
+ using type = T;
+};
+
+template <class T, size_t... Args>
+struct ViewDataType<T, ViewDimension<0, Args...>> {
+ using type = typename ViewDataType<T*, ViewDimension<Args...>>::type;
+};
+
+template <class T, size_t N, size_t... Args>
+struct ViewDataType<T, ViewDimension<N, Args...>> {
+ using type = typename ViewDataType<T, ViewDimension<Args...>>::type[N];
+};
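+
+// For example: ViewDataType<double, ViewDimension<0, 0, 4>>::type is
+// double** [4] -- one '*' per dynamic (zero) extent, then each static extent
+// as an array bound.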
+
+/**\brief Analysis of View data type.
+ *
+ * Data type conforms to one of the following patterns:
+ *   {const} value_type [][#][#][#]
+ *   {const} value_type ***[#][#][#]
+ * where the sum of counts of '*' and '[#]' is at most ten.
+ *
+ * Provides aliases for ViewDimension<...> and value_type.
+ */
+template <class T>
+struct ViewArrayAnalysis {
+ using value_type = T;
+ using const_value_type = std::add_const_t<T>;
+ using non_const_value_type = std::remove_const_t<T>;
+ using static_dimension = ViewDimension<>;
+ using dynamic_dimension = ViewDimension<>;
+ using dimension = ViewDimension<>;
+};
+
+template <class T, size_t N>
+struct ViewArrayAnalysis<T[N]> {
+ private:
+ using nested = ViewArrayAnalysis<T>;
+
+ public:
+ using value_type = typename nested::value_type;
+ using const_value_type = typename nested::const_value_type;
+ using non_const_value_type = typename nested::non_const_value_type;
+
+ using static_dimension =
+ typename nested::static_dimension::template prepend<N>::type;
+
+ using dynamic_dimension = typename nested::dynamic_dimension;
+
+ using dimension =
+ typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
+
+template <class T>
+struct ViewArrayAnalysis<T[]> {
+ private:
+ using nested = ViewArrayAnalysis<T>;
+ using nested_dimension = typename nested::dimension;
+
+ public:
+ using value_type = typename nested::value_type;
+ using const_value_type = typename nested::const_value_type;
+ using non_const_value_type = typename nested::non_const_value_type;
+
+ using dynamic_dimension =
+ typename nested::dynamic_dimension::template prepend<0>::type;
+
+ using static_dimension = typename nested::static_dimension;
+
+ using dimension =
+ typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
+
+template <class T>
+struct ViewArrayAnalysis<T*> {
+ private:
+ using nested = ViewArrayAnalysis<T>;
+
+ public:
+ using value_type = typename nested::value_type;
+ using const_value_type = typename nested::const_value_type;
+ using non_const_value_type = typename nested::non_const_value_type;
+
+ using dynamic_dimension =
+ typename nested::dynamic_dimension::template prepend<0>::type;
+
+ using static_dimension = typename nested::static_dimension;
+
+ using dimension =
+ typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
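+
+// For example: ViewArrayAnalysis<double*[3]> yields value_type double,
+// dynamic_dimension ViewDimension<0>, static_dimension ViewDimension<3>,
+// and dimension ViewDimension<0, 3> (dynamic extents always precede static
+// ones).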
+
+template <class DataType, class ArrayLayout, class ValueType>
+struct ViewDataAnalysis {
+ private:
+ using array_analysis = ViewArrayAnalysis<DataType>;
+
+ // ValueType is opportunity for partial specialization.
+ // Must match array analysis when this default template is used.
+ static_assert(
+ std::is_same_v<ValueType, typename array_analysis::non_const_value_type>);
+
+ public:
+ using specialize = void; // No specialization
+
+ using dimension = typename array_analysis::dimension;
+ using value_type = typename array_analysis::value_type;
+ using const_value_type = typename array_analysis::const_value_type;
+ using non_const_value_type = typename array_analysis::non_const_value_type;
+
+ // Generate analogous multidimensional array specification type.
+ using type = typename ViewDataType<value_type, dimension>::type;
+ using const_type = typename ViewDataType<const_value_type, dimension>::type;
+ using non_const_type =
+ typename ViewDataType<non_const_value_type, dimension>::type;
+
+ // Generate "flattened" multidimensional array specification type.
+ using scalar_array_type = type;
+ using const_scalar_array_type = const_type;
+ using non_const_scalar_array_type = non_const_type;
+};
+
+template <class Dimension, class Layout, class Enable = void>
+struct ViewOffset {
+ using is_mapping_plugin = std::false_type;
+};
+} // namespace Kokkos::Impl
+
+#endif // KOKKOS_VIEW_DATA_ANALYSIS_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
static_assert(false,
"Including non-public Kokkos header files is not allowed.");
-#else
-KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
-#endif
#endif
-#ifndef KOKKOS_VIEW_HPP
-#define KOKKOS_VIEW_HPP
+#ifndef KOKKOS_VIEWLEGACY_HPP
+#define KOKKOS_VIEWLEGACY_HPP
#include <type_traits>
#include <string>
#include <View/Hooks/Kokkos_ViewHooks.hpp>
#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_Utilities.hpp>
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class DataType>
-struct ViewArrayAnalysis;
-
-template <class DataType, class ArrayLayout,
- typename ValueType =
- typename ViewArrayAnalysis<DataType>::non_const_value_type>
-struct ViewDataAnalysis;
-
-template <class, class...>
-class ViewMapping {
- public:
- enum : bool { is_assignable_data_type = false };
- enum : bool { is_assignable = false };
-};
-
-template <typename IntType>
-constexpr KOKKOS_INLINE_FUNCTION std::size_t count_valid_integers(
- const IntType i0, const IntType i1, const IntType i2, const IntType i3,
- const IntType i4, const IntType i5, const IntType i6, const IntType i7) {
- static_assert(std::is_integral<IntType>::value,
- "count_valid_integers() must have integer arguments.");
-
- return (i0 != KOKKOS_INVALID_INDEX) + (i1 != KOKKOS_INVALID_INDEX) +
- (i2 != KOKKOS_INVALID_INDEX) + (i3 != KOKKOS_INVALID_INDEX) +
- (i4 != KOKKOS_INVALID_INDEX) + (i5 != KOKKOS_INVALID_INDEX) +
- (i6 != KOKKOS_INVALID_INDEX) + (i7 != KOKKOS_INVALID_INDEX);
-}
-
-KOKKOS_INLINE_FUNCTION
-void runtime_check_rank(const size_t rank, const size_t dyn_rank,
- const bool is_void_spec, const size_t i0,
- const size_t i1, const size_t i2, const size_t i3,
- const size_t i4, const size_t i5, const size_t i6,
- const size_t i7, const std::string& label) {
- (void)(label);
-
- if (is_void_spec) {
- const size_t num_passed_args =
- count_valid_integers(i0, i1, i2, i3, i4, i5, i6, i7);
-
- if (num_passed_args != dyn_rank && num_passed_args != rank) {
- KOKKOS_IF_ON_HOST(
- const std::string message =
- "Constructor for Kokkos View '" + label +
- "' has mismatched number of arguments. Number of arguments = " +
- std::to_string(num_passed_args) +
- " but dynamic rank = " + std::to_string(dyn_rank) + " \n";
- Kokkos::abort(message.c_str());)
- KOKKOS_IF_ON_DEVICE(Kokkos::abort("Constructor for Kokkos View has "
- "mismatched number of arguments.");)
- }
- }
-}
-
-} /* namespace Impl */
-} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+#include <View/MDSpan/Kokkos_MDSpan_Extents.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Layout.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Accessor.hpp>
+#endif
+#include <Kokkos_MinMax.hpp>
-// Class to provide a uniform type
-namespace Kokkos {
-namespace Impl {
-template <class ViewType, int Traits = 0>
-struct ViewUniformType;
-}
-} // namespace Kokkos
+#include <View/Kokkos_ViewTraits.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
-/** \class ViewTraits
- * \brief Traits class for accessing attributes of a View.
- *
- * This is an implementation detail of View. It is only of interest
- * to developers implementing a new specialization of View.
- *
- * Template argument options:
- * - View< DataType >
- * - View< DataType , Space >
- * - View< DataType , Space , MemoryTraits >
- * - View< DataType , ArrayLayout >
- * - View< DataType , ArrayLayout , Space >
- * - View< DataType , ArrayLayout , MemoryTraits >
- * - View< DataType , ArrayLayout , Space , MemoryTraits >
- * - View< DataType , MemoryTraits >
- */
-
-template <class DataType, class... Properties>
-struct ViewTraits;
-
-template <>
-struct ViewTraits<void> {
- using execution_space = void;
- using memory_space = void;
- using HostMirrorSpace = void;
- using array_layout = void;
- using memory_traits = void;
- using specialize = void;
- using hooks_policy = void;
-};
-
-template <class... Prop>
-struct ViewTraits<void, void, Prop...> {
- // Ignore an extraneous 'void'
- using execution_space = typename ViewTraits<void, Prop...>::execution_space;
- using memory_space = typename ViewTraits<void, Prop...>::memory_space;
- using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
- using array_layout = typename ViewTraits<void, Prop...>::array_layout;
- using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
- using specialize = typename ViewTraits<void, Prop...>::specialize;
- using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
-};
-
-template <class HooksPolicy, class... Prop>
-struct ViewTraits<
- std::enable_if_t<Kokkos::Experimental::is_hooks_policy<HooksPolicy>::value>,
- HooksPolicy, Prop...> {
- using execution_space = typename ViewTraits<void, Prop...>::execution_space;
- using memory_space = typename ViewTraits<void, Prop...>::memory_space;
- using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
- using array_layout = typename ViewTraits<void, Prop...>::array_layout;
- using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
- using specialize = typename ViewTraits<void, Prop...>::specialize;
- using hooks_policy = HooksPolicy;
-};
-
-template <class ArrayLayout, class... Prop>
-struct ViewTraits<std::enable_if_t<Kokkos::is_array_layout<ArrayLayout>::value>,
- ArrayLayout, Prop...> {
- // Specify layout, keep subsequent space and memory traits arguments
-
- using execution_space = typename ViewTraits<void, Prop...>::execution_space;
- using memory_space = typename ViewTraits<void, Prop...>::memory_space;
- using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
- using array_layout = ArrayLayout;
- using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
- using specialize = typename ViewTraits<void, Prop...>::specialize;
- using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
-};
-
-template <class Space, class... Prop>
-struct ViewTraits<std::enable_if_t<Kokkos::is_space<Space>::value>, Space,
- Prop...> {
- // Specify Space, memory traits should be the only subsequent argument.
-
- static_assert(
- std::is_same<typename ViewTraits<void, Prop...>::execution_space,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::memory_space,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::HostMirrorSpace,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::array_layout,
- void>::value,
- "Only one View Execution or Memory Space template argument");
-
- using execution_space = typename Space::execution_space;
- using memory_space = typename Space::memory_space;
- using HostMirrorSpace =
- typename Kokkos::Impl::HostMirror<Space>::Space::memory_space;
- using array_layout = typename execution_space::array_layout;
- using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
- using specialize = typename ViewTraits<void, Prop...>::specialize;
- using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
-};
-
-template <class MemoryTraits, class... Prop>
-struct ViewTraits<
- std::enable_if_t<Kokkos::is_memory_traits<MemoryTraits>::value>,
- MemoryTraits, Prop...> {
- // Specify memory trait, should not be any subsequent arguments
-
- static_assert(
- std::is_same<typename ViewTraits<void, Prop...>::execution_space,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::memory_space,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::array_layout,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::memory_traits,
- void>::value &&
- std::is_same<typename ViewTraits<void, Prop...>::hooks_policy,
- void>::value,
- "MemoryTrait is the final optional template argument for a View");
-
- using execution_space = void;
- using memory_space = void;
- using HostMirrorSpace = void;
- using array_layout = void;
- using memory_traits = MemoryTraits;
- using specialize = void;
- using hooks_policy = void;
-};
-
-template <class DataType, class... Properties>
-struct ViewTraits {
- private:
- // Unpack the properties arguments
- using prop = ViewTraits<void, Properties...>;
-
- using ExecutionSpace =
- std::conditional_t<!std::is_void<typename prop::execution_space>::value,
- typename prop::execution_space,
- Kokkos::DefaultExecutionSpace>;
-
- using MemorySpace =
- std::conditional_t<!std::is_void<typename prop::memory_space>::value,
- typename prop::memory_space,
- typename ExecutionSpace::memory_space>;
-
- using ArrayLayout =
- std::conditional_t<!std::is_void<typename prop::array_layout>::value,
- typename prop::array_layout,
- typename ExecutionSpace::array_layout>;
-
- using HostMirrorSpace = std::conditional_t<
- !std::is_void<typename prop::HostMirrorSpace>::value,
- typename prop::HostMirrorSpace,
- typename Kokkos::Impl::HostMirror<ExecutionSpace>::Space>;
-
- using MemoryTraits =
- std::conditional_t<!std::is_void<typename prop::memory_traits>::value,
- typename prop::memory_traits,
- typename Kokkos::MemoryManaged>;
-
- using HooksPolicy =
- std::conditional_t<!std::is_void<typename prop::hooks_policy>::value,
- typename prop::hooks_policy,
- Kokkos::Experimental::DefaultViewHooks>;
-
- // Analyze data type's properties,
- // May be specialized based upon the layout and value type
- using data_analysis = Kokkos::Impl::ViewDataAnalysis<DataType, ArrayLayout>;
-
- public:
- //------------------------------------
- // Data type traits:
-
- using data_type = typename data_analysis::type;
- using const_data_type = typename data_analysis::const_type;
- using non_const_data_type = typename data_analysis::non_const_type;
-
- //------------------------------------
- // Compatible array of trivial type traits:
-
- using scalar_array_type = typename data_analysis::scalar_array_type;
- using const_scalar_array_type =
- typename data_analysis::const_scalar_array_type;
- using non_const_scalar_array_type =
- typename data_analysis::non_const_scalar_array_type;
-
- //------------------------------------
- // Value type traits:
-
- using value_type = typename data_analysis::value_type;
- using const_value_type = typename data_analysis::const_value_type;
- using non_const_value_type = typename data_analysis::non_const_value_type;
-
- //------------------------------------
- // Mapping traits:
-
- using array_layout = ArrayLayout;
- using dimension = typename data_analysis::dimension;
-
- using specialize = std::conditional_t<
- std::is_void<typename data_analysis::specialize>::value,
- typename prop::specialize,
- typename data_analysis::specialize>; /* mapping specialization tag */
-
- enum { rank = dimension::rank };
- enum { rank_dynamic = dimension::rank_dynamic };
-
- //------------------------------------
- // Execution space, memory space, memory access traits, and host mirror space.
-
- using execution_space = ExecutionSpace;
- using memory_space = MemorySpace;
- using device_type = Kokkos::Device<ExecutionSpace, MemorySpace>;
- using memory_traits = MemoryTraits;
- using host_mirror_space = HostMirrorSpace;
- using hooks_policy = HooksPolicy;
-
- using size_type = typename MemorySpace::size_type;
-
- enum { is_hostspace = std::is_same<MemorySpace, HostSpace>::value };
- enum { is_managed = MemoryTraits::is_unmanaged == 0 };
- enum { is_random_access = MemoryTraits::is_random_access == 1 };
-
- //------------------------------------
-};
-
/** \class View
* \brief View to an array of data.
*
std::remove_reference_t<View1>,
std::remove_const_t<std::remove_reference_t<View2>>>;
-#ifdef KOKKOS_ENABLE_CXX17
template <class T1, class T2>
inline constexpr bool is_always_assignable_v =
is_always_assignable<T1, T2>::value;
-#endif
template <class... ViewTDst, class... ViewTSrc>
constexpr bool is_assignable(const Kokkos::View<ViewTDst...>& dst,
Kokkos::Impl::ViewMapping<DstTraits, SrcTraits,
typename DstTraits::specialize>;
-#ifdef KOKKOS_ENABLE_CXX17
return is_always_assignable_v<Kokkos::View<ViewTDst...>,
Kokkos::View<ViewTSrc...>> ||
-#else
- return is_always_assignable<Kokkos::View<ViewTDst...>,
- Kokkos::View<ViewTSrc...>>::value ||
-#endif
(mapping_type::is_assignable &&
((DstTraits::dimension::rank_dynamic >= 1) ||
(dst.static_extent(0) == src.extent(0))) &&
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-#include <impl/Kokkos_ViewMapping.hpp>
-#include <impl/Kokkos_ViewArray.hpp>
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-
-inline constexpr Kokkos::Impl::ALL_t ALL = Kokkos::Impl::ALL_t();
-
-inline constexpr Kokkos::Impl::WithoutInitializing_t WithoutInitializing =
- Kokkos::Impl::WithoutInitializing_t();
-
-inline constexpr Kokkos::Impl::AllowPadding_t AllowPadding =
- Kokkos::Impl::AllowPadding_t();
-
-/** \brief Create View allocation parameter bundle from argument list.
- *
- * Valid argument list members are:
- * 1) label as a "string" or std::string
- * 2) memory space instance of the View::memory_space type
- * 3) execution space instance compatible with the View::memory_space
- * 4) Kokkos::WithoutInitializing to bypass initialization
- * 4) Kokkos::AllowPadding to allow allocation to pad dimensions for memory
- * alignment
- */
-template <class... Args>
-inline Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>
-view_alloc(Args const&... args) {
- using return_type =
- Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>;
-
- static_assert(!return_type::has_pointer,
- "Cannot give pointer-to-memory for view allocation");
-
- return return_type(args...);
-}
-
-template <class... Args>
-KOKKOS_INLINE_FUNCTION
- Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>
- view_wrap(Args const&... args) {
- using return_type =
- Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>;
-
- static_assert(!return_type::has_memory_space &&
- !return_type::has_execution_space &&
- !return_type::has_label && return_type::has_pointer,
- "Must only give pointer-to-memory for view wrapping");
-
- return return_type(args...);
-}
-
-} /* namespace Kokkos */
+#include <View/Kokkos_ViewMapping.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
template <class D, class... P>
struct is_view<const View<D, P...>> : public std::true_type {};
+template <class T>
+inline constexpr bool is_view_v = is_view<T>::value;
+
template <class DataType, class... Properties>
class View : public ViewTraits<DataType, Properties...> {
private:
typename traits::device_type, typename traits::hooks_policy,
typename traits::memory_traits>;
- /** \brief Compatible HostMirror view */
- using HostMirror =
+ /** \brief Compatible host mirror view */
+ using host_mirror_type =
View<typename traits::non_const_data_type, typename traits::array_layout,
Device<DefaultHostExecutionSpace,
typename traits::host_mirror_space::memory_space>,
typename traits::hooks_policy>;
- /** \brief Compatible HostMirror view */
- using host_mirror_type =
- View<typename traits::non_const_data_type, typename traits::array_layout,
- typename traits::host_mirror_space, typename traits::hooks_policy>;
+ /** \brief Compatible host mirror view */
+ using HostMirror = host_mirror_type;
/** \brief Unified types */
using uniform_type = typename Impl::ViewUniformType<View, 0>::type;
using uniform_runtime_const_nomemspace_type =
typename Impl::ViewUniformType<View, 0>::runtime_const_nomemspace_type;
+ using reference_type = typename map_type::reference_type;
+ using pointer_type = typename map_type::pointer_type;
+
+ // Typedefs from mdspan
+ // using extents_type -> not applicable
+ // Defining layout_type here made MSVC+CUDA fail
+ // using layout_type = typename traits::array_layout;
+ // using accessor_type -> not applicable
+ // using mapping_type -> not applicable
+ using element_type = typename traits::value_type;
+ // using value_type -> conflicts with traits::value_type
+ using index_type = typename traits::memory_space::size_type;
+ // using size_type -> already from traits::size_type; where it is
+ // memory_space::size_type
+ using rank_type = size_t;
+ using data_handle_type = pointer_type;
+ using reference = reference_type;
+
//----------------------------------------
// Domain rank and extents
- enum { Rank = map_type::Rank };
-
- /** \brief rank() to be implemented
- */
- // KOKKOS_INLINE_FUNCTION
- // static
- // constexpr unsigned rank() { return map_type::Rank; }
+ static constexpr Impl::integral_constant<size_t, traits::dimension::rank>
+ rank = {};
+ static constexpr Impl::integral_constant<size_t,
+ traits::dimension::rank_dynamic>
+ rank_dynamic = {};
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ enum {Rank KOKKOS_DEPRECATED_WITH_COMMENT("Use rank instead.") =
+ map_type::Rank};
+#endif
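+  // rank and rank_dynamic are integral_constant-like objects with a call
+  // operator, so both spellings are valid (sketch):
+  //   Kokkos::View<double**> a("A", 5, 7);
+  //   static_assert(decltype(a)::rank == 2);
+  //   static_assert(decltype(a)::rank() == 2);
+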
template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<std::is_integral_v<iType>,
+ size_t>
extent(const iType& r) const noexcept {
return m_map.extent(r);
}
}
template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, int>
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<std::is_integral_v<iType>,
+ int>
extent_int(const iType& r) const noexcept {
return static_cast<int>(m_map.extent(r));
}
}
template <typename iType>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
- std::is_integral<iType>::value, size_t>
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<std::is_integral_v<iType>,
+ size_t>
stride(iType r) const {
return (
r == 0
//----------------------------------------
// Range span is the span which contains all members.
- using reference_type = typename map_type::reference_type;
- using pointer_type = typename map_type::pointer_type;
-
enum {
reference_type_is_lvalue_reference =
- std::is_lvalue_reference<reference_type>::value
+ std::is_lvalue_reference_v<reference_type>
};
KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
private:
static constexpr bool is_layout_left =
- std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value;
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutLeft>;
static constexpr bool is_layout_right =
- std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value;
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutRight>;
static constexpr bool is_layout_stride =
- std::is_same<typename traits::array_layout, Kokkos::LayoutStride>::value;
+ std::is_same_v<typename traits::array_layout, Kokkos::LayoutStride>;
static constexpr bool is_default_map =
- std::is_void<typename traits::specialize>::value &&
+ std::is_void_v<typename traits::specialize> &&
(is_layout_left || is_layout_right || is_layout_stride);
#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
template <typename... Is>
static KOKKOS_FUNCTION void check_access_member_function_valid_args(Is...) {
- static_assert(Rank <= sizeof...(Is), "");
- static_assert(sizeof...(Is) <= 8, "");
- static_assert(Kokkos::Impl::are_integral<Is...>::value, "");
+ static_assert(rank <= sizeof...(Is));
+ static_assert(sizeof...(Is) <= 8);
+ static_assert(Kokkos::Impl::are_integral<Is...>::value);
}
template <typename... Is>
static KOKKOS_FUNCTION void check_operator_parens_valid_args(Is...) {
- static_assert(Rank == sizeof...(Is), "");
- static_assert(Kokkos::Impl::are_integral<Is...>::value, "");
+ static_assert(rank == sizeof...(Is));
+ static_assert(Kokkos::Impl::are_integral<Is...>::value);
}
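+  // With bounds checking enabled, these helpers reject at compile time a
+  // call such as a(1, 2, 3) on a rank-2 view via the static_asserts above.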
public:
template <typename I0>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0>::value && //
- (1 == Rank) && is_default_map && !is_layout_stride),
+ (1 == rank) && is_default_map && !is_layout_stride),
reference_type>
operator()(I0 i0) const {
check_operator_parens_valid_args(i0);
template <typename I0>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0>::value && //
- (1 == Rank) && is_default_map && is_layout_stride),
+ (1 == rank) && is_default_map && is_layout_stride),
reference_type>
operator()(I0 i0) const {
check_operator_parens_valid_args(i0);
template <typename I0>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- ((1 == Rank) && Kokkos::Impl::are_integral<I0>::value && !is_default_map),
+ ((1 == rank) && Kokkos::Impl::are_integral<I0>::value && !is_default_map),
reference_type>
operator[](I0 i0) const {
KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
template <typename I0>
KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<((1 == Rank) && Kokkos::Impl::are_integral<I0>::value &&
+ std::enable_if_t<((1 == rank) && Kokkos::Impl::are_integral<I0>::value &&
is_default_map && !is_layout_stride),
reference_type>
operator[](I0 i0) const {
template <typename I0>
KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<((1 == Rank) && Kokkos::Impl::are_integral<I0>::value &&
+ std::enable_if_t<((1 == rank) && Kokkos::Impl::are_integral<I0>::value &&
is_default_map && is_layout_stride),
reference_type>
operator[](I0 i0) const {
// Rank 2 default map operator()
template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value && //
- (2 == Rank) && is_default_map && is_layout_left &&
- (traits::rank_dynamic == 0)),
- reference_type>
- operator()(I0 i0, I1 i1) const {
- check_operator_parens_valid_args(i0, i1);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
- return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value && //
- (2 == Rank) && is_default_map && is_layout_left &&
- (traits::rank_dynamic != 0)),
- reference_type>
- operator()(I0 i0, I1 i1) const {
- check_operator_parens_valid_args(i0, i1);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
- return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value && //
- (2 == Rank) && is_default_map && is_layout_right &&
- (traits::rank_dynamic == 0)),
- reference_type>
- operator()(I0 i0, I1 i1) const {
- check_operator_parens_valid_args(i0, i1);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
- return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value && //
- (2 == Rank) && is_default_map && is_layout_right &&
- (traits::rank_dynamic != 0)),
- reference_type>
- operator()(I0 i0, I1 i1) const {
- check_operator_parens_valid_args(i0, i1);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
- return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
- }
-
- template <typename I0, typename I1>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value && //
- (2 == Rank) && is_default_map && is_layout_stride),
- reference_type>
- operator()(I0 i0, I1 i1) const {
+ KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+ (Kokkos::Impl::always_true<I0, I1>::value && //
+ (2 == rank) && is_default_map &&
+ (is_layout_left || is_layout_right || is_layout_stride)),
+ reference_type>
+ operator()(I0 i0, I1 i1) const {
check_operator_parens_valid_args(i0, i1);
KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
- return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
- i1 * m_map.m_impl_offset.m_stride.S1];
+ if constexpr (is_layout_left) {
+ if constexpr (rank_dynamic == 0)
+ return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
+ else
+ return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
+ } else if constexpr (is_layout_right) {
+ if constexpr (rank_dynamic == 0)
+ return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
+ else
+ return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
+ } else {
+ static_assert(is_layout_stride);
+ return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
+ i1 * m_map.m_impl_offset.m_stride.S1];
+ }
+#if defined(KOKKOS_COMPILER_INTEL)
+ __builtin_unreachable();
+#endif
}
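+
+  // The if constexpr chain above replaces four layout-specific overloads;
+  // every branch returns, so the Intel-specific __builtin_unreachable() only
+  // marks the fall-through path as dead code.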
// Rank 0 -> 8 operator() except for rank-1 and rank-2 with default map which
template <typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<Is...>::value && //
- (2 != Rank) && (1 != Rank) && (0 != Rank) && is_default_map),
+ (2 != rank) && (1 != rank) && (0 != rank) && is_default_map),
reference_type>
operator()(Is... indices) const {
check_operator_parens_valid_args(indices...);
template <typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<Is...>::value && //
- ((0 == Rank) || !is_default_map)),
+ ((0 == rank) || !is_default_map)),
reference_type>
operator()(Is... indices) const {
check_operator_parens_valid_args(indices...);
template <typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<Is...>::value && (0 == Rank)), reference_type>
+ (Kokkos::Impl::always_true<Is...>::value && (0 == rank)), reference_type>
access(Is... extra) const {
check_access_member_function_valid_args(extra...);
KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, extra...)
template <typename I0, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
- (1 == Rank) && !is_default_map),
+ (1 == rank) && !is_default_map),
reference_type>
access(I0 i0, Is... extra) const {
check_access_member_function_valid_args(i0, extra...);
template <typename I0, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
- (1 == Rank) && is_default_map && !is_layout_stride),
+ (1 == rank) && is_default_map && !is_layout_stride),
reference_type>
access(I0 i0, Is... extra) const {
check_access_member_function_valid_args(i0, extra...);
template <typename I0, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
- (1 == Rank) && is_default_map && is_layout_stride),
+ (1 == rank) && is_default_map && is_layout_stride),
reference_type>
access(I0 i0, Is... extra) const {
check_access_member_function_valid_args(i0, extra...);
template <typename I0, typename I1, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, Is...>::value &&
- (2 == Rank) && !is_default_map),
+ (2 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, Is... extra) const {
check_access_member_function_valid_args(i0, i1, extra...);
template <typename I0, typename I1, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
- is_default_map && is_layout_left && (traits::rank_dynamic == 0)),
- reference_type>
- access(I0 i0, I1 i1, Is... extra) const {
- check_access_member_function_valid_args(i0, i1, extra...);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
- return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
- }
-
- template <typename I0, typename I1, typename... Is>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
- is_default_map && is_layout_left && (traits::rank_dynamic != 0)),
- reference_type>
- access(I0 i0, I1 i1, Is... extra) const {
- check_access_member_function_valid_args(i0, i1, extra...);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
- return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
- }
-
- template <typename I0, typename I1, typename... Is>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
- is_default_map && is_layout_right && (traits::rank_dynamic == 0)),
- reference_type>
- access(I0 i0, I1 i1, Is... extra) const {
- check_access_member_function_valid_args(i0, i1, extra...);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
- return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
- }
-
- template <typename I0, typename I1, typename... Is>
- KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
- is_default_map && is_layout_right && (traits::rank_dynamic != 0)),
+ (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == rank) &&
+ is_default_map &&
+ (is_layout_left || is_layout_right || is_layout_stride)),
reference_type>
access(I0 i0, I1 i1, Is... extra) const {
check_access_member_function_valid_args(i0, i1, extra...);
KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
- return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
- }
-
- template <typename I0, typename I1, typename... Is>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, Is...>::value &&
- (2 == Rank) && is_default_map && is_layout_stride),
- reference_type>
- access(I0 i0, I1 i1, Is... extra) const {
- check_access_member_function_valid_args(i0, i1, extra...);
- KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
- return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
- i1 * m_map.m_impl_offset.m_stride.S1];
+ if constexpr (is_layout_left) {
+ if constexpr (rank_dynamic == 0)
+ return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
+ else
+ return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
+ } else if constexpr (is_layout_right) {
+ if constexpr (rank_dynamic == 0)
+ return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
+ else
+ return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
+ } else {
+ static_assert(is_layout_stride);
+ return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
+ i1 * m_map.m_impl_offset.m_stride.S1];
+ }
+#if defined(KOKKOS_COMPILER_INTEL)
+ __builtin_unreachable();
+#endif
}
//------------------------------
template <typename I0, typename I1, typename I2, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, Is...>::value &&
- (3 == Rank) && is_default_map),
+ (3 == rank) && is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, extra...);
template <typename I0, typename I1, typename I2, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, Is...>::value &&
- (3 == Rank) && !is_default_map),
+ (3 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, extra...);
template <typename I0, typename I1, typename I2, typename I3, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == Rank) &&
+ (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == rank) &&
is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, Is... extra) const {
template <typename I0, typename I1, typename I2, typename I3, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == Rank) &&
+ (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == rank) &&
!is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, Is... extra) const {
typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, Is...>::value &&
- (5 == Rank) && is_default_map),
+ (5 == rank) && is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, extra...);
typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, Is...>::value &&
- (5 == Rank) && !is_default_map),
+ (5 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, extra...);
typename I5, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, Is...>::value &&
- (6 == Rank) && is_default_map),
+ (6 == rank) && is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, extra...);
typename I5, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, Is...>::value &&
- (6 == Rank) && !is_default_map),
+ (6 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, extra...);
typename I5, typename I6, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6, Is...>::value &&
- (7 == Rank) && is_default_map),
+ (7 == rank) && is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6,
typename I5, typename I6, typename... Is>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6, Is...>::value &&
- (7 == Rank) && !is_default_map),
+ (7 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, Is... extra) const {
check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6,
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6,
I7, Is...>::value &&
- (8 == Rank) && is_default_map),
+ (8 == rank) && is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, I7 i7,
Is... extra) const {
KOKKOS_FORCEINLINE_FUNCTION
std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6,
I7, Is...>::value &&
- (8 == Rank) && !is_default_map),
+ (8 == rank) && !is_default_map),
reference_type>
access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, I7 i7,
Is... extra) const {
.template get_label<typename traits::memory_space>();
}
- private:
- enum class check_input_args : bool { yes = true, no = false };
-
public:
//----------------------------------------
// Allocation according to allocation properties and array layout
explicit inline View(
const Impl::ViewCtorProp<P...>& arg_prop,
std::enable_if_t<!Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout,
- check_input_args check_args = check_input_args::no)
+ typename traits::array_layout> const& arg_layout)
: m_track(), m_map() {
- // Append layout and spaces if not input
- using alloc_prop_input = Impl::ViewCtorProp<P...>;
-
- // use 'std::integral_constant<unsigned,I>' for non-types
- // to avoid duplicate class error.
- using alloc_prop = Impl::ViewCtorProp<
- P...,
- std::conditional_t<alloc_prop_input::has_label,
- std::integral_constant<unsigned int, 0>,
- std::string>,
- std::conditional_t<alloc_prop_input::has_memory_space,
- std::integral_constant<unsigned int, 1>,
- typename traits::device_type::memory_space>,
- std::conditional_t<alloc_prop_input::has_execution_space,
- std::integral_constant<unsigned int, 2>,
- typename traits::device_type::execution_space>>;
+    // Copy the input allocation properties, defaulting any property that was
+    // not supplied. The construction is split in two statements to work
+    // around MSVC compiler errors.
+ auto prop_copy_tmp =
+ Impl::with_properties_if_unset(arg_prop, std::string{});
+ auto prop_copy = Impl::with_properties_if_unset(
+ prop_copy_tmp, typename traits::device_type::memory_space{},
+ typename traits::device_type::execution_space{});
+ using alloc_prop = decltype(prop_copy);
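+    // e.g. View<double*>(view_alloc("A"), n) reaches this constructor with
+    // only a label set; the memory and execution spaces are then
+    // default-constructed from traits::device_type above.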
static_assert(traits::is_managed,
"View allocation constructor requires managed memory");
"execution space");
}
- // Copy the input allocation properties with possibly defaulted properties
- alloc_prop prop_copy(arg_prop);
-
- if (check_args == check_input_args::yes) {
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutStride>) {
size_t i0 = arg_layout.dimension[0];
size_t i1 = arg_layout.dimension[1];
size_t i2 = arg_layout.dimension[2];
size_t i7 = arg_layout.dimension[7];
const std::string& alloc_name =
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
- prop_copy)
- .value;
+ Impl::get_property<Impl::LabelTag>(prop_copy);
Impl::runtime_check_rank(
- traits::rank, traits::rank_dynamic,
- std::is_same<typename traits::specialize, void>::value, i0, i1, i2,
- i3, i4, i5, i6, i7, alloc_name);
- }
-
-//------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- // If allocating in CudaUVMSpace must fence before and after
- // the allocation to protect against possible concurrent access
- // on the CPU and the GPU.
- // Fence using the trait's execution space (which will be Kokkos::Cuda)
- // to avoid incomplete type errors from using Kokkos::Cuda directly.
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::View<...>::View: fence before allocating UVM");
+ *this, std::is_same<typename traits::specialize, void>::value, i0, i1,
+ i2, i3, i4, i5, i6, i7, alloc_name.c_str());
}
#endif
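+    // With KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK, a mismatched extent count such
+    // as Kokkos::View<double**>("A", 10) (rank_dynamic == 2, one extent)
+    // aborts in runtime_check_rank and reports the label "A".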
- //------------------------------------------------------------
Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
prop_copy, arg_layout, Impl::ViewCtorProp<P...>::has_execution_space);
-//------------------------------------------------------------
-#if defined(KOKKOS_ENABLE_CUDA)
- if (std::is_same<Kokkos::CudaUVMSpace,
- typename traits::device_type::memory_space>::value) {
- typename traits::device_type::memory_space::execution_space().fence(
- "Kokkos::View<...>::View: fence after allocating UVM");
- }
-#endif
- //------------------------------------------------------------
-
// Setup and initialization complete, start tracking
m_track.m_tracker.assign_allocated_record_to_uninitialized(record);
}
explicit KOKKOS_INLINE_FUNCTION View(
const Impl::ViewCtorProp<P...>& arg_prop,
std::enable_if_t<Impl::ViewCtorProp<P...>::has_pointer,
- typename traits::array_layout> const& arg_layout,
- check_input_args /*ignored*/ = check_input_args::no) // Not checking
+ typename traits::array_layout> const& arg_layout)
: m_track() // No memory tracking
,
m_map(arg_prop, arg_layout) {
typename Impl::ViewCtorProp<P...>::pointer_type>::value,
"Constructing View to wrap user memory must supply matching pointer "
"type");
+
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+ if constexpr (std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename traits::array_layout,
+ Kokkos::LayoutStride>) {
+ size_t i0 = arg_layout.dimension[0];
+ size_t i1 = arg_layout.dimension[1];
+ size_t i2 = arg_layout.dimension[2];
+ size_t i3 = arg_layout.dimension[3];
+ size_t i4 = arg_layout.dimension[4];
+ size_t i5 = arg_layout.dimension[5];
+ size_t i6 = arg_layout.dimension[6];
+ size_t i7 = arg_layout.dimension[7];
+
+ Impl::runtime_check_rank(
+ *this, std::is_same<typename traits::specialize, void>::value, i0, i1,
+ i2, i3, i4, i5, i6, i7, "UNMANAGED");
+ }
+#endif
}
// Simple dimension-only layout
const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
: View(arg_prop,
typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7),
- check_input_args::yes) {
+ arg_N4, arg_N5, arg_N6, arg_N7)) {
static_assert(traits::array_layout::is_extent_constructible,
"Layout is not constructible from extent arguments. Use "
"overload taking a layout object instead.");
const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
: View(arg_prop,
typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7),
- check_input_args::yes) {
+ arg_N4, arg_N5, arg_N6, arg_N7)) {
static_assert(traits::array_layout::is_extent_constructible,
"Layout is not constructible from extent arguments. Use "
"overload taking a layout object instead.");
const Label& arg_label,
std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
typename traits::array_layout> const& arg_layout)
- : View(Impl::ViewCtorProp<std::string>(arg_label), arg_layout,
- check_input_args::yes) {}
+ : View(Impl::ViewCtorProp<std::string>(arg_label), arg_layout) {}
// Allocate label and layout, must disambiguate from subview constructor.
template <typename Label>
const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
: View(Impl::ViewCtorProp<std::string>(arg_label),
typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7),
- check_input_args::yes) {
+ arg_N4, arg_N5, arg_N6, arg_N7)) {
static_assert(traits::array_layout::is_extent_constructible,
"Layout is not constructible from extent arguments. Use "
"overload taking a layout object instead.");
const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
: View(Impl::ViewCtorProp<pointer_type>(arg_ptr),
typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7),
- check_input_args::yes) {
+ arg_N4, arg_N5, arg_N6, arg_N7)) {
static_assert(traits::array_layout::is_extent_constructible,
"Layout is not constructible from extent arguments. Use "
"overload taking a layout object instead.");
const size_t num_passed_args = Impl::count_valid_integers(
arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7);
- if (std::is_void<typename traits::specialize>::value &&
- num_passed_args != traits::rank_dynamic) {
+ if (std::is_void_v<typename traits::specialize> &&
+ num_passed_args != rank_dynamic) {
Kokkos::abort(
"Kokkos::View::shmem_size() rank_dynamic != number of arguments.\n");
}
arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
}
+ private:
+  // Align to the largest of the element size, the element alignment, and the
+  // scratch memory space's minimum alignment
+ static constexpr size_t scratch_value_alignment =
+ max({sizeof(typename traits::value_type),
+ alignof(typename traits::value_type),
+ static_cast<size_t>(
+ traits::execution_space::scratch_memory_space::ALIGN)});
+
+ public:
static KOKKOS_INLINE_FUNCTION size_t
shmem_size(typename traits::array_layout const& arg_layout) {
- return map_type::memory_span(arg_layout) +
- sizeof(typename traits::value_type);
+ return map_type::memory_span(arg_layout) + scratch_value_alignment;
}
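+
+  // Typical use when sizing per-team scratch memory (a sketch; ExecSpace, n,
+  // and policy are placeholders):
+  //   using ScratchView =
+  //       Kokkos::View<double*, typename ExecSpace::scratch_memory_space,
+  //                    Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+  //   policy.set_scratch_size(0, Kokkos::PerTeam(ScratchView::shmem_size(n)));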
explicit KOKKOS_INLINE_FUNCTION View(
const typename traits::execution_space::scratch_memory_space& arg_space,
const typename traits::array_layout& arg_layout)
- : View(Impl::ViewCtorProp<pointer_type>(
- reinterpret_cast<pointer_type>(arg_space.get_shmem_aligned(
- map_type::memory_span(arg_layout),
- sizeof(typename traits::value_type)))),
+ : View(Impl::ViewCtorProp<pointer_type>(reinterpret_cast<pointer_type>(
+ arg_space.get_shmem_aligned(map_type::memory_span(arg_layout),
+ scratch_value_alignment))),
arg_layout) {}
explicit KOKKOS_INLINE_FUNCTION View(
map_type::memory_span(typename traits::array_layout(
arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6,
arg_N7)),
- sizeof(typename traits::value_type)))),
+ scratch_value_alignment))),
typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
- arg_N4, arg_N5, arg_N6, arg_N7),
- check_input_args::yes) {
+ arg_N4, arg_N5, arg_N6, arg_N7)) {
static_assert(traits::array_layout::is_extent_constructible,
"Layout is not constructible from extent arguments. Use "
"overload taking a layout object instead.");
}
+
+ //----------------------------------------
+ // MDSpan converting constructors
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+ template <typename U = typename Impl::MDSpanViewTraits<traits>::mdspan_type>
+ KOKKOS_INLINE_FUNCTION
+#ifndef KOKKOS_ENABLE_CXX17
+ explicit(traits::is_managed)
+#endif
+ View(const typename Impl::MDSpanViewTraits<traits>::mdspan_type& mds,
+ std::enable_if_t<
+ !std::is_same_v<Impl::UnsupportedKokkosArrayLayout, U>>* =
+ nullptr)
+ : View(mds.data_handle(),
+ Impl::array_layout_from_mapping<
+ typename traits::array_layout,
+ typename Impl::MDSpanViewTraits<traits>::mdspan_type>(
+ mds.mapping())) {
+ }
+
+ template <class ElementType, class ExtentsType, class LayoutType,
+ class AccessorType>
+ KOKKOS_INLINE_FUNCTION
+#ifndef KOKKOS_ENABLE_CXX17
+ explicit(!std::is_convertible_v<
+ Kokkos::mdspan<ElementType, ExtentsType, LayoutType,
+ AccessorType>,
+ typename Impl::MDSpanViewTraits<traits>::mdspan_type>)
+#endif
+ View(const Kokkos::mdspan<ElementType, ExtentsType, LayoutType,
+ AccessorType>& mds)
+ : View(typename Impl::MDSpanViewTraits<traits>::mdspan_type(mds)) {
+ }
+
+ //----------------------------------------
+ // Conversion to MDSpan
+ template <class OtherElementType, class OtherExtents, class OtherLayoutPolicy,
+ class OtherAccessor,
+ class ImplNaturalMDSpanType =
+ typename Impl::MDSpanViewTraits<traits>::mdspan_type,
+ typename = std::enable_if_t<std::conditional_t<
+ std::is_same_v<Impl::UnsupportedKokkosArrayLayout,
+ ImplNaturalMDSpanType>,
+ std::false_type,
+ std::is_assignable<mdspan<OtherElementType, OtherExtents,
+ OtherLayoutPolicy, OtherAccessor>,
+ ImplNaturalMDSpanType>>::value>>
+ KOKKOS_INLINE_FUNCTION constexpr operator mdspan<
+ OtherElementType, OtherExtents, OtherLayoutPolicy, OtherAccessor>() {
+ using mdspan_type = typename Impl::MDSpanViewTraits<traits>::mdspan_type;
+ return mdspan_type{data(),
+ Impl::mapping_from_view_mapping<mdspan_type>(m_map)};
+ }
+
+ template <class OtherAccessorType = Impl::SpaceAwareAccessor<
+ typename traits::memory_space,
+ Kokkos::default_accessor<typename traits::value_type>>,
+ typename = std::enable_if_t<std::is_assignable_v<
+ typename traits::value_type*&,
+ typename OtherAccessorType::data_handle_type>>>
+ KOKKOS_INLINE_FUNCTION constexpr auto to_mdspan(
+ const OtherAccessorType& other_accessor =
+ typename Impl::MDSpanViewTraits<traits>::accessor_type()) {
+ using mdspan_type = typename Impl::MDSpanViewTraits<traits>::mdspan_type;
+ using ret_mdspan_type =
+ mdspan<typename mdspan_type::element_type,
+ typename mdspan_type::extents_type,
+ typename mdspan_type::layout_type, OtherAccessorType>;
+ return ret_mdspan_type{data(),
+ Impl::mapping_from_view_mapping<mdspan_type>(m_map),
+ other_accessor};
+ }
+#endif // KOKKOS_ENABLE_IMPL_MDSPAN
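+
+  // Round-trip sketch (requires KOKKOS_ENABLE_IMPL_MDSPAN and a layout with a
+  // natural mdspan mapping):
+  //   Kokkos::View<double**> a("A", 5, 7);
+  //   auto m = a.to_mdspan();       // mdspan over the same allocation
+  //   Kokkos::View<double**> b(m);  // unmanaged View wrapping m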
};
-/** \brief Temporary free function rank()
- * until rank() is implemented
- * in the View
- */
template <typename D, class... P>
-KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const View<D, P...>& V) {
- return V.Rank;
-} // Temporary until added to view
+KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const View<D, P...>&) {
+ return View<D, P...>::rank();
+}
namespace Impl {
};
template <unsigned N, typename... Args>
-KOKKOS_FUNCTION std::enable_if_t<N == View<Args...>::Rank, View<Args...>>
+KOKKOS_FUNCTION std::enable_if_t<
+ N == View<Args...>::rank() &&
+ std::is_same_v<typename ViewTraits<Args...>::specialize, void>,
+ View<Args...>>
as_view_of_rank_n(View<Args...> v) {
return v;
}
// Placeholder implementation to compile generic code for DynRankView; should
// never be called
template <unsigned N, typename T, typename... Args>
-std::enable_if_t<
- N != View<T, Args...>::Rank,
+KOKKOS_FUNCTION std::enable_if_t<
+ N != View<T, Args...>::rank() &&
+ std::is_same_v<typename ViewTraits<T, Args...>::specialize, void>,
View<typename RankDataType<typename View<T, Args...>::value_type, N>::type,
Args...>>
as_view_of_rank_n(View<T, Args...>) {
- Kokkos::Impl::throw_runtime_exception(
- "Trying to get at a View of the wrong rank");
+ Kokkos::abort("Trying to get at a View of the wrong rank");
return {};
}
}
} // namespace Impl
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-template <class V, class... Args>
-using Subview =
- typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
- view traits */
- ,
- typename V::traits, Args...>::type;
template <class D, class... P, class... Args>
-KOKKOS_INLINE_FUNCTION
- typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
- view traits */
- ,
- ViewTraits<D, P...>, Args...>::type
- subview(const View<D, P...>& src, Args... args) {
- static_assert(View<D, P...>::Rank == sizeof...(Args),
+KOKKOS_INLINE_FUNCTION auto subview(const View<D, P...>& src, Args... args) {
+ static_assert(View<D, P...>::rank == sizeof...(Args),
"subview requires one argument for each source View rank");
return typename Kokkos::Impl::ViewMapping<
void /* deduce subview type from source view traits */
,
- ViewTraits<D, P...>, Args...>::type(src, args...);
+ typename Impl::RemoveAlignedMemoryTrait<D, P...>::type,
+ Args...>::type(src, args...);
}
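+
+// e.g. for a rank-2 view a, subview(a, 3, Kokkos::ALL) is the rank-1 slice
+// with i0 fixed to 3, while subview(a, Kokkos::ALL, std::make_pair(2, 5))
+// keeps rank 2 with a half-open [2, 5) range in the second extent.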
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
template <class MemoryTraits, class D, class... P, class... Args>
-KOKKOS_INLINE_FUNCTION typename Kokkos::Impl::ViewMapping<
- void /* deduce subview type from source view traits */
- ,
- ViewTraits<D, P...>, Args...>::template apply<MemoryTraits>::type
-subview(const View<D, P...>& src, Args... args) {
- static_assert(View<D, P...>::Rank == sizeof...(Args),
+KOKKOS_DEPRECATED KOKKOS_INLINE_FUNCTION auto subview(const View<D, P...>& src,
+ Args... args) {
+ static_assert(View<D, P...>::rank == sizeof...(Args),
"subview requires one argument for each source View rank");
+ static_assert(Kokkos::is_memory_traits<MemoryTraits>::value);
return typename Kokkos::Impl::ViewMapping<
void /* deduce subview type from source view traits */
,
- ViewTraits<D, P...>,
- Args...>::template apply<MemoryTraits>::type(src, args...);
+ typename Impl::RemoveAlignedMemoryTrait<D, P..., MemoryTraits>::type,
+ Args...>::type(src, args...);
}
+#endif
+
+template <class V, class... Args>
+using Subview = decltype(subview(std::declval<V>(), std::declval<Args>()...));
} /* namespace Kokkos */
using lhs_traits = ViewTraits<LT, LP...>;
using rhs_traits = ViewTraits<RT, RP...>;
- return std::is_same<typename lhs_traits::const_value_type,
- typename rhs_traits::const_value_type>::value &&
- std::is_same<typename lhs_traits::array_layout,
- typename rhs_traits::array_layout>::value &&
- std::is_same<typename lhs_traits::memory_space,
- typename rhs_traits::memory_space>::value &&
- unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+ return std::is_same_v<typename lhs_traits::const_value_type,
+ typename rhs_traits::const_value_type> &&
+ std::is_same_v<typename lhs_traits::array_layout,
+ typename rhs_traits::array_layout> &&
+ std::is_same_v<typename lhs_traits::memory_space,
+ typename rhs_traits::memory_space> &&
+ View<LT, LP...>::rank() == View<RT, RP...>::rank() &&
lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
namespace Kokkos {
namespace Impl {
-inline void shared_allocation_tracking_disable() {
- Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_disable();
-}
-
-inline void shared_allocation_tracking_enable() {
- Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_enable();
-}
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
template <class Specialize, typename A, typename B>
struct CommonViewValueType;
// determine specialize type
// if first and next specialize differ, but are not the same specialize, error
// out
- static_assert(!(!std::is_same<first_specialize, next_specialize>::value &&
- !std::is_void<first_specialize>::value &&
- !std::is_void<next_specialize>::value),
+ static_assert(!(!std::is_same_v<first_specialize, next_specialize> &&
+ !std::is_void_v<first_specialize> &&
+ !std::is_void_v<next_specialize>),
"Kokkos DeduceCommonViewAllocProp ERROR: Only one non-void "
"specialize trait allowed");
// otherwise choose non-void specialize if either/both are non-void
- using specialize = std::conditional_t<
- std::is_same<first_specialize, next_specialize>::value, first_specialize,
- std::conditional_t<(std::is_void<first_specialize>::value &&
- !std::is_void<next_specialize>::value),
- next_specialize, first_specialize>>;
+ using specialize =
+ std::conditional_t<std::is_same_v<first_specialize, next_specialize>,
+ first_specialize,
+ std::conditional_t<(std::is_void_v<first_specialize> &&
+ !std::is_void_v<next_specialize>),
+ next_specialize, first_specialize>>;
using value_type = typename CommonViewValueType<specialize, first_value_type,
next_value_type>::value_type;
} // namespace Kokkos
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-namespace Kokkos {
-namespace Impl {
-
-template <class T>
-using is_view KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::is_view instead!") =
- Kokkos::is_view<T>;
-
-} /* namespace Impl */
-} /* namespace Kokkos */
-#endif
-
-#include <impl/Kokkos_ViewUniformType.hpp>
-#include <impl/Kokkos_Atomic_View.hpp>
+#include <View/Kokkos_ViewUniformType.hpp>
+#include <View/Kokkos_ViewAtomic.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-#endif /* #ifndef KOKKOS_VIEW_HPP */
+#endif /* #ifndef KOKKOS_VIEWLEGACY_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EXPERIMENTAL_VIEW_MAPPING_HPP
#define KOKKOS_EXPERIMENTAL_VIEW_MAPPING_HPP
+#include <cstring>
#include <type_traits>
#include <initializer_list>
#include <Kokkos_Extents.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_Traits.hpp>
-#include <impl/Kokkos_ViewTracker.hpp>
-#include <impl/Kokkos_ViewCtor.hpp>
-#include <impl/Kokkos_Atomic_View.hpp>
+#include <View/Kokkos_ViewTracker.hpp>
+#include <View/Kokkos_ViewTraits.hpp>
+#include <View/Kokkos_ViewCtor.hpp>
+#include <View/Kokkos_ViewAtomic.hpp>
#include <impl/Kokkos_Tools.hpp>
#include <impl/Kokkos_StringManipulation.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+#include <View/Kokkos_ViewDataAnalysis.hpp>
+#include <View/Kokkos_ViewAlloc.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
namespace Kokkos {
namespace Impl {
-template <unsigned I, size_t... Args>
-struct variadic_size_t {
- enum : size_t { value = KOKKOS_INVALID_INDEX };
-};
-
-template <size_t Val, size_t... Args>
-struct variadic_size_t<0, Val, Args...> {
- enum : size_t { value = Val };
-};
-
-template <unsigned I, size_t Val, size_t... Args>
-struct variadic_size_t<I, Val, Args...> {
- enum : size_t { value = variadic_size_t<I - 1, Args...>::value };
-};
-
-template <size_t... Args>
-struct rank_dynamic;
-
-template <>
-struct rank_dynamic<> {
- enum : unsigned { value = 0 };
-};
-
-template <size_t Val, size_t... Args>
-struct rank_dynamic<Val, Args...> {
- enum : unsigned { value = (Val == 0 ? 1 : 0) + rank_dynamic<Args...>::value };
-};
-
-#define KOKKOS_IMPL_VIEW_DIMENSION(R) \
- template <size_t V, unsigned> \
- struct ViewDimension##R { \
- static constexpr size_t ArgN##R = (V != KOKKOS_INVALID_INDEX ? V : 1); \
- static constexpr size_t N##R = (V != KOKKOS_INVALID_INDEX ? V : 1); \
- KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t) {} \
- ViewDimension##R() = default; \
- ViewDimension##R(const ViewDimension##R&) = default; \
- ViewDimension##R& operator=(const ViewDimension##R&) = default; \
- }; \
- template <size_t V, unsigned RD> \
- constexpr size_t ViewDimension##R<V, RD>::ArgN##R; \
- template <size_t V, unsigned RD> \
- constexpr size_t ViewDimension##R<V, RD>::N##R; \
- template <unsigned RD> \
- struct ViewDimension##R<0u, RD> { \
- static constexpr size_t ArgN##R = 0; \
- std::conditional_t<(RD < 3), size_t, unsigned> N##R; \
- ViewDimension##R() = default; \
- ViewDimension##R(const ViewDimension##R&) = default; \
- ViewDimension##R& operator=(const ViewDimension##R&) = default; \
- KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t V) : N##R(V) {} \
- }; \
- template <unsigned RD> \
- constexpr size_t ViewDimension##R<0u, RD>::ArgN##R;
-
-KOKKOS_IMPL_VIEW_DIMENSION(0)
-KOKKOS_IMPL_VIEW_DIMENSION(1)
-KOKKOS_IMPL_VIEW_DIMENSION(2)
-KOKKOS_IMPL_VIEW_DIMENSION(3)
-KOKKOS_IMPL_VIEW_DIMENSION(4)
-KOKKOS_IMPL_VIEW_DIMENSION(5)
-KOKKOS_IMPL_VIEW_DIMENSION(6)
-KOKKOS_IMPL_VIEW_DIMENSION(7)
-
-#undef KOKKOS_IMPL_VIEW_DIMENSION
-
-// MSVC does not do empty base class optimization by default.
-// Per standard it is required for standard layout types
-template <size_t... Vals>
-struct KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION ViewDimension
- : public ViewDimension0<variadic_size_t<0u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension1<variadic_size_t<1u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension2<variadic_size_t<2u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension3<variadic_size_t<3u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension4<variadic_size_t<4u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension5<variadic_size_t<5u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension6<variadic_size_t<6u, Vals...>::value,
- rank_dynamic<Vals...>::value>,
- public ViewDimension7<variadic_size_t<7u, Vals...>::value,
- rank_dynamic<Vals...>::value> {
- using D0 = ViewDimension0<variadic_size_t<0U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D1 = ViewDimension1<variadic_size_t<1U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D2 = ViewDimension2<variadic_size_t<2U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D3 = ViewDimension3<variadic_size_t<3U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D4 = ViewDimension4<variadic_size_t<4U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D5 = ViewDimension5<variadic_size_t<5U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D6 = ViewDimension6<variadic_size_t<6U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
- using D7 = ViewDimension7<variadic_size_t<7U, Vals...>::value,
- rank_dynamic<Vals...>::value>;
-
- using D0::ArgN0;
- using D1::ArgN1;
- using D2::ArgN2;
- using D3::ArgN3;
- using D4::ArgN4;
- using D5::ArgN5;
- using D6::ArgN6;
- using D7::ArgN7;
-
- using D0::N0;
- using D1::N1;
- using D2::N2;
- using D3::N3;
- using D4::N4;
- using D5::N5;
- using D6::N6;
- using D7::N7;
-
- enum : unsigned { rank = sizeof...(Vals) };
- enum : unsigned { rank_dynamic = Impl::rank_dynamic<Vals...>::value };
-
- ViewDimension() = default;
- ViewDimension(const ViewDimension&) = default;
- ViewDimension& operator=(const ViewDimension&) = default;
-
- KOKKOS_INLINE_FUNCTION
- constexpr ViewDimension(size_t n0, size_t n1, size_t n2, size_t n3, size_t n4,
- size_t n5, size_t n6, size_t n7)
- : D0(n0 == KOKKOS_INVALID_INDEX ? 1 : n0),
- D1(n1 == KOKKOS_INVALID_INDEX ? 1 : n1),
- D2(n2 == KOKKOS_INVALID_INDEX ? 1 : n2),
- D3(n3 == KOKKOS_INVALID_INDEX ? 1 : n3),
- D4(n4 == KOKKOS_INVALID_INDEX ? 1 : n4),
- D5(n5 == KOKKOS_INVALID_INDEX ? 1 : n5),
- D6(n6 == KOKKOS_INVALID_INDEX ? 1 : n6),
- D7(n7 == KOKKOS_INVALID_INDEX ? 1 : n7) {}
-
- KOKKOS_INLINE_FUNCTION
- constexpr size_t extent(const unsigned r) const noexcept {
- return r == 0
- ? N0
- : (r == 1
- ? N1
- : (r == 2
- ? N2
- : (r == 3
- ? N3
- : (r == 4
- ? N4
- : (r == 5
- ? N5
- : (r == 6
- ? N6
- : (r == 7 ? N7
- : 0)))))));
- }
-
- static KOKKOS_INLINE_FUNCTION constexpr size_t static_extent(
- const unsigned r) noexcept {
- return r == 0
- ? ArgN0
- : (r == 1
- ? ArgN1
- : (r == 2
- ? ArgN2
- : (r == 3
- ? ArgN3
- : (r == 4
- ? ArgN4
- : (r == 5
- ? ArgN5
- : (r == 6
- ? ArgN6
- : (r == 7 ? ArgN7
- : 0)))))));
- }
-
- template <size_t N>
- struct prepend {
- using type = ViewDimension<N, Vals...>;
- };
-
- template <size_t N>
- struct append {
- using type = ViewDimension<Vals..., N>;
- };
-};
-
-template <class A, class B>
-struct ViewDimensionJoin;
-
-template <size_t... A, size_t... B>
-struct ViewDimensionJoin<ViewDimension<A...>, ViewDimension<B...>> {
- using type = ViewDimension<A..., B...>;
-};
-
-//----------------------------------------------------------------------------
-
-template <class DstDim, class SrcDim>
-struct ViewDimensionAssignable;
-
-template <size_t... DstArgs, size_t... SrcArgs>
-struct ViewDimensionAssignable<ViewDimension<DstArgs...>,
- ViewDimension<SrcArgs...>> {
- using dst = ViewDimension<DstArgs...>;
- using src = ViewDimension<SrcArgs...>;
-
- enum {
- value = unsigned(dst::rank) == unsigned(src::rank) &&
- (
- // Compile time check that potential static dimensions match
- ((1 > dst::rank_dynamic && 1 > src::rank_dynamic)
- ? (size_t(dst::ArgN0) == size_t(src::ArgN0))
- : true) &&
- ((2 > dst::rank_dynamic && 2 > src::rank_dynamic)
- ? (size_t(dst::ArgN1) == size_t(src::ArgN1))
- : true) &&
- ((3 > dst::rank_dynamic && 3 > src::rank_dynamic)
- ? (size_t(dst::ArgN2) == size_t(src::ArgN2))
- : true) &&
- ((4 > dst::rank_dynamic && 4 > src::rank_dynamic)
- ? (size_t(dst::ArgN3) == size_t(src::ArgN3))
- : true) &&
- ((5 > dst::rank_dynamic && 5 > src::rank_dynamic)
- ? (size_t(dst::ArgN4) == size_t(src::ArgN4))
- : true) &&
- ((6 > dst::rank_dynamic && 6 > src::rank_dynamic)
- ? (size_t(dst::ArgN5) == size_t(src::ArgN5))
- : true) &&
- ((7 > dst::rank_dynamic && 7 > src::rank_dynamic)
- ? (size_t(dst::ArgN6) == size_t(src::ArgN6))
- : true) &&
- ((8 > dst::rank_dynamic && 8 > src::rank_dynamic)
- ? (size_t(dst::ArgN7) == size_t(src::ArgN7))
- : true))
- };
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-struct ALL_t {
- KOKKOS_INLINE_FUNCTION
- constexpr const ALL_t& operator()() const { return *this; }
-
- KOKKOS_INLINE_FUNCTION
- constexpr bool operator==(const ALL_t&) const { return true; }
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-namespace Kokkos {
-namespace Impl {
-
template <class T>
struct is_integral_extent_type {
- enum : bool { value = std::is_same<T, Kokkos::Impl::ALL_t>::value ? 1 : 0 };
+ enum : bool { value = std::is_same_v<T, Kokkos::ALL_t> ? 1 : 0 };
};
template <class iType>
struct is_integral_extent_type<std::pair<iType, iType>> {
- enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+ enum : bool { value = std::is_integral_v<iType> ? 1 : 0 };
};
template <class iType>
struct is_integral_extent_type<Kokkos::pair<iType, iType>> {
- enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+ enum : bool { value = std::is_integral_v<iType> ? 1 : 0 };
};
// Assuming '2 == initializer_list<iType>::size()'
template <class iType>
struct is_integral_extent_type<std::initializer_list<iType>> {
- enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+ enum : bool { value = std::is_integral_v<iType> ? 1 : 0 };
};
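+// e.g. in subview(a, Kokkos::ALL, std::pair<int, int>(2, 5), 7) the first two
+// arguments are integral extents (their dimensions survive in the result)
+// while the trailing 7 is a plain index whose dimension collapses.
+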
template <unsigned I, class... Args>
enum : bool { value = is_integral_extent_type<type>::value };
- static_assert(value || std::is_integral<type>::value ||
- std::is_void<type>::value,
+ static_assert(value || std::is_integral_v<type> || std::is_void_v<type>,
"subview argument must be either integral or integral extent");
};
RankDest, RankSrc, CurrentArg, Arg,
SubViewArgs...> {
enum {
- value = (((CurrentArg == RankDest - 1) &&
- (Kokkos::Impl::is_integral_extent_type<Arg>::value)) ||
- ((CurrentArg >= RankDest) && (std::is_integral<Arg>::value)) ||
- ((CurrentArg < RankDest) &&
- (std::is_same<Arg, Kokkos::Impl::ALL_t>::value)) ||
- ((CurrentArg == 0) &&
- (Kokkos::Impl::is_integral_extent_type<Arg>::value))) &&
- (SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
- RankDest, RankSrc, CurrentArg + 1,
- SubViewArgs...>::value)
+ value =
+ (((CurrentArg == RankDest - 1) &&
+ (Kokkos::Impl::is_integral_extent_type<Arg>::value)) ||
+ ((CurrentArg >= RankDest) && (std::is_integral_v<Arg>)) ||
+ ((CurrentArg < RankDest) && (std::is_same_v<Arg, Kokkos::ALL_t>)) ||
+ ((CurrentArg == 0) &&
+ (Kokkos::Impl::is_integral_extent_type<Arg>::value))) &&
+ (SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
+ RankDest, RankSrc, CurrentArg + 1,
+ SubViewArgs...>::value)
};
};
struct SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
RankDest, RankSrc, CurrentArg, Arg> {
enum {
- value = ((CurrentArg == RankDest - 1) || (std::is_integral<Arg>::value)) &&
+ value = ((CurrentArg == RankDest - 1) || (std::is_integral_v<Arg>)) &&
(CurrentArg == RankSrc - 1)
};
};
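+// e.g. with LayoutLeft source and destination, subview(a3, Kokkos::ALL,
+// Kokkos::ALL, 5) can remain LayoutLeft, while subview(a3, 5, Kokkos::ALL,
+// Kokkos::ALL) cannot and falls back to LayoutStride.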
enum {
value = (((CurrentArg == RankSrc - RankDest) &&
(Kokkos::Impl::is_integral_extent_type<Arg>::value)) ||
- ((CurrentArg < RankSrc - RankDest) &&
- (std::is_integral<Arg>::value)) ||
+ ((CurrentArg < RankSrc - RankDest) && (std::is_integral_v<Arg>)) ||
((CurrentArg >= RankSrc - RankDest) &&
- (std::is_same<Arg, Kokkos::Impl::ALL_t>::value))) &&
+ (std::is_same_v<Arg, Kokkos::ALL_t>))) &&
(SubviewLegalArgsCompileTime<Kokkos::LayoutRight,
Kokkos::LayoutRight, RankDest, RankSrc,
CurrentArg + 1, SubViewArgs...>::value)
struct SubviewLegalArgsCompileTime<Kokkos::LayoutRight, Kokkos::LayoutRight,
RankDest, RankSrc, CurrentArg, Arg> {
enum {
- value = ((CurrentArg == RankSrc - 1) &&
- (std::is_same<Arg, Kokkos::Impl::ALL_t>::value))
+ value =
+ ((CurrentArg == RankSrc - 1) && (std::is_same_v<Arg, Kokkos::ALL_t>))
};
};
KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
unsigned range_rank,
const ViewDimension<DimArgs...>& dim,
- const Kokkos::Impl::ALL_t,
- Args... args) {
+ Kokkos::ALL_t, Args... args) {
m_begin[domain_rank] = 0;
m_length[range_rank] = dim.extent(domain_rank);
m_index[range_rank] = domain_rank;
// std::pair range
template <size_t... DimArgs, class... Args>
void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
- const ViewDimension<DimArgs...>& dim, const Kokkos::Impl::ALL_t,
+ const ViewDimension<DimArgs...>& dim, Kokkos::ALL_t,
Args... args) const {
const int n = std::min(buf_len, snprintf(buf, buf_len, " Kokkos::ALL %c",
int(sizeof...(Args) ? ',' : ')')));
const int n = snprintf(buffer, LEN, "Kokkos::subview bounds error (");
error(buffer + n, LEN - n, 0, 0, dim, args...);
- Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+ Kokkos::abort(buffer);))
KOKKOS_IF_ON_DEVICE(((void)dim;
Kokkos::abort("Kokkos::subview bounds error");
template <size_t... DimArgs, class... Args>
KOKKOS_INLINE_FUNCTION SubviewExtents(const ViewDimension<DimArgs...>& dim,
Args... args) {
- static_assert(DomainRank == sizeof...(DimArgs), "");
- static_assert(DomainRank == sizeof...(Args), "");
+ static_assert(DomainRank == sizeof...(DimArgs));
+ static_assert(DomainRank == sizeof...(Args));
// Verifies that all arguments, up to 8, are integral types,
// integral extents, or don't exist.
- static_assert(
- RangeRank == unsigned(is_integral_extent<0, Args...>::value) +
- unsigned(is_integral_extent<1, Args...>::value) +
- unsigned(is_integral_extent<2, Args...>::value) +
- unsigned(is_integral_extent<3, Args...>::value) +
- unsigned(is_integral_extent<4, Args...>::value) +
- unsigned(is_integral_extent<5, Args...>::value) +
- unsigned(is_integral_extent<6, Args...>::value) +
- unsigned(is_integral_extent<7, Args...>::value),
- "");
+ static_assert(RangeRank ==
+ unsigned(is_integral_extent<0, Args...>::value) +
+ unsigned(is_integral_extent<1, Args...>::value) +
+ unsigned(is_integral_extent<2, Args...>::value) +
+ unsigned(is_integral_extent<3, Args...>::value) +
+ unsigned(is_integral_extent<4, Args...>::value) +
+ unsigned(is_integral_extent<5, Args...>::value) +
+ unsigned(is_integral_extent<6, Args...>::value) +
+ unsigned(is_integral_extent<7, Args...>::value));
if (RangeRank == 0) {
m_length[0] = 0;
namespace Kokkos {
namespace Impl {
-
-/** \brief Given a value type and dimension generate the View data type */
-template <class T, class Dim>
-struct ViewDataType;
-
-template <class T>
-struct ViewDataType<T, ViewDimension<>> {
- using type = T;
-};
-
-template <class T, size_t... Args>
-struct ViewDataType<T, ViewDimension<0, Args...>> {
- using type = typename ViewDataType<T*, ViewDimension<Args...>>::type;
-};
-
-template <class T, size_t N, size_t... Args>
-struct ViewDataType<T, ViewDimension<N, Args...>> {
- using type = typename ViewDataType<T, ViewDimension<Args...>>::type[N];
-};
-
-/**\brief Analysis of View data type.
- *
- * Data type conforms to one of the following patterns :
- * {const} value_type [][#][#][#]
- * {const} value_type ***[#][#][#]
- * Where the sum of counts of '*' and '[#]' is at most ten.
- *
- * Provide alias for ViewDimension<...> and value_type.
- */
-template <class T>
-struct ViewArrayAnalysis {
- using value_type = T;
- using const_value_type = std::add_const_t<T>;
- using non_const_value_type = std::remove_const_t<T>;
- using static_dimension = ViewDimension<>;
- using dynamic_dimension = ViewDimension<>;
- using dimension = ViewDimension<>;
-};
-
-template <class T, size_t N>
-struct ViewArrayAnalysis<T[N]> {
- private:
- using nested = ViewArrayAnalysis<T>;
-
- public:
- using value_type = typename nested::value_type;
- using const_value_type = typename nested::const_value_type;
- using non_const_value_type = typename nested::non_const_value_type;
-
- using static_dimension =
- typename nested::static_dimension::template prepend<N>::type;
-
- using dynamic_dimension = typename nested::dynamic_dimension;
-
- using dimension =
- typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
-};
-
-template <class T>
-struct ViewArrayAnalysis<T[]> {
- private:
- using nested = ViewArrayAnalysis<T>;
- using nested_dimension = typename nested::dimension;
-
- public:
- using value_type = typename nested::value_type;
- using const_value_type = typename nested::const_value_type;
- using non_const_value_type = typename nested::non_const_value_type;
-
- using dynamic_dimension =
- typename nested::dynamic_dimension::template prepend<0>::type;
-
- using static_dimension = typename nested::static_dimension;
-
- using dimension =
- typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
-};
-
-template <class T>
-struct ViewArrayAnalysis<T*> {
- private:
- using nested = ViewArrayAnalysis<T>;
-
- public:
- using value_type = typename nested::value_type;
- using const_value_type = typename nested::const_value_type;
- using non_const_value_type = typename nested::non_const_value_type;
-
- using dynamic_dimension =
- typename nested::dynamic_dimension::template prepend<0>::type;
-
- using static_dimension = typename nested::static_dimension;
-
- using dimension =
- typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
-};
-
-template <class DataType, class ArrayLayout, class ValueType>
-struct ViewDataAnalysis {
- private:
- using array_analysis = ViewArrayAnalysis<DataType>;
-
- // ValueType is opportunity for partial specialization.
- // Must match array analysis when this default template is used.
- static_assert(
- std::is_same<ValueType,
- typename array_analysis::non_const_value_type>::value,
- "");
-
- public:
- using specialize = void; // No specialization
-
- using dimension = typename array_analysis::dimension;
- using value_type = typename array_analysis::value_type;
- using const_value_type = typename array_analysis::const_value_type;
- using non_const_value_type = typename array_analysis::non_const_value_type;
-
- // Generate analogous multidimensional array specification type.
- using type = typename ViewDataType<value_type, dimension>::type;
- using const_type = typename ViewDataType<const_value_type, dimension>::type;
- using non_const_type =
- typename ViewDataType<non_const_value_type, dimension>::type;
-
- // Generate "flattened" multidimensional array specification type.
- using scalar_array_type = type;
- using const_scalar_array_type = const_type;
- using non_const_scalar_array_type = non_const_type;
-};
-
-} // namespace Impl
-} // namespace Kokkos
-
-//----------------------------------------------------------------------------
-//----------------------------------------------------------------------------
-
-namespace Kokkos {
-namespace Impl {
-
-template <class Dimension, class Layout, class Enable = void>
-struct ViewOffset {
- using is_mapping_plugin = std::false_type;
-};
-
//----------------------------------------------------------------------------
// LayoutLeft AND ( 1 >= rank OR 0 == rank_dynamic ) : no padding / striding
template <class Dimension>
m_dim.N5 * m_dim.N6;
}
- // Stride with [ rank ] value is the total length
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ // FIXME: The version of clang-format used in CI chokes on [[maybe_unused]],
+ // hence the clang-format off/on guards.
+ // clang-format off
template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- s[0] = 1;
- if (0 < dimension_type::rank) {
- s[1] = m_dim.N0;
+ KOKKOS_INLINE_FUNCTION iType
+ stride_fill([[maybe_unused]] iType* const s) const {
+ iType n = 1;
+ if constexpr (0 < dimension_type::rank) {
+ s[0] = n;
+ n *= m_dim.N0;
}
- if (1 < dimension_type::rank) {
- s[2] = s[1] * m_dim.N1;
+ if constexpr (1 < dimension_type::rank) {
+ s[1] = n;
+ n *= m_dim.N1;
}
- if (2 < dimension_type::rank) {
- s[3] = s[2] * m_dim.N2;
+ if constexpr (2 < dimension_type::rank) {
+ s[2] = n;
+ n *= m_dim.N2;
}
- if (3 < dimension_type::rank) {
- s[4] = s[3] * m_dim.N3;
+ if constexpr (3 < dimension_type::rank) {
+ s[3] = n;
+ n *= m_dim.N3;
}
- if (4 < dimension_type::rank) {
- s[5] = s[4] * m_dim.N4;
+ if constexpr (4 < dimension_type::rank) {
+ s[4] = n;
+ n *= m_dim.N4;
}
- if (5 < dimension_type::rank) {
- s[6] = s[5] * m_dim.N5;
+ if constexpr (5 < dimension_type::rank) {
+ s[5] = n;
+ n *= m_dim.N5;
}
- if (6 < dimension_type::rank) {
- s[7] = s[6] * m_dim.N6;
+ if constexpr (6 < dimension_type::rank) {
+ s[6] = n;
+ n *= m_dim.N6;
}
- if (7 < dimension_type::rank) {
- s[8] = s[7] * m_dim.N7;
+ if constexpr (7 < dimension_type::rank) {
+ s[7] = n;
+ n *= m_dim.N7;
}
+ return n;
+ }
+ // clang-format on
+
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ s[dimension_type::rank] = stride_fill(s);
}
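+ // Illustrative sketch (hypothetical `off` denotes a rank-3 LayoutLeft
+ // ViewOffset with extents {N0,N1,N2} = {2,3,4}):
+ //   int s[3];
+ //   int span = off.stride_fill(s); // s == {1, 2, 6}, span == 24
+ //   int t[4];
+ //   off.stride(t);                 // t == {1, 2, 6, 24}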
//----------------------------------------
return *this;
}
#else
- ViewOffset() = default;
- ViewOffset(const ViewOffset&) = default;
+ ViewOffset() = default;
+ ViewOffset(const ViewOffset&) = default;
ViewOffset& operator=(const ViewOffset&) = default;
#endif
KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
const ViewOffset<DimRHS, Kokkos::LayoutRight, void>& rhs)
: m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {
- static_assert((DimRHS::rank == 0 && dimension_type::rank == 0) ||
- (DimRHS::rank == 1 && dimension_type::rank == 1 &&
- dimension_type::rank_dynamic == 1),
+ static_assert(((DimRHS::rank == 0 && dimension_type::rank == 0) ||
+ (DimRHS::rank == 1 && dimension_type::rank == 1)),
"ViewOffset LayoutLeft and LayoutRight are only compatible "
"when rank <= 1");
}
KOKKOS_INLINE_FUNCTION
constexpr array_layout layout() const {
constexpr auto r = dimension_type::rank;
- return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
- (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
- (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
- (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
- (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
- (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
- (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
- (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+ array_layout l((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+ (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+ (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+ (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+ (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+ (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+ (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+ (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+ // Without the span_is_contiguous() check, Sacado's hidden dimensions get
+ // mangled.
+ l.stride = span_is_contiguous() ? KOKKOS_IMPL_CTOR_DEFAULT_ARG : m_stride;
+ return l;
}
KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
m_dim.N6;
}
- // Stride with [ rank ] value is the total length
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ // The version of clang-format used in CI chokes on [[maybe_unused]],
+ // hence the clang-format off/on guards.
+ // clang-format off
template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- s[0] = 1;
- if (0 < dimension_type::rank) {
- s[1] = m_stride;
+ KOKKOS_INLINE_FUNCTION iType
+ stride_fill([[maybe_unused]] iType* const s) const {
+ iType n = 1;
+ if constexpr (0 < dimension_type::rank) {
+ s[0] = n;
+ n *= m_stride;
}
- if (1 < dimension_type::rank) {
- s[2] = s[1] * m_dim.N1;
+ if constexpr (1 < dimension_type::rank) {
+ s[1] = n;
+ n *= m_dim.N1;
}
- if (2 < dimension_type::rank) {
- s[3] = s[2] * m_dim.N2;
+ if constexpr (2 < dimension_type::rank) {
+ s[2] = n;
+ n *= m_dim.N2;
}
- if (3 < dimension_type::rank) {
- s[4] = s[3] * m_dim.N3;
+ if constexpr (3 < dimension_type::rank) {
+ s[3] = n;
+ n *= m_dim.N3;
}
- if (4 < dimension_type::rank) {
- s[5] = s[4] * m_dim.N4;
+ if constexpr (4 < dimension_type::rank) {
+ s[4] = n;
+ n *= m_dim.N4;
}
- if (5 < dimension_type::rank) {
- s[6] = s[5] * m_dim.N5;
+ if constexpr (5 < dimension_type::rank) {
+ s[5] = n;
+ n *= m_dim.N5;
}
- if (6 < dimension_type::rank) {
- s[7] = s[6] * m_dim.N6;
+ if constexpr (6 < dimension_type::rank) {
+ s[6] = n;
+ n *= m_dim.N6;
}
- if (7 < dimension_type::rank) {
- s[8] = s[7] * m_dim.N7;
+ if constexpr (7 < dimension_type::rank) {
+ s[7] = n;
+ n *= m_dim.N7;
}
+ return n;
+ }
+ // clang-format on
+
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ s[dimension_type::rank] = stride_fill(s);
}
//----------------------------------------
}
#else
- ViewOffset() = default;
- ViewOffset(const ViewOffset&) = default;
+ ViewOffset() = default;
+ ViewOffset(const ViewOffset&) = default;
ViewOffset& operator=(const ViewOffset&) = default;
#endif
arg_layout.dimension[2], arg_layout.dimension[3],
arg_layout.dimension[4], arg_layout.dimension[5],
arg_layout.dimension[6], arg_layout.dimension[7]),
- m_stride(Padding<TrivialScalarSize>::stride(arg_layout.dimension[0])) {}
+ m_stride(
+ arg_layout.stride != KOKKOS_IMPL_CTOR_DEFAULT_ARG
+ ? arg_layout.stride
+ : Padding<TrivialScalarSize>::stride(arg_layout.dimension[0])) {
+ }
template <class DimRHS>
KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
m_dim.N1;
}
- // Stride with [ rank ] value is the total length
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ // The version of clang-format used in CI chokes on [[maybe_unused]],
+ // hence the clang-format off/on guards.
+ // clang-format off
template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ KOKKOS_INLINE_FUNCTION iType
+ stride_fill([[maybe_unused]] iType* const s) const {
size_type n = 1;
- if (7 < dimension_type::rank) {
+ if constexpr (7 < dimension_type::rank) {
s[7] = n;
n *= m_dim.N7;
}
- if (6 < dimension_type::rank) {
+ if constexpr (6 < dimension_type::rank) {
s[6] = n;
n *= m_dim.N6;
}
- if (5 < dimension_type::rank) {
+ if constexpr (5 < dimension_type::rank) {
s[5] = n;
n *= m_dim.N5;
}
- if (4 < dimension_type::rank) {
+ if constexpr (4 < dimension_type::rank) {
s[4] = n;
n *= m_dim.N4;
}
- if (3 < dimension_type::rank) {
+ if constexpr (3 < dimension_type::rank) {
s[3] = n;
n *= m_dim.N3;
}
- if (2 < dimension_type::rank) {
+ if constexpr (2 < dimension_type::rank) {
s[2] = n;
n *= m_dim.N2;
}
- if (1 < dimension_type::rank) {
+ if constexpr (1 < dimension_type::rank) {
s[1] = n;
n *= m_dim.N1;
}
- if (0 < dimension_type::rank) {
+ if constexpr (0 < dimension_type::rank) {
s[0] = n;
}
- s[dimension_type::rank] = n * m_dim.N0;
+ return n * m_dim.N0;
+ }
+ // clang-format on
+
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ s[dimension_type::rank] = stride_fill(s);
}
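+ // Illustrative sketch (hypothetical `off` denotes a rank-3 LayoutRight
+ // ViewOffset with extents {N0,N1,N2} = {2,3,4}):
+ //   int s[4];
+ //   off.stride(s); // s == {12, 4, 1, 24}; stride_fill(s) returns 24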
//----------------------------------------
}
#else
- ViewOffset() = default;
- ViewOffset(const ViewOffset&) = default;
+ ViewOffset() = default;
+ ViewOffset(const ViewOffset&) = default;
ViewOffset& operator=(const ViewOffset&) = default;
#endif
const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>& rhs)
: m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {
static_assert((DimRHS::rank == 0 && dimension_type::rank == 0) ||
- (DimRHS::rank == 1 && dimension_type::rank == 1 &&
- dimension_type::rank_dynamic == 1),
+ (DimRHS::rank == 1 && dimension_type::rank == 1),
"ViewOffset LayoutRight and LayoutLeft are only compatible "
"when rank <= 1");
}
KOKKOS_INLINE_FUNCTION
constexpr array_layout layout() const {
constexpr auto r = dimension_type::rank;
- return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
- (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
- (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
- (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
- (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
- (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
- (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
- (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+ array_layout l((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+ (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+ (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+ (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+ (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+ (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+ (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+ (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+ // Without the span_is_contiguous() check, Sacado's hidden dimensions get
+ // mangled.
+ l.stride = span_is_contiguous() ? KOKKOS_IMPL_CTOR_DEFAULT_ARG : m_stride;
+ return l;
}
KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
}
KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
- return m_stride == m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 *
- m_dim.N2 * m_dim.N1;
+ return m_stride == static_cast<size_type>(m_dim.N7) * m_dim.N6 * m_dim.N5 *
+ m_dim.N4 * m_dim.N3 * m_dim.N2 * m_dim.N1;
}
/* Strides of dimensions */
return m_dim.N7;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
- return m_dim.N7 * m_dim.N6;
+ return static_cast<size_type>(m_dim.N7) * m_dim.N6;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
- return m_dim.N7 * m_dim.N6 * m_dim.N5;
+ return static_cast<size_type>(m_dim.N7) * m_dim.N6 * m_dim.N5;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
- return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4;
+ return static_cast<size_type>(m_dim.N7) * m_dim.N6 * m_dim.N5 * m_dim.N4;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
- return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3;
+ return static_cast<size_type>(m_dim.N7) * m_dim.N6 * m_dim.N5 * m_dim.N4 *
+ m_dim.N3;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
- return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 * m_dim.N2;
+ return static_cast<size_type>(m_dim.N7) * m_dim.N6 * m_dim.N5 * m_dim.N4 *
+ m_dim.N3 * m_dim.N2;
}
KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const {
return m_stride;
}
- // Stride with [ rank ] value is the total length
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ // The version of clang-format used in CI chokes on [[maybe_unused]],
+ // hence the clang-format off/on guards.
+ // clang-format off
template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ KOKKOS_INLINE_FUNCTION iType
+ stride_fill([[maybe_unused]] iType* const s) const {
size_type n = 1;
- if (7 < dimension_type::rank) {
+ if constexpr (7 < dimension_type::rank) {
s[7] = n;
n *= m_dim.N7;
}
- if (6 < dimension_type::rank) {
+ if constexpr (6 < dimension_type::rank) {
s[6] = n;
n *= m_dim.N6;
}
- if (5 < dimension_type::rank) {
+ if constexpr (5 < dimension_type::rank) {
s[5] = n;
n *= m_dim.N5;
}
- if (4 < dimension_type::rank) {
+ if constexpr (4 < dimension_type::rank) {
s[4] = n;
n *= m_dim.N4;
}
- if (3 < dimension_type::rank) {
+ if constexpr (3 < dimension_type::rank) {
s[3] = n;
n *= m_dim.N3;
}
- if (2 < dimension_type::rank) {
+ if constexpr (2 < dimension_type::rank) {
s[2] = n;
n *= m_dim.N2;
}
- if (1 < dimension_type::rank) {
+ if constexpr (1 < dimension_type::rank) {
s[1] = n;
}
- if (0 < dimension_type::rank) {
+ if constexpr (0 < dimension_type::rank) {
s[0] = m_stride;
}
- s[dimension_type::rank] = m_stride * m_dim.N0;
+ return m_stride * m_dim.N0;
+ }
+ // clang-format on
+
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ s[dimension_type::rank] = stride_fill(s);
}
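+ // Illustrative sketch (hypothetical `off` with extents {2,3,4} whose
+ // m_stride was padded to 16 instead of the contiguous N1 * N2 == 12):
+ //   int s[4];
+ //   off.stride(s); // s == {16, 4, 1, 32}; span_is_contiguous() == false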
//----------------------------------------
}
#else
- ViewOffset() = default;
- ViewOffset(const ViewOffset&) = default;
+ ViewOffset() = default;
+ ViewOffset(const ViewOffset&) = default;
ViewOffset& operator=(const ViewOffset&) = default;
#endif
/* Enable padding for trivial scalar types with non-zero trivial scalar size.
*/
+
+ private:
+ template <unsigned TrivialScalarSize>
+ KOKKOS_FUNCTION constexpr size_type compute_stride(
+ const Kokkos::LayoutRight& arg_layout) {
+ if (arg_layout.stride != KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+ return arg_layout.stride;
+ size_type value = m_dim.N1;
+ if constexpr (dimension_type::rank > 2) value *= m_dim.N2;
+ if constexpr (dimension_type::rank > 3) value *= m_dim.N3;
+ if constexpr (dimension_type::rank > 4) value *= m_dim.N4;
+ if constexpr (dimension_type::rank > 5) value *= m_dim.N5;
+ if constexpr (dimension_type::rank > 6) value *= m_dim.N6;
+ if constexpr (dimension_type::rank > 7) value *= m_dim.N7;
+ return Padding<TrivialScalarSize>::stride(value);
+ }
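+ // For example, for rank 3 with extents {N0,N1,N2} = {2,3,4} and no explicit
+ // layout stride, compute_stride() returns Padding<TrivialScalarSize>::stride(12),
+ // i.e. the (possibly padded) product of all extents past N0.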
+
+ public:
template <unsigned TrivialScalarSize>
KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
std::integral_constant<unsigned, TrivialScalarSize> const&,
arg_layout.dimension[2], arg_layout.dimension[3],
arg_layout.dimension[4], arg_layout.dimension[5],
arg_layout.dimension[6], arg_layout.dimension[7]),
- m_stride(
- Padding<TrivialScalarSize>::
- stride(/* 2 <= rank */
- m_dim.N1 *
- (dimension_type::rank == 2
- ? size_t(1)
- : m_dim.N2 *
- (dimension_type::rank == 3
- ? size_t(1)
- : m_dim.N3 *
- (dimension_type::rank == 4
- ? size_t(1)
- : m_dim.N4 *
- (dimension_type::rank ==
- 5
- ? size_t(1)
- : m_dim.N5 *
- (dimension_type::
- rank ==
- 6
- ? size_t(
- 1)
- : m_dim.N6 *
- (dimension_type::
- rank ==
- 7
- ? size_t(
- 1)
- : m_dim
- .N7)))))))) {
- }
+ m_stride(compute_stride<TrivialScalarSize>(arg_layout)) {}
template <class DimRHS>
KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
template <>
struct ViewStride<0> {
- enum { S0 = 0, S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S0 = 0, S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0,
+ S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<1> {
size_t S0;
- enum { S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0,
+ S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<2> {
size_t S0, S1;
- enum { S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<3> {
size_t S0, S1, S2;
- enum { S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<4> {
size_t S0, S1, S2, S3;
- enum { S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S4 = 0, S5 = 0, S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<5> {
size_t S0, S1, S2, S3, S4;
- enum { S5 = 0, S6 = 0, S7 = 0 };
+ static constexpr size_t S5 = 0, S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<6> {
size_t S0, S1, S2, S3, S4, S5;
- enum { S6 = 0, S7 = 0 };
+ static constexpr size_t S6 = 0, S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
template <>
struct ViewStride<7> {
size_t S0, S1, S2, S3, S4, S5, S6;
- enum { S7 = 0 };
+ static constexpr size_t S7 = 0;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
struct ViewStride<8> {
size_t S0, S1, S2, S3, S4, S5, S6, S7;
- ViewStride() = default;
- ViewStride(const ViewStride&) = default;
+ ViewStride() = default;
+ ViewStride(const ViewStride&) = default;
ViewStride& operator=(const ViewStride&) = default;
KOKKOS_INLINE_FUNCTION
return m_stride.S7;
}
- // Stride with [ rank ] value is the total length
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ // The version of clang-format used in CI chokes on [[maybe_unused]],
+ // hence the clang-format off/on guards.
+ // clang-format off
template <typename iType>
- KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
- if (0 < dimension_type::rank) {
+ KOKKOS_INLINE_FUNCTION iType
+ stride_fill([[maybe_unused]] iType* const s) const {
+ if constexpr (0 < dimension_type::rank) {
s[0] = m_stride.S0;
}
- if (1 < dimension_type::rank) {
+ if constexpr (1 < dimension_type::rank) {
s[1] = m_stride.S1;
}
- if (2 < dimension_type::rank) {
+ if constexpr (2 < dimension_type::rank) {
s[2] = m_stride.S2;
}
- if (3 < dimension_type::rank) {
+ if constexpr (3 < dimension_type::rank) {
s[3] = m_stride.S3;
}
- if (4 < dimension_type::rank) {
+ if constexpr (4 < dimension_type::rank) {
s[4] = m_stride.S4;
}
- if (5 < dimension_type::rank) {
+ if constexpr (5 < dimension_type::rank) {
s[5] = m_stride.S5;
}
- if (6 < dimension_type::rank) {
+ if constexpr (6 < dimension_type::rank) {
s[6] = m_stride.S6;
}
- if (7 < dimension_type::rank) {
+ if constexpr (7 < dimension_type::rank) {
s[7] = m_stride.S7;
}
- s[dimension_type::rank] = span();
+ return span();
+ }
+ // clang-format on
+
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+ s[dimension_type::rank] = stride_fill(s);
}
//----------------------------------------
}
#else
- ViewOffset() = default;
- ViewOffset(const ViewOffset&) = default;
+ ViewOffset() = default;
+ ViewOffset(const ViewOffset&) = default;
ViewOffset& operator=(const ViewOffset&) = default;
#endif
template <class Traits>
struct ViewDataHandle<
Traits,
- std::enable_if_t<(std::is_same<typename Traits::non_const_value_type,
- typename Traits::value_type>::value &&
- std::is_void<typename Traits::specialize>::value &&
+ std::enable_if_t<(std::is_same_v<typename Traits::non_const_value_type,
+ typename Traits::value_type> &&
+ std::is_void_v<typename Traits::specialize> &&
Traits::memory_traits::is_atomic)>> {
using value_type = typename Traits::value_type;
using handle_type = typename Kokkos::Impl::AtomicViewDataHandle<Traits>;
template <class Traits>
struct ViewDataHandle<
- Traits,
- std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
- (!Traits::memory_traits::is_aligned) &&
- Traits::memory_traits::is_restrict
-#ifdef KOKKOS_ENABLE_CUDA
- && (!(std::is_same<typename Traits::memory_space,
- Kokkos::CudaSpace>::value ||
- std::is_same<typename Traits::memory_space,
- Kokkos::CudaUVMSpace>::value))
-#endif
- && (!Traits::memory_traits::is_atomic))>> {
+ Traits, std::enable_if_t<(std::is_void_v<typename Traits::specialize> &&
+ (!Traits::memory_traits::is_aligned) &&
+ Traits::memory_traits::is_restrict &&
+ (!Traits::memory_traits::is_atomic))>> {
using value_type = typename Traits::value_type;
using handle_type = typename Traits::value_type* KOKKOS_RESTRICT;
using return_type = typename Traits::value_type& KOKKOS_RESTRICT;
template <class Traits>
struct ViewDataHandle<
- Traits,
- std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
- Traits::memory_traits::is_aligned &&
- (!Traits::memory_traits::is_restrict)
-#ifdef KOKKOS_ENABLE_CUDA
- && (!(std::is_same<typename Traits::memory_space,
- Kokkos::CudaSpace>::value ||
- std::is_same<typename Traits::memory_space,
- Kokkos::CudaUVMSpace>::value))
-#endif
- && (!Traits::memory_traits::is_atomic))>> {
+ Traits, std::enable_if_t<(std::is_void_v<typename Traits::specialize> &&
+ Traits::memory_traits::is_aligned &&
+ (!Traits::memory_traits::is_restrict) &&
+ (!Traits::memory_traits::is_atomic))>> {
using value_type = typename Traits::value_type;
// typedef work-around for intel compilers error #3186: expected typedef
// declaration
template <class Traits>
struct ViewDataHandle<
- Traits,
- std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
- Traits::memory_traits::is_aligned &&
- Traits::memory_traits::is_restrict
-#ifdef KOKKOS_ENABLE_CUDA
- && (!(std::is_same<typename Traits::memory_space,
- Kokkos::CudaSpace>::value ||
- std::is_same<typename Traits::memory_space,
- Kokkos::CudaUVMSpace>::value))
-#endif
- && (!Traits::memory_traits::is_atomic))>> {
+ Traits, std::enable_if_t<(std::is_void_v<typename Traits::specialize> &&
+ Traits::memory_traits::is_aligned &&
+ Traits::memory_traits::is_restrict &&
+ (!Traits::memory_traits::is_atomic))>> {
using value_type = typename Traits::value_type;
// typedef work-around for intel compilers error #3186: expected typedef
// declaration
namespace Kokkos {
namespace Impl {
-
-template <typename T>
-inline bool is_zero_byte(const T& t) {
- using comparison_type = std::conditional_t<
- sizeof(T) % sizeof(long long int) == 0, long long int,
- std::conditional_t<
- sizeof(T) % sizeof(long int) == 0, long int,
- std::conditional_t<
- sizeof(T) % sizeof(int) == 0, int,
- std::conditional_t<sizeof(T) % sizeof(short int) == 0, short int,
- char>>>>;
- const auto* const ptr = reinterpret_cast<const comparison_type*>(&t);
- for (std::size_t i = 0; i < sizeof(T) / sizeof(comparison_type); ++i)
- if (ptr[i] != 0) return false;
- return true;
-}
-
-//----------------------------------------------------------------------------
-
-/*
- * The construction, assignment to default, and destruction
- * are merged into a single functor.
- * Primarily to work around an unresolved CUDA back-end bug
- * that would lose the destruction cuda device function when
- * called from the shared memory tracking destruction.
- * Secondarily to have two fewer partial specializations.
- */
-template <class DeviceType, class ValueType,
- bool IsScalar = std::is_scalar<ValueType>::value>
-struct ViewValueFunctor;
-
-template <class DeviceType, class ValueType>
-struct ViewValueFunctor<DeviceType, ValueType, false /* is_scalar */> {
- using ExecSpace = typename DeviceType::execution_space;
- using PolicyType = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<int64_t>>;
-
- ExecSpace space;
- ValueType* ptr;
- size_t n;
- bool destroy;
- std::string name;
- bool default_exec_space;
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const size_t i) const {
- if (destroy) {
- (ptr + i)->~ValueType();
- } // KOKKOS_IMPL_CUDA_CLANG_WORKAROUND this line causes ptax error
- // __cxa_begin_catch in nested_view unit-test
- else {
- new (ptr + i) ValueType();
- }
- }
-
- ViewValueFunctor() = default;
- ViewValueFunctor(const ViewValueFunctor&) = default;
- ViewValueFunctor& operator=(const ViewValueFunctor&) = default;
-
- ViewValueFunctor(ExecSpace const& arg_space, ValueType* const arg_ptr,
- size_t const arg_n, std::string arg_name)
- : space(arg_space),
- ptr(arg_ptr),
- n(arg_n),
- destroy(false),
- name(std::move(arg_name)),
- default_exec_space(false) {}
-
- ViewValueFunctor(ValueType* const arg_ptr, size_t const arg_n,
- std::string arg_name)
- : space(ExecSpace{}),
- ptr(arg_ptr),
- n(arg_n),
- destroy(false),
- name(std::move(arg_name)),
- default_exec_space(true) {}
-
- template <typename Dummy = ValueType>
- std::enable_if_t<std::is_trivial<Dummy>::value &&
- std::is_trivially_copy_assignable<ValueType>::value>
- construct_dispatch() {
- ValueType value{};
-// On A64FX memset seems to do the wrong thing with regards to first touch
-// leading to the significant performance issues
-#ifndef KOKKOS_ARCH_A64FX
- if (Impl::is_zero_byte(value)) {
- uint64_t kpID = 0;
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- // We are not really using parallel_for here but using beginParallelFor
- // instead of begin_parallel_for (and adding "via memset") is the best
- // we can do to indicate that this is not supposed to be tunable (and
- // doesn't really execute a parallel_for).
- Kokkos::Profiling::beginParallelFor(
- "Kokkos::View::initialization [" + name + "] via memset",
- Kokkos::Profiling::Experimental::device_id(space), &kpID);
- }
- (void)ZeroMemset<ExecSpace, ValueType*, typename DeviceType::memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
- space,
- Kokkos::View<ValueType*, typename DeviceType::memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>(ptr, n),
- value);
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::endParallelFor(kpID);
- }
- if (default_exec_space)
- space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
- } else {
-#endif
- parallel_for_implementation(false);
-#ifndef KOKKOS_ARCH_A64FX
- }
-#endif
- }
-
- template <typename Dummy = ValueType>
- std::enable_if_t<!(std::is_trivial<Dummy>::value &&
- std::is_trivially_copy_assignable<ValueType>::value)>
- construct_dispatch() {
- parallel_for_implementation(false);
- }
-
- void parallel_for_implementation(bool arg) {
- destroy = arg;
- if (!space.in_parallel()) {
- PolicyType policy(0, n);
- std::string functor_name;
- uint64_t kpID = 0;
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- functor_name =
- (destroy ? "Kokkos::View::destruction [" + functor_name + "]"
- : "Kokkos::View::initialization [" + functor_name + "]");
- Kokkos::Profiling::beginParallelFor(
- "Kokkos::View::initialization [" + functor_name + "]",
- Kokkos::Profiling::Experimental::device_id(space), &kpID);
- }
-
-#ifdef KOKKOS_ENABLE_CUDA
- if (std::is_same<ExecSpace, Kokkos::Cuda>::value) {
- Kokkos::Impl::cuda_prefetch_pointer(space, ptr, sizeof(ValueType) * n,
- true);
- }
-#endif
- const Kokkos::Impl::ParallelFor<ViewValueFunctor, PolicyType> closure(
- *this, policy);
- closure.execute();
- if (default_exec_space || destroy)
- space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::endParallelFor(kpID);
- }
- } else {
- for (size_t i = 0; i < n; ++i) operator()(i);
- }
- }
-
- void construct_shared_allocation() { construct_dispatch(); }
-
- void destroy_shared_allocation() { parallel_for_implementation(true); }
-};
-
-template <class DeviceType, class ValueType>
-struct ViewValueFunctor<DeviceType, ValueType, true /* is_scalar */> {
- using ExecSpace = typename DeviceType::execution_space;
- using PolicyType = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<int64_t>>;
-
- ExecSpace space;
- ValueType* ptr;
- size_t n;
- std::string name;
- bool default_exec_space;
-
- KOKKOS_INLINE_FUNCTION
- void operator()(const size_t i) const { ptr[i] = ValueType(); }
-
- ViewValueFunctor() = default;
- ViewValueFunctor(const ViewValueFunctor&) = default;
- ViewValueFunctor& operator=(const ViewValueFunctor&) = default;
-
- ViewValueFunctor(ExecSpace const& arg_space, ValueType* const arg_ptr,
- size_t const arg_n, std::string arg_name)
- : space(arg_space),
- ptr(arg_ptr),
- n(arg_n),
- name(std::move(arg_name)),
- default_exec_space(false) {}
-
- ViewValueFunctor(ValueType* const arg_ptr, size_t const arg_n,
- std::string arg_name)
- : space(ExecSpace{}),
- ptr(arg_ptr),
- n(arg_n),
- name(std::move(arg_name)),
- default_exec_space(true) {}
-
- template <typename Dummy = ValueType>
- std::enable_if_t<std::is_trivial<Dummy>::value &&
- std::is_trivially_copy_assignable<Dummy>::value>
- construct_shared_allocation() {
- // Shortcut for zero initialization
- ValueType value{};
-// On A64FX memset seems to do the wrong thing with regards to first touch
-// leading to the significant performance issues
-#ifndef KOKKOS_ARCH_A64FX
- if (Impl::is_zero_byte(value)) {
- uint64_t kpID = 0;
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- // We are not really using parallel_for here but using beginParallelFor
- // instead of begin_parallel_for (and adding "via memset") is the best
- // we can do to indicate that this is not supposed to be tunable (and
- // doesn't really execute a parallel_for).
- Kokkos::Profiling::beginParallelFor(
- "Kokkos::View::initialization [" + name + "] via memset",
- Kokkos::Profiling::Experimental::device_id(space), &kpID);
- }
-
- (void)ZeroMemset<ExecSpace, ValueType*, typename DeviceType::memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
- space,
- Kokkos::View<ValueType*, typename DeviceType::memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>(ptr, n),
- value);
-
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::endParallelFor(kpID);
- }
- if (default_exec_space)
- space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
- } else {
-#endif
- parallel_for_implementation();
-#ifndef KOKKOS_ARCH_A64FX
- }
-#endif
- }
-
- template <typename Dummy = ValueType>
- std::enable_if_t<!(std::is_trivial<Dummy>::value &&
- std::is_trivially_copy_assignable<Dummy>::value)>
- construct_shared_allocation() {
- parallel_for_implementation();
- }
-
- void parallel_for_implementation() {
- if (!space.in_parallel()) {
- PolicyType policy(0, n);
- std::string functor_name = "Kokkos::View::initialization [" + name + "]";
- uint64_t kpID = 0;
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::beginParallelFor(
- "Kokkos::View::initialization [" + name + "]",
- Kokkos::Profiling::Experimental::device_id(space), &kpID);
- }
-#ifdef KOKKOS_ENABLE_CUDA
- if (std::is_same<ExecSpace, Kokkos::Cuda>::value) {
- Kokkos::Impl::cuda_prefetch_pointer(space, ptr, sizeof(ValueType) * n,
- true);
- }
-#endif
- const Kokkos::Impl::ParallelFor<ViewValueFunctor, PolicyType> closure(
- *this, PolicyType(0, n));
- closure.execute();
- if (default_exec_space)
- space.fence(
- "Kokkos::Impl::ViewValueFunctor: Fence after setting values in "
- "view");
- if (Kokkos::Profiling::profileLibraryLoaded()) {
- Kokkos::Profiling::endParallelFor(kpID);
- }
- } else {
- for (size_t i = 0; i < n; ++i) operator()(i);
- }
- }
-
- void destroy_shared_allocation() {}
-};
-
//----------------------------------------------------------------------------
/** \brief View mapping for non-specialized data type and standard layout */
template <class Traits>
class ViewMapping<
- Traits,
- std::enable_if_t<(
- std::is_void<typename Traits::specialize>::value &&
- ViewOffset<typename Traits::dimension, typename Traits::array_layout,
- void>::is_mapping_plugin::value)>> {
+ Traits, std::enable_if_t<(std::is_void_v<typename Traits::specialize> &&
+ ViewOffset<typename Traits::dimension,
+ typename Traits::array_layout,
+ void>::is_mapping_plugin::value)>> {
public:
using offset_type = ViewOffset<typename Traits::dimension,
typename Traits::array_layout, void>;
//----------------------------------------
// Domain dimensions
- enum { Rank = Traits::dimension::rank };
+ static constexpr unsigned Rank = Traits::dimension::rank;
template <typename iType>
KOKKOS_INLINE_FUNCTION constexpr size_t extent(const iType& r) const {
return m_impl_offset.stride_7();
}
+ // Fill the target unbounded array s with the strides and the total spanned
+ // size. This method differs from stride_fill() in that it also writes the
+ // total spanned size to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank + 1
+ // elements.
template <typename iType>
KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
m_impl_offset.stride(s);
}
+ // Fill the target unbounded array s with the strides.
+ // This method differs from stride() in that it does not write the total
+ // length to the last index of the array.
+ // Precondition: s must be an array of at least dimension_type::rank elements.
+ template <typename iType>
+ KOKKOS_INLINE_FUNCTION iType stride_fill(iType* const s) const {
+ return m_impl_offset.stride_fill(s);
+ }
+
//----------------------------------------
// Range span
reference_type reference() const { return m_impl_handle[0]; }
template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(std::is_integral<I0>::value &&
- // if layout is neither stride nor irregular,
- // then just use the handle directly
- !(std::is_same<typename Traits::array_layout,
- Kokkos::LayoutStride>::value ||
- !is_regular::value)),
- reference_type>
- reference(const I0& i0) const {
+ KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+ (std::is_integral_v<I0> &&
+ // if layout is neither stride nor irregular,
+ // then just use the handle directly
+ !(std::is_same_v<typename Traits::array_layout, Kokkos::LayoutStride> ||
+ !is_regular::value)),
+ reference_type>
+ reference(const I0& i0) const {
return m_impl_handle[i0];
}
template <typename I0>
- KOKKOS_FORCEINLINE_FUNCTION
- std::enable_if_t<(std::is_integral<I0>::value &&
- // if the layout is strided or irregular, then
- // we have to use the offset
- (std::is_same<typename Traits::array_layout,
- Kokkos::LayoutStride>::value ||
- !is_regular::value)),
- reference_type>
- reference(const I0& i0) const {
+ KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+ (std::is_integral_v<I0> &&
+ // if the layout is strided or irregular, then
+ // we have to use the offset
+ (std::is_same_v<typename Traits::array_layout, Kokkos::LayoutStride> ||
+ !is_regular::value)),
+ reference_type>
+ reference(const I0& i0) const {
return m_impl_handle[m_impl_offset(i0)];
}
KOKKOS_DEFAULTED_FUNCTION ViewMapping& operator=(const ViewMapping&) =
default;
- KOKKOS_DEFAULTED_FUNCTION ViewMapping(ViewMapping&&) = default;
+ KOKKOS_DEFAULTED_FUNCTION ViewMapping(ViewMapping&&) = default;
KOKKOS_DEFAULTED_FUNCTION ViewMapping& operator=(ViewMapping&&) = default;
//----------------------------------------
KOKKOS_INLINE_FUNCTION ViewMapping(
Kokkos::Impl::ViewCtorProp<P...> const& arg_prop,
typename Traits::array_layout const& arg_layout)
- : m_impl_handle(
- ((Kokkos::Impl::ViewCtorProp<void, pointer_type> const&)arg_prop)
- .value),
+ : m_impl_handle(Impl::get_property<Impl::PointerTag>(arg_prop)),
m_impl_offset(std::integral_constant<unsigned, 0>(), arg_layout) {}
/**\brief Assign data */
using execution_space = typename alloc_prop::execution_space;
using memory_space = typename Traits::memory_space;
- using value_type = typename Traits::value_type;
- using functor_type =
- ViewValueFunctor<Kokkos::Device<execution_space, memory_space>,
- value_type>;
+ static_assert(
+ SpaceAccessibility<execution_space, memory_space>::accessible);
+ using device_type = Kokkos::Device<execution_space, memory_space>;
+ using value_type = typename Traits::value_type;
+ using functor_type = std::conditional_t<
+ alloc_prop::sequential_host_init,
+ ViewValueFunctorSequentialHostInit<device_type, value_type>,
+ ViewValueFunctor<device_type, value_type>>;
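+ // When alloc_prop::sequential_host_init is set, value construction is done
+ // by ViewValueFunctorSequentialHostInit, which (as its name suggests)
+ // initializes elements one after another on the host rather than through a
+ // parallel functor on the execution space.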
using record_type =
Kokkos::Impl::SharedAllocationRecord<memory_space, functor_type>;
(m_impl_offset.span() * MemorySpanSize + MemorySpanMask) &
~size_t(MemorySpanMask);
const std::string& alloc_name =
- static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
- arg_prop)
- .value;
+ Impl::get_property<Impl::LabelTag>(arg_prop);
const execution_space& exec_space =
- static_cast<Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
- arg_prop)
- .value;
+ Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
const memory_space& mem_space =
- static_cast<Kokkos::Impl::ViewCtorProp<void, memory_space> const&>(
- arg_prop)
- .value;
+ Impl::get_property<Impl::MemorySpaceTag>(arg_prop);
 // Create the shared-memory tracking record and allocate memory from the
 // memory space
m_impl_handle = handle_type(reinterpret_cast<pointer_type>(record->data()));
+ functor_type functor =
+ execution_space_specified
+ ? functor_type(exec_space, (value_type*)m_impl_handle,
+ m_impl_offset.span(), alloc_name)
+ : functor_type((value_type*)m_impl_handle, m_impl_offset.span(),
+ alloc_name);
+
// Only initialize if the allocation is non-zero.
// May be zero if one of the dimensions is zero.
- if (alloc_size && alloc_prop::initialize) {
- // Assume destruction is only required when construction is requested.
- // The ViewValueFunctor has both value construction and destruction
- // operators.
- record->m_destroy =
- execution_space_specified
- ? functor_type(exec_space, (value_type*)m_impl_handle,
- m_impl_offset.span(), alloc_name)
- : functor_type((value_type*)m_impl_handle, m_impl_offset.span(),
- alloc_name);
-
- // Construct values
- record->m_destroy.construct_shared_allocation();
- }
+ if constexpr (alloc_prop::initialize)
+ if (alloc_size) {
+ // Assume destruction is only required when construction is requested.
+ // The ViewValueFunctor has both value construction and destruction
+ // operators.
+ record->m_destroy = std::move(functor);
+
+ // Construct values
+ record->m_destroy.construct_shared_allocation();
+ }
return record;
}
class ViewMapping<
DstTraits, SrcTraits,
std::enable_if_t<(
- !(std::is_same<typename SrcTraits::array_layout, LayoutStride>::
- value) && // Added to have a new specialization for SrcType of
- // LayoutStride
+ !(std::is_same_v<typename SrcTraits::array_layout,
+ LayoutStride>)&& // Added to have a new
+ // specialization for
+ // SrcType of
+ // LayoutStride
// default mappings
- std::is_void<typename DstTraits::specialize>::value &&
- std::is_void<typename SrcTraits::specialize>::value &&
+ std::is_void_v<typename DstTraits::specialize> &&
+ std::is_void_v<typename SrcTraits::specialize> &&
(
// same layout
- std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value ||
+ std::is_same_v<typename DstTraits::array_layout,
+ typename SrcTraits::array_layout> ||
// known layout
- ((std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value) &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value))))>> {
+ ((std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<
+ typename DstTraits::array_layout,
+ Kokkos::LayoutStride>)&&(std::is_same_v<typename SrcTraits::
+ array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<
+ typename SrcTraits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<
+ typename SrcTraits::array_layout,
+ Kokkos::LayoutStride>))))>> {
private:
enum {
is_assignable_space = Kokkos::Impl::MemorySpaceAccess<
enum {
is_assignable_value_type =
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::value_type>::value ||
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::const_value_type>::value
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::value_type> ||
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::const_value_type>
};
enum {
};
enum {
- is_assignable_layout =
- std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value ||
- (DstTraits::dimension::rank == 0) ||
- (DstTraits::dimension::rank == 1 &&
- DstTraits::dimension::rank_dynamic == 1)
+ is_assignable_layout = std::is_same_v<typename DstTraits::array_layout,
+ typename SrcTraits::array_layout> ||
+ std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutStride> ||
+ (DstTraits::dimension::rank == 0) ||
+ (DstTraits::dimension::rank == 1)
};
public:
template <class DstTraits, class SrcTraits>
class ViewMapping<
DstTraits, SrcTraits,
- std::enable_if_t<(
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value &&
- std::is_void<typename DstTraits::specialize>::value &&
- std::is_void<typename SrcTraits::specialize>::value &&
- (
- // same layout
- std::is_same<typename DstTraits::array_layout,
- typename SrcTraits::array_layout>::value ||
- // known layout
- (std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutStride>::value)))>> {
+ std::enable_if_t<(std::is_same_v<typename SrcTraits::array_layout,
+ Kokkos::LayoutStride> &&
+ std::is_void_v<typename DstTraits::specialize> &&
+ std::is_void_v<typename SrcTraits::specialize> &&
+ (
+ // same layout
+ std::is_same_v<typename DstTraits::array_layout,
+ typename SrcTraits::array_layout> ||
+ // known layout
+ (std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutLeft> ||
+ std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutStride>)))>> {
private:
enum {
is_assignable_space = Kokkos::Impl::MemorySpaceAccess<
enum {
is_assignable_value_type =
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::value_type>::value ||
- std::is_same<typename DstTraits::value_type,
- typename SrcTraits::const_value_type>::value
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::value_type> ||
+ std::is_same_v<typename DstTraits::value_type,
+ typename SrcTraits::const_value_type>
};
enum {
bool assignable = true;
src.stride(strides);
size_t exp_stride = 1;
- if (std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutLeft>::value) {
- for (int i = 0; i < src.Rank; i++) {
+ if (std::is_same_v<typename DstTraits::array_layout, Kokkos::LayoutLeft>) {
+ for (int i = 0; i < (int)src.Rank; i++) {
if (i > 0) exp_stride *= src.extent(i - 1);
if (strides[i] != exp_stride) {
assignable = false;
break;
}
}
- } else if (std::is_same<typename DstTraits::array_layout,
- Kokkos::LayoutRight>::value) {
- for (int i = src.Rank - 1; i >= 0; i--) {
- if (i < src.Rank - 1) exp_stride *= src.extent(i + 1);
- if (strides[i] != exp_stride) {
+ } else if (std::is_same_v<typename DstTraits::array_layout,
+ Kokkos::LayoutRight>) {
+ for (int i = 0; i < (int)src.Rank; i++) {
+ if (i > 0) exp_stride *= src.extent(src.Rank - i);
+ if (strides[src.Rank - 1 - i] != exp_stride) {
assignable = false;
break;
}
};
/* for integral args, subview doesn't have that dimension */
-template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class Integral,
+template <class ValueType, size_t Ext, size_t... Exts, class Integral,
class... Args>
struct SubViewDataTypeImpl<
- std::enable_if_t<std::is_integral<std::decay_t<Integral>>::value>,
- ValueType, Kokkos::Experimental::Extents<Ext, Exts...>, Integral, Args...>
+ std::enable_if_t<std::is_integral_v<std::decay_t<Integral>>>, ValueType,
+ Kokkos::Experimental::Extents<Ext, Exts...>, Integral, Args...>
: SubViewDataTypeImpl<void, ValueType,
Kokkos::Experimental::Extents<Exts...>, Args...> {};
/* for ALL slice, subview has the same dimension */
-template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class... Args>
+template <class ValueType, size_t Ext, size_t... Exts, class... Args>
struct SubViewDataTypeImpl<void, ValueType,
- Kokkos::Experimental::Extents<Ext, Exts...>, ALL_t,
- Args...>
+ Kokkos::Experimental::Extents<Ext, Exts...>,
+ Kokkos::ALL_t, Args...>
: SubViewDataTypeImpl<void, typename ApplyExtent<ValueType, Ext>::type,
Kokkos::Experimental::Extents<Exts...>, Args...> {};
* static sizes */
/* Since we don't allow interleaving of dynamic and static extents, make all of
* the dimensions to the left dynamic */
-template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class PairLike,
+template <class ValueType, size_t Ext, size_t... Exts, class PairLike,
class... Args>
struct SubViewDataTypeImpl<
std::enable_if_t<is_pair_like<PairLike>::value>, ValueType,
template <class SrcTraits, class... Args>
class ViewMapping<
- std::enable_if_t<(std::is_void<typename SrcTraits::specialize>::value &&
- (std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value ||
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutStride>::value))>,
+ std::enable_if_t<(
+ std::is_void_v<typename SrcTraits::specialize> &&
+ (std::is_same_v<typename SrcTraits::array_layout, Kokkos::LayoutLeft> ||
+ std::is_same_v<typename SrcTraits::array_layout,
+ Kokkos::LayoutRight> ||
+ std::is_same_v<typename SrcTraits::array_layout,
+ Kokkos::LayoutStride>))>,
SrcTraits, Args...> {
private:
static_assert(SrcTraits::rank == sizeof...(Args),
// OutputRank 1 or 2, InputLayout Left, Interval 0
// because single stride one or second index has a stride.
(rank <= 2 && R0 &&
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutLeft>::value) // replace with input rank
+ std::is_same_v<typename SrcTraits::array_layout,
+ Kokkos::LayoutLeft>) // replace with input rank
||
// OutputRank 1 or 2, InputLayout Right, Interval [InputRank-1]
// because single stride one or second index has a stride.
(rank <= 2 && R0_rev &&
- std::is_same<typename SrcTraits::array_layout,
- Kokkos::LayoutRight>::value) // replace input rank
+ std::is_same_v<typename SrcTraits::array_layout,
+ Kokkos::LayoutRight>) // replace input rank
),
typename SrcTraits::array_layout, Kokkos::LayoutStride>;
template <class MemoryTraits>
struct apply {
- static_assert(Kokkos::is_memory_traits<MemoryTraits>::value, "");
+ static_assert(Kokkos::is_memory_traits<MemoryTraits>::value);
using traits_type =
Kokkos::ViewTraits<data_type, array_layout,
namespace Kokkos {
namespace Impl {
-template <unsigned, class MapType>
-KOKKOS_INLINE_FUNCTION bool view_verify_operator_bounds(const MapType&) {
- return true;
+template <class Map, class... Indices, std::size_t... Enumerate>
+KOKKOS_FUNCTION bool within_range(Map const& map,
+ std::index_sequence<Enumerate...>,
+ Indices... indices) {
+ return (((std::size_t)indices < map.extent(Enumerate)) && ...);
}
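+// Illustrative sketch: for a rank-2 map, within_range(map,
+// std::make_index_sequence<2>(), i0, i1) folds to
+//   (std::size_t(i0) < map.extent(0)) && (std::size_t(i1) < map.extent(1)).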
-template <unsigned R, class MapType, class iType, class... Args>
-KOKKOS_INLINE_FUNCTION bool view_verify_operator_bounds(const MapType& map,
- const iType& i,
- Args... args) {
- return (size_t(i) < map.extent(R)) &&
- view_verify_operator_bounds<R + 1>(map, args...);
+template <class... Indices>
+KOKKOS_FUNCTION constexpr char* append_formatted_multidimensional_index(
+ char* dest, Indices... indices) {
+ char* d = dest;
+ strcat(d, "[");
+ (
+ [&] {
+ d += strlen(d);
+ to_chars_i(d,
+ d + 20, // 20 digits ought to be enough
+ indices);
+ strcat(d, ",");
+ }(),
+ ...);
+ d[strlen(d) - 1] = ']'; // overwrite trailing comma
+ return dest;
}
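+// Illustrative sketch: append_formatted_multidimensional_index(buf, 3, 7)
+// appends "[3,7]" to buf; the comma written after the last index is
+// overwritten by the closing ']'.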
-template <unsigned, class MapType>
-inline void view_error_operator_bounds(char*, int, const MapType&) {}
-
-template <unsigned R, class MapType, class iType, class... Args>
-inline void view_error_operator_bounds(char* buf, int len, const MapType& map,
- const iType& i, Args... args) {
- const int n = snprintf(
- buf, len, " %ld < %ld %c", static_cast<unsigned long>(i),
- static_cast<unsigned long>(map.extent(R)), (sizeof...(Args) ? ',' : ')'));
- view_error_operator_bounds<R + 1>(buf + n, len - n, map, args...);
+template <class Map, class... Indices, std::size_t... Enumerate>
+KOKKOS_FUNCTION void print_extents(char* dest, Map const& map,
+ std::index_sequence<Enumerate...>) {
+ append_formatted_multidimensional_index(dest, map.extent(Enumerate)...);
}
-/* Check #3: is the View managed as determined by the MemoryTraits? */
-template <class MapType, bool is_managed = (MapType::is_managed != 0)>
-struct OperatorBoundsErrorOnDevice;
-
-template <class MapType>
-struct OperatorBoundsErrorOnDevice<MapType, false> {
- KOKKOS_INLINE_FUNCTION
- static void run(MapType const&) { Kokkos::abort("View bounds error"); }
-};
-
-template <class MapType>
-struct OperatorBoundsErrorOnDevice<MapType, true> {
- KOKKOS_INLINE_FUNCTION
- static void run(MapType const& map) {
- SharedAllocationHeader const* const header =
- SharedAllocationHeader::get_header(
- static_cast<void const*>(map.data()));
- char const* const label = header->label();
- enum { LEN = 128 };
- char msg[LEN];
- char const* const first_part = "View bounds error of view ";
- char* p = msg;
- char* const end = msg + LEN - 1;
- for (char const* p2 = first_part; (*p2 != '\0') && (p < end); ++p, ++p2) {
- *p = *p2;
- }
- for (char const* p2 = label; (*p2 != '\0') && (p < end); ++p, ++p2) {
- *p = *p2;
- }
- *p = '\0';
- Kokkos::abort(msg);
- }
-};
-
-/* Check #2: does the ViewMapping have the printable_label_typedef defined?
- See above that only the non-specialized standard-layout ViewMapping has
- this defined by default.
- The existence of this alias indicates the existence of MapType::is_managed
- */
template <class T>
using printable_label_typedef_t = typename T::printable_label_typedef;
-template <class Map>
-KOKKOS_FUNCTION
- std::enable_if_t<!is_detected<printable_label_typedef_t, Map>::value>
- operator_bounds_error_on_device(Map const&) {
- Kokkos::abort("View bounds error");
-}
-
-template <class Map>
-KOKKOS_FUNCTION
- std::enable_if_t<is_detected<printable_label_typedef_t, Map>::value>
- operator_bounds_error_on_device(Map const& map) {
- OperatorBoundsErrorOnDevice<Map>::run(map);
-}
-
template <class MemorySpace, class ViewType, class MapType, class... Args>
KOKKOS_INLINE_FUNCTION void view_verify_operator_bounds(
Kokkos::Impl::ViewTracker<ViewType> const& tracker, const MapType& map,
Args... args) {
- if (!view_verify_operator_bounds<0>(map, args...)) {
+ if (!within_range(map, std::make_index_sequence<sizeof...(Args)>(),
+ args...)) {
+ char err[256] = "";
+ strcat(err, "Kokkos::View ERROR: out of bounds access");
+ strcat(err, " label=(\"");
KOKKOS_IF_ON_HOST(
- (enum {LEN = 1024}; char buffer[LEN];
- const std::string label =
- tracker.m_tracker.template get_label<MemorySpace>();
- int n = snprintf(buffer, LEN, "View bounds error of view %s (",
- label.c_str());
- view_error_operator_bounds<0>(buffer + n, LEN - n, map, args...);
- Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
-
- KOKKOS_IF_ON_DEVICE((
- /* Check #1: is there a SharedAllocationRecord?
- (we won't use it, but if its not there then there isn't
- a corresponding SharedAllocationHeader containing a label).
- This check should cover the case of Views that don't
- have the Unmanaged trait but were initialized by pointer. */
if (tracker.m_tracker.has_record()) {
- operator_bounds_error_on_device(map);
- } else { Kokkos::abort("View bounds error"); }))
+ strncat(err, tracker.m_tracker.template get_label<void>().c_str(),
+ 128);
+ } else { strcat(err, "**UNMANAGED**"); })
+ KOKKOS_IF_ON_DEVICE([&] {
+ // Check #1: is there a SharedAllocationRecord? (we won't use it, but
+ // if its not there then there isn't a corresponding
+ // SharedAllocationHeader containing a label). This check should cover
+ // the case of Views that don't have the Unmanaged trait but were
+ // initialized by pointer.
+ if (!tracker.m_tracker.has_record()) {
+ strcat(err, "**UNMANAGED**");
+ return;
+ }
+ // Check #2: does the ViewMapping have the printable_label_typedef
+ // defined? See above that only the non-specialized standard-layout
+ // ViewMapping has this defined by default. The existence of this
+ // alias indicates the existence of MapType::is_managed
+ if constexpr (is_detected_v<printable_label_typedef_t, MapType>) {
+ // Check #3: is the View managed as determined by the MemoryTraits?
+ if constexpr (MapType::is_managed != 0) {
+ SharedAllocationHeader const* const header =
+ SharedAllocationHeader::get_header(
+ static_cast<void const*>(map.data()));
+ char const* const label = header->label();
+ strcat(err, label);
+ return;
+ }
+ strcat(err, "**UNAVAILABLE**");
+ }
+ }();)
+ strcat(err, "\") with indices ");
+ append_formatted_multidimensional_index(err, args...);
+ strcat(err, " but extents ");
+ print_extents(err, map, std::make_index_sequence<sizeof...(Args)>());
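+    // At this point err reads, with illustrative values,
+    //   Kokkos::View ERROR: out of bounds access label=("myview") with
+    //   indices [3,7] but extents [2,4]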
+ Kokkos::abort(err);
}
}
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_VIEW_TRACKER_HPP
#define KOKKOS_VIEW_TRACKER_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+#ifndef KOKKOS_VIEWTRAITS_HPP
+#define KOKKOS_VIEWTRAITS_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <View/Hooks/Kokkos_ViewHooks.hpp>
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+#include <View/MDSpan/Kokkos_MDSpan_Layout.hpp>
+#include <View/MDSpan/Kokkos_MDSpan_Accessor.hpp>
+#endif
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+struct ALL_t {
+ KOKKOS_FUNCTION
+ constexpr const ALL_t& operator()() const { return *this; }
+
+ KOKKOS_FUNCTION
+ constexpr bool operator==(const ALL_t&) const { return true; }
+};
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+namespace Impl {
+// TODO This alias declaration forces us to fully qualify ALL_t inside the
+// Kokkos::Impl namespace to avoid deprecation warnings. Replace the
+// fully-qualified name when we remove Kokkos::Impl::ALL_t.
+using ALL_t KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::ALL_t instead!") =
+ Kokkos::ALL_t;
+} // namespace Impl
+#endif
+
+// FIXME_OPENMPTARGET - The `declare target` is needed for the Intel GPUs with
+// the OpenMPTarget backend
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(KOKKOS_COMPILER_INTEL_LLVM)
+#pragma omp declare target
+#endif
+
+inline constexpr Kokkos::ALL_t ALL{};
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(KOKKOS_COMPILER_INTEL_LLVM)
+#pragma omp end declare target
+#endif
+
+namespace Impl {
+
+template <class DataType>
+struct ViewArrayAnalysis;
+
+template <class DataType, class ArrayLayout,
+ typename ValueType =
+ typename ViewArrayAnalysis<DataType>::non_const_value_type>
+struct ViewDataAnalysis;
+
+template <class, class...>
+class ViewMapping {
+ public:
+ enum : bool { is_assignable_data_type = false };
+ enum : bool { is_assignable = false };
+};
+
+template <typename IntType>
+constexpr KOKKOS_INLINE_FUNCTION std::size_t count_valid_integers(
+ const IntType i0, const IntType i1, const IntType i2, const IntType i3,
+ const IntType i4, const IntType i5, const IntType i6, const IntType i7) {
+ static_assert(std::is_integral_v<IntType>,
+ "count_valid_integers() must have integer arguments.");
+
+ return (i0 != KOKKOS_INVALID_INDEX) + (i1 != KOKKOS_INVALID_INDEX) +
+ (i2 != KOKKOS_INVALID_INDEX) + (i3 != KOKKOS_INVALID_INDEX) +
+ (i4 != KOKKOS_INVALID_INDEX) + (i5 != KOKKOS_INVALID_INDEX) +
+ (i6 != KOKKOS_INVALID_INDEX) + (i7 != KOKKOS_INVALID_INDEX);
+}
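+// For example (a sketch): a View constructor that received two real extents,
+// with the remaining six slots left at their KOKKOS_INVALID_INDEX default,
+// yields
+//
+//   count_valid_integers(std::size_t(10), std::size_t(20),
+//                        KOKKOS_INVALID_INDEX, /* ...five more... */
+//                        KOKKOS_INVALID_INDEX) == 2
+//
+// which runtime_check_rank() below compares against the dynamic and the
+// total rank of the View.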
+
+// FIXME Ideally, we would not instantiate this function for every possible View
+// type. We should be able to only pass "extent" when we use mdspan.
+template <typename View>
+KOKKOS_INLINE_FUNCTION void runtime_check_rank(
+ const View&, const bool is_void_spec, const size_t i0, const size_t i1,
+ const size_t i2, const size_t i3, const size_t i4, const size_t i5,
+ const size_t i6, const size_t i7, const char* label) {
+ (void)(label);
+
+ if (is_void_spec) {
+ const size_t num_passed_args =
+ count_valid_integers(i0, i1, i2, i3, i4, i5, i6, i7);
+ // We allow passing either as many extents as the dynamic rank or as
+ // many extents as the total rank. In the latter case, the given
+ // extents for the static dimensions must match the compile-time
+ // extents.
+ constexpr int rank = View::rank();
+ constexpr int dyn_rank = View::rank_dynamic();
+ const bool n_args_is_dyn_rank = num_passed_args == dyn_rank;
+ const bool n_args_is_rank = num_passed_args == rank;
+
+ if constexpr (rank != dyn_rank) {
+ if (n_args_is_rank) {
+ size_t new_extents[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
+ for (int i = dyn_rank; i < rank; ++i)
+ if (new_extents[i] != View::static_extent(i)) {
+ KOKKOS_IF_ON_HOST(
+ const std::string message =
+ "The specified run-time extent for Kokkos::View '" +
+ std::string(label) +
+ "' does not match the compile-time extent in dimension " +
+ std::to_string(i) + ". The given extent is " +
+ std::to_string(new_extents[i]) + " but should be " +
+ std::to_string(View::static_extent(i)) + ".\n";
+ Kokkos::abort(message.c_str());)
+ KOKKOS_IF_ON_DEVICE(
+ Kokkos::abort(
+ "The specified run-time extents for a Kokkos::View "
+ "do not match the compile-time extents.");)
+ }
+ }
+ }
+
+ if (!n_args_is_dyn_rank && !n_args_is_rank) {
+ KOKKOS_IF_ON_HOST(
+ const std::string message =
+ "Constructor for Kokkos::View '" + std::string(label) +
+ "' has mismatched number of arguments. The number "
+ "of arguments = " +
+ std::to_string(num_passed_args) +
+ " neither matches the dynamic rank = " +
+ std::to_string(dyn_rank) +
+ " nor the total rank = " + std::to_string(rank) + "\n";
+ Kokkos::abort(message.c_str());)
+ KOKKOS_IF_ON_DEVICE(Kokkos::abort("Constructor for Kokkos View has "
+ "mismatched number of arguments.");)
+ }
+ }
+}
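+// Illustration (a sketch): for Kokkos::View<double*[3]> (total rank 2,
+// dynamic rank 1) the checks above accept
+//
+//   Kokkos::View<double*[3]> a("A", 10);     // as many extents as dyn_rank
+//   Kokkos::View<double*[3]> b("B", 10, 3);  // all extents, static matches
+//
+// and abort on
+//
+//   Kokkos::View<double*[3]> c("C", 10, 4);  // static extent mismatch: 4 != 3
+//   Kokkos::View<double*[3]> d("D", 10, 3, 1);  // neither dyn_rank nor rank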
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+// Class to provide a uniform type
+namespace Kokkos {
+namespace Impl {
+template <class ViewType, int Traits = 0>
+struct ViewUniformType;
+}
+} // namespace Kokkos
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_IMPL_MDSPAN
+namespace Impl {
+struct UnsupportedKokkosArrayLayout;
+
+template <class Traits, class Enabled = void>
+struct MDSpanViewTraits {
+ using mdspan_type = UnsupportedKokkosArrayLayout;
+};
+
+// "Natural" mdspan for a view if the View's ArrayLayout is supported.
+template <class Traits>
+struct MDSpanViewTraits<Traits, std::void_t<typename LayoutFromArrayLayout<
+ typename Traits::array_layout>::type>> {
+ using index_type = std::size_t;
+ using extents_type =
+ typename Impl::ExtentsFromDataType<index_type,
+ typename Traits::data_type>::type;
+ using mdspan_layout_type =
+ typename LayoutFromArrayLayout<typename Traits::array_layout>::type;
+ using accessor_type =
+ SpaceAwareAccessor<typename Traits::memory_space,
+ Kokkos::default_accessor<typename Traits::value_type>>;
+ using mdspan_type = mdspan<typename Traits::value_type, extents_type,
+ mdspan_layout_type, accessor_type>;
+};
+} // namespace Impl
+#endif // KOKKOS_ENABLE_IMPL_MDSPAN
+
+/** \class ViewTraits
+ * \brief Traits class for accessing attributes of a View.
+ *
+ * This is an implementation detail of View. It is only of interest
+ * to developers implementing a new specialization of View.
+ *
+ * Template argument options:
+ * - View< DataType >
+ * - View< DataType , Space >
+ * - View< DataType , Space , MemoryTraits >
+ * - View< DataType , ArrayLayout >
+ * - View< DataType , ArrayLayout , Space >
+ * - View< DataType , ArrayLayout , MemoryTraits >
+ * - View< DataType , ArrayLayout , Space , MemoryTraits >
+ * - View< DataType , MemoryTraits >
+ */
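+// For example (a sketch, assuming a host-only build where the default
+// execution space's layout is LayoutRight and its memory space is
+// HostSpace), the following all resolve to the same traits:
+//
+//   ViewTraits<double**>
+//   ViewTraits<double**, Kokkos::LayoutRight>
+//   ViewTraits<double**, Kokkos::LayoutRight, Kokkos::HostSpace>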
+
+template <class DataType, class... Properties>
+struct ViewTraits;
+
+template <>
+struct ViewTraits<void> {
+ using execution_space = void;
+ using memory_space = void;
+ using HostMirrorSpace = void;
+ using array_layout = void;
+ using memory_traits = void;
+ using specialize = void;
+ using hooks_policy = void;
+};
+
+template <class... Prop>
+struct ViewTraits<void, void, Prop...> {
+ // Ignore an extraneous 'void'
+ using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+ using memory_space = typename ViewTraits<void, Prop...>::memory_space;
+ using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+ using array_layout = typename ViewTraits<void, Prop...>::array_layout;
+ using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
+ using specialize = typename ViewTraits<void, Prop...>::specialize;
+ using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class HooksPolicy, class... Prop>
+struct ViewTraits<
+ std::enable_if_t<Kokkos::Experimental::is_hooks_policy<HooksPolicy>::value>,
+ HooksPolicy, Prop...> {
+ using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+ using memory_space = typename ViewTraits<void, Prop...>::memory_space;
+ using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+ using array_layout = typename ViewTraits<void, Prop...>::array_layout;
+ using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
+ using specialize = typename ViewTraits<void, Prop...>::specialize;
+ using hooks_policy = HooksPolicy;
+};
+
+template <class ArrayLayout, class... Prop>
+struct ViewTraits<std::enable_if_t<Kokkos::is_array_layout<ArrayLayout>::value>,
+ ArrayLayout, Prop...> {
+ // Specify layout, keep subsequent space and memory traits arguments
+
+ using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+ using memory_space = typename ViewTraits<void, Prop...>::memory_space;
+ using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+ using array_layout = ArrayLayout;
+ using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
+ using specialize = typename ViewTraits<void, Prop...>::specialize;
+ using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class Space, class... Prop>
+struct ViewTraits<std::enable_if_t<Kokkos::is_space<Space>::value>, Space,
+ Prop...> {
+ // Specify Space, memory traits should be the only subsequent argument.
+
+ static_assert(
+ std::is_same_v<typename ViewTraits<void, Prop...>::execution_space,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::memory_space,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::HostMirrorSpace,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::array_layout,
+ void>,
+ "Only one View Execution or Memory Space template argument");
+
+ using execution_space = typename Space::execution_space;
+ using memory_space = typename Space::memory_space;
+ using HostMirrorSpace =
+ typename Kokkos::Impl::HostMirror<Space>::Space::memory_space;
+ using array_layout = typename execution_space::array_layout;
+ using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
+ using specialize = typename ViewTraits<void, Prop...>::specialize;
+ using hooks_policy = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class MemoryTraits, class... Prop>
+struct ViewTraits<
+ std::enable_if_t<Kokkos::is_memory_traits<MemoryTraits>::value>,
+ MemoryTraits, Prop...> {
+ // Specify memory trait, should not be any subsequent arguments
+
+ static_assert(
+ std::is_same_v<typename ViewTraits<void, Prop...>::execution_space,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::memory_space,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::array_layout,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::memory_traits,
+ void> &&
+ std::is_same_v<typename ViewTraits<void, Prop...>::hooks_policy,
+ void>,
+ "MemoryTrait is the final optional template argument for a View");
+
+ using execution_space = void;
+ using memory_space = void;
+ using HostMirrorSpace = void;
+ using array_layout = void;
+ using memory_traits = MemoryTraits;
+ using specialize = void;
+ using hooks_policy = void;
+};
+
+template <class DataType, class... Properties>
+struct ViewTraits {
+ private:
+ // Unpack the properties arguments
+ using prop = ViewTraits<void, Properties...>;
+
+ using ExecutionSpace =
+ std::conditional_t<!std::is_void_v<typename prop::execution_space>,
+ typename prop::execution_space,
+ Kokkos::DefaultExecutionSpace>;
+
+ using MemorySpace =
+ std::conditional_t<!std::is_void_v<typename prop::memory_space>,
+ typename prop::memory_space,
+ typename ExecutionSpace::memory_space>;
+
+ using ArrayLayout =
+ std::conditional_t<!std::is_void_v<typename prop::array_layout>,
+ typename prop::array_layout,
+ typename ExecutionSpace::array_layout>;
+
+ using HostMirrorSpace = std::conditional_t<
+ !std::is_void_v<typename prop::HostMirrorSpace>,
+ typename prop::HostMirrorSpace,
+ typename Kokkos::Impl::HostMirror<ExecutionSpace>::Space>;
+
+ using MemoryTraits =
+ std::conditional_t<!std::is_void_v<typename prop::memory_traits>,
+ typename prop::memory_traits,
+ typename Kokkos::MemoryManaged>;
+
+ using HooksPolicy =
+ std::conditional_t<!std::is_void_v<typename prop::hooks_policy>,
+ typename prop::hooks_policy,
+ Kokkos::Experimental::DefaultViewHooks>;
+
+ // Analyze data type's properties,
+ // May be specialized based upon the layout and value type
+ using data_analysis = Kokkos::Impl::ViewDataAnalysis<DataType, ArrayLayout>;
+
+ public:
+ //------------------------------------
+ // Data type traits:
+
+ using data_type = typename data_analysis::type;
+ using const_data_type = typename data_analysis::const_type;
+ using non_const_data_type = typename data_analysis::non_const_type;
+
+ //------------------------------------
+ // Compatible array of trivial type traits:
+
+ using scalar_array_type = typename data_analysis::scalar_array_type;
+ using const_scalar_array_type =
+ typename data_analysis::const_scalar_array_type;
+ using non_const_scalar_array_type =
+ typename data_analysis::non_const_scalar_array_type;
+
+ //------------------------------------
+ // Value type traits:
+
+ using value_type = typename data_analysis::value_type;
+ using const_value_type = typename data_analysis::const_value_type;
+ using non_const_value_type = typename data_analysis::non_const_value_type;
+
+ //------------------------------------
+ // Mapping traits:
+
+ using array_layout = ArrayLayout;
+ using dimension = typename data_analysis::dimension;
+
+ using specialize = std::conditional_t<
+ std::is_void_v<typename data_analysis::specialize>,
+ typename prop::specialize,
+ typename data_analysis::specialize>; /* mapping specialization tag */
+
+ static constexpr unsigned rank = dimension::rank;
+ static constexpr unsigned rank_dynamic = dimension::rank_dynamic;
+
+ //------------------------------------
+ // Execution space, memory space, memory access traits, and host mirror space.
+
+ using execution_space = ExecutionSpace;
+ using memory_space = MemorySpace;
+ using device_type = Kokkos::Device<ExecutionSpace, MemorySpace>;
+ using memory_traits = MemoryTraits;
+ using host_mirror_space = HostMirrorSpace;
+ using hooks_policy = HooksPolicy;
+
+ using size_type = typename MemorySpace::size_type;
+
+ enum { is_hostspace = std::is_same_v<MemorySpace, HostSpace> };
+ enum { is_managed = MemoryTraits::is_unmanaged == 0 };
+ enum { is_random_access = MemoryTraits::is_random_access == 1 };
+
+ //------------------------------------
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Impl {
+template <class ValueType, class TypeList>
+struct TypeListToViewTraits;
+
+template <class ValueType, class... Properties>
+struct TypeListToViewTraits<ValueType, Kokkos::Impl::type_list<Properties...>> {
+ using type = ViewTraits<ValueType, Properties...>;
+};
+
+// It is not safe to assume that subviews of views with the Aligned memory trait
+// are also aligned. Hence, just remove that attribute for subviews.
+template <class D, class... P>
+struct RemoveAlignedMemoryTrait {
+ private:
+ using type_list_in = Kokkos::Impl::type_list<P...>;
+ using memory_traits = typename ViewTraits<D, P...>::memory_traits;
+ using type_list_in_wo_memory_traits =
+ typename Kokkos::Impl::type_list_remove_first<memory_traits,
+ type_list_in>::type;
+ using new_memory_traits =
+ Kokkos::MemoryTraits<memory_traits::impl_value & ~Kokkos::Aligned>;
+ using new_type_list = typename Kokkos::Impl::concat_type_list<
+ type_list_in_wo_memory_traits,
+ Kokkos::Impl::type_list<new_memory_traits>>::type;
+
+ public:
+ using type = typename TypeListToViewTraits<D, new_type_list>::type;
+};
+} // namespace Impl
+
+} /* namespace Kokkos */
+
+#endif /* KOKKOS_VIEWTRAITS_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EXPERIMENTAL_VIEWUNIFORMTYPE_HPP
#define KOKKOS_EXPERIMENTAL_VIEWUNIFORMTYPE_HPP
template <class ScalarType, int Rank>
struct ViewScalarToDataType {
using type = typename ViewScalarToDataType<ScalarType, Rank - 1>::type *;
+ using const_type =
+ typename ViewScalarToDataType<ScalarType, Rank - 1>::const_type *;
};
template <class ScalarType>
struct ViewScalarToDataType<ScalarType, 0> {
- using type = ScalarType;
+ using type = ScalarType;
+ using const_type = const ScalarType;
};
template <class LayoutType, int Rank>
template <class ViewType, int Traits>
struct ViewUniformType {
using data_type = typename ViewType::data_type;
- using const_data_type = std::add_const_t<typename ViewType::data_type>;
+ using const_data_type = typename ViewType::const_data_type;
using runtime_data_type =
typename ViewScalarToDataType<typename ViewType::value_type,
ViewType::rank>::type;
- using runtime_const_data_type = typename ViewScalarToDataType<
- std::add_const_t<typename ViewType::value_type>, ViewType::rank>::type;
+ using runtime_const_data_type =
+ typename ViewScalarToDataType<typename ViewType::value_type,
+ ViewType::rank>::const_type;
using array_layout =
typename ViewUniformLayout<typename ViewType::array_layout,
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_MDSPAN_ACCESSOR_HPP
+#define KOKKOS_MDSPAN_ACCESSOR_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <desul/atomics.hpp>
+
+namespace Kokkos {
+
+// For now, use the accessors in the Impl namespace as an implementation
+// detail while rebasing View on mdspan.
+namespace Impl {
+
+template <class MemorySpace, class NestedAccessor>
+struct SpaceAwareAccessor {
+ // Part of Accessor Requirements
+ using element_type = typename NestedAccessor::element_type;
+ using reference = typename NestedAccessor::reference;
+ using data_handle_type = typename NestedAccessor::data_handle_type;
+ using offset_policy =
+ SpaceAwareAccessor<MemorySpace, typename NestedAccessor::offset_policy>;
+
+ // Specific to SpaceAwareAccessor
+ using memory_space = MemorySpace;
+ using nested_accessor_type = NestedAccessor;
+
+ static_assert(is_memory_space_v<memory_space>);
+
+ KOKKOS_DEFAULTED_FUNCTION
+ constexpr SpaceAwareAccessor() = default;
+
+ template <
+ class OtherMemorySpace, class OtherNestedAccessorType,
+ std::enable_if_t<
+ MemorySpaceAccess<MemorySpace, OtherMemorySpace>::assignable &&
+ std::is_constructible_v<NestedAccessor, OtherNestedAccessorType>,
+ int> = 0>
+ KOKKOS_FUNCTION constexpr SpaceAwareAccessor(
+ const SpaceAwareAccessor<OtherMemorySpace, OtherNestedAccessorType>&
+ other) noexcept
+ : nested_acc(other.nested_acc) {}
+
+ KOKKOS_FUNCTION
+ SpaceAwareAccessor(const NestedAccessor& acc) : nested_acc(acc) {}
+
+ KOKKOS_FUNCTION
+ explicit operator NestedAccessor() const { return nested_acc; }
+
+ KOKKOS_FUNCTION
+ constexpr reference access(data_handle_type p, size_t i) const noexcept {
+ Kokkos::Impl::runtime_check_memory_access_violation<memory_space>(
+ "Kokkos::SpaceAwareAccessor ERROR: attempt to access inaccessible "
+ "memory space");
+ return nested_acc.access(p, i);
+ }
+
+ KOKKOS_FUNCTION
+ constexpr typename offset_policy::data_handle_type offset(
+ data_handle_type p, size_t i) const noexcept {
+ return nested_acc.offset(p, i);
+ }
+
+ // Canonical way of accessing the nested accessor; see ISO C++
+ // [linalg.scaled.scaledaccessor]
+ KOKKOS_FUNCTION
+ constexpr const NestedAccessor& nested_accessor() const noexcept {
+ return nested_acc;
+ }
+
+ private:
+// We either compile with our custom mdspan implementation, in which case we
+// discover inside it whether no_unique_address works, or we use C++23, in
+// which case it had better be available.
+#ifdef _MDSPAN_NO_UNIQUE_ADDRESS
+ _MDSPAN_NO_UNIQUE_ADDRESS
+#else
+ [[no_unique_address]]
+#endif
+ NestedAccessor nested_acc;
+ template <class, class>
+ friend struct SpaceAwareAccessor;
+};
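+// Behavior sketch (assuming, for illustration, a device backend whose memory
+// is not host-accessible): calling access() on a SpaceAwareAccessor for that
+// device memory space from host code trips
+// runtime_check_memory_access_violation() and aborts, whereas the
+// AnonymousSpace specialization below performs no such check.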
+
+template <class NestedAccessor>
+struct SpaceAwareAccessor<AnonymousSpace, NestedAccessor> {
+ // Part of Accessor Requirements
+ using element_type = typename NestedAccessor::element_type;
+ using reference = typename NestedAccessor::reference;
+ using data_handle_type = typename NestedAccessor::data_handle_type;
+
+ using offset_policy =
+ SpaceAwareAccessor<AnonymousSpace,
+ typename NestedAccessor::offset_policy>;
+
+ // Specific to SpaceAwareAccessor
+ using memory_space = AnonymousSpace;
+ using nested_accessor_type = NestedAccessor;
+
+ KOKKOS_DEFAULTED_FUNCTION
+ constexpr SpaceAwareAccessor() = default;
+
+ template <class OtherMemorySpace, class OtherNestedAccessorType,
+ std::enable_if_t<std::is_constructible_v<NestedAccessor,
+ OtherNestedAccessorType>,
+ int> = 0>
+ KOKKOS_FUNCTION constexpr SpaceAwareAccessor(
+ const SpaceAwareAccessor<OtherMemorySpace, OtherNestedAccessorType>&
+ other) noexcept
+ : nested_acc(other.nested_acc) {}
+
+ KOKKOS_FUNCTION
+ SpaceAwareAccessor(const NestedAccessor& acc) : nested_acc(acc) {}
+
+ KOKKOS_FUNCTION
+ explicit operator NestedAccessor() const { return nested_acc; }
+
+ KOKKOS_FUNCTION
+ constexpr reference access(data_handle_type p, size_t i) const noexcept {
+ return nested_acc.access(p, i);
+ }
+
+ KOKKOS_FUNCTION
+ constexpr typename offset_policy::data_handle_type offset(
+ data_handle_type p, size_t i) const noexcept {
+ return nested_acc.offset(p, i);
+ }
+
+ // Canonical way of accessing the nested accessor; see ISO C++
+ // [linalg.scaled.scaledaccessor]
+ KOKKOS_FUNCTION
+ constexpr const NestedAccessor& nested_accessor() const noexcept {
+ return nested_acc;
+ }
+
+ private:
+// We either compile with our custom mdspan implementation, in which case we
+// discover inside it whether no_unique_address works, or we use C++23, in
+// which case it had better be available.
+#ifdef _MDSPAN_NO_UNIQUE_ADDRESS
+ _MDSPAN_NO_UNIQUE_ADDRESS
+#else
+ [[no_unique_address]]
+#endif
+ NestedAccessor nested_acc;
+ template <class, class>
+ friend struct SpaceAwareAccessor;
+};
+
+// Like the atomic_accessor_relaxed proposed for ISO C++26, but with a
+// defaulted memory scope, similar to how desul's AtomicRef carries a
+// memory scope.
+template <class ElementType, class MemoryScope = desul::MemoryScopeDevice>
+struct AtomicAccessorRelaxed {
+ using element_type = ElementType;
+ using reference =
+ desul::AtomicRef<ElementType, desul::MemoryOrderRelaxed, MemoryScope>;
+ using data_handle_type = ElementType*;
+ using offset_policy = AtomicAccessorRelaxed;
+
+ KOKKOS_DEFAULTED_FUNCTION
+ AtomicAccessorRelaxed() = default;
+
+ // Conversions from non-const to const element type
+ template <class OtherElementType,
+ std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], element_type (*)[]>>* = nullptr>
+ KOKKOS_FUNCTION constexpr AtomicAccessorRelaxed(
+ Kokkos::default_accessor<OtherElementType>) noexcept {}
+
+ template <class OtherElementType,
+ std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], element_type (*)[]>>* = nullptr>
+ KOKKOS_FUNCTION constexpr AtomicAccessorRelaxed(
+ AtomicAccessorRelaxed<OtherElementType, MemoryScope>) noexcept {}
+
+ template <class OtherElementType,
+ std::enable_if_t<std::is_convertible_v<
+ element_type (*)[], OtherElementType (*)[]>>* = nullptr>
+ KOKKOS_FUNCTION explicit operator default_accessor<OtherElementType>() const {
+ return default_accessor<OtherElementType>{};
+ }
+
+ KOKKOS_FUNCTION
+ reference access(data_handle_type p, size_t i) const noexcept {
+ return reference(p[i]);
+ }
+
+ KOKKOS_FUNCTION
+ data_handle_type offset(data_handle_type p, size_t i) const noexcept {
+ return p + i;
+ }
+};
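+// Usage sketch (names and dimensions are illustrative only): viewing memory
+// through this accessor makes every element access return a
+// desul::AtomicRef, so plain-looking updates become relaxed atomics.
+//
+//   Kokkos::mdspan<double, Kokkos::dextents<std::size_t, 1>,
+//                  Kokkos::layout_right, AtomicAccessorRelaxed<double>>
+//       v(ptr, n);
+//   v[0] += 1.0;  // relaxed atomic add through desul::AtomicRef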
+
+//=====================================================================
+//============= Reference Counted Accessor and DataHandle =============
+//=====================================================================
+
+template <class ElementType, class MemorySpace>
+class ReferenceCountedDataHandle {
+ public:
+ using value_type = ElementType;
+ using pointer = value_type*;
+ using reference = value_type&;
+ using memory_space = MemorySpace;
+
+ KOKKOS_DEFAULTED_FUNCTION
+ ReferenceCountedDataHandle() = default;
+
+ // This constructor only ever works on the host.
+ explicit ReferenceCountedDataHandle(SharedAllocationRecord<void, void>* rec) {
+ m_tracker.assign_allocated_record_to_uninitialized(rec);
+ m_handle = static_cast<pointer>(get_record()->data());
+ }
+
+ KOKKOS_FUNCTION
+ ReferenceCountedDataHandle(const SharedAllocationTracker& tracker,
+ pointer data_handle)
+ : m_tracker(tracker), m_handle(data_handle) {}
+
+ // unmanaged ctor
+ template <class OtherElementType,
+ class = std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], value_type (*)[]>>>
+ KOKKOS_FUNCTION ReferenceCountedDataHandle(OtherElementType* ptr)
+ : m_tracker(), m_handle(ptr) {}
+
+ // subview ctor
+ template <class OtherElementType,
+ class = std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], value_type (*)[]>>>
+ KOKKOS_FUNCTION ReferenceCountedDataHandle(
+ const ReferenceCountedDataHandle& other, OtherElementType* ptr)
+ : m_tracker(other.m_tracker), m_handle(ptr) {}
+
+ // converting ctor
+ template <class OtherElementType,
+ class = std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], value_type (*)[]>>>
+ KOKKOS_FUNCTION ReferenceCountedDataHandle(
+ const ReferenceCountedDataHandle<OtherElementType, memory_space>& other)
+ : m_tracker(other.m_tracker), m_handle(other.m_handle) {}
+
+ template <
+ class OtherElementType, class OtherSpace,
+ class = std::enable_if_t<
+ std::is_convertible_v<OtherElementType (*)[], value_type (*)[]> &&
+ (std::is_same_v<OtherSpace, AnonymousSpace> ||
+ std::is_same_v<memory_space, AnonymousSpace>)>>
+ KOKKOS_FUNCTION ReferenceCountedDataHandle(
+ const ReferenceCountedDataHandle<OtherElementType, OtherSpace>& other)
+ : m_tracker(other.m_tracker), m_handle(other.m_handle) {}
+
+ KOKKOS_FUNCTION
+ pointer get() const noexcept { return m_handle; }
+ KOKKOS_FUNCTION
+ explicit operator pointer() const noexcept { return m_handle; }
+
+ bool has_record() const { return m_tracker.has_record(); }
+ auto* get_record() const { return m_tracker.get_record<memory_space>(); }
+ int use_count() const noexcept { return m_tracker.use_count(); }
+
+ std::string get_label() const { return m_tracker.get_label<memory_space>(); }
+ KOKKOS_FUNCTION
+ const SharedAllocationTracker& tracker() const noexcept { return m_tracker; }
+
+ KOKKOS_FUNCTION
+ friend bool operator==(const ReferenceCountedDataHandle& lhs,
+ const value_type* rhs) {
+ return lhs.m_handle == rhs;
+ }
+
+ KOKKOS_FUNCTION
+ friend bool operator==(const value_type* lhs,
+ const ReferenceCountedDataHandle& rhs) {
+ return lhs == rhs.m_handle;
+ }
+
+ private:
+ template <class OtherElementType, class OtherSpace>
+ friend class ReferenceCountedDataHandle;
+
+ template <class OtherElementType, class OtherSpace, class NestedAccessor>
+ friend class ReferenceCountedAccessor;
+
+ SharedAllocationTracker m_tracker;
+ pointer m_handle = nullptr;
+};
+
+template <class ElementType, class MemorySpace, class NestedAccessor>
+class ReferenceCountedAccessor;
+
+template <class Accessor>
+struct IsReferenceCountedAccessor : std::false_type {};
+
+template <class ElementType, class MemorySpace, class NestedAccessor>
+struct IsReferenceCountedAccessor<
+ ReferenceCountedAccessor<ElementType, MemorySpace, NestedAccessor>>
+ : std::true_type {};
+
+template <class ElementType, class MemorySpace, class NestedAccessor>
+class ReferenceCountedAccessor {
+ public:
+ using element_type = ElementType;
+ using data_handle_type = ReferenceCountedDataHandle<ElementType, MemorySpace>;
+ using reference = typename NestedAccessor::reference;
+ using offset_policy =
+ ReferenceCountedAccessor<ElementType, MemorySpace,
+ typename NestedAccessor::offset_policy>;
+ using memory_space = MemorySpace;
+
+ KOKKOS_DEFAULTED_FUNCTION
+ constexpr ReferenceCountedAccessor() noexcept = default;
+
+ template <
+ class OtherElementType, class OtherNestedAccessor,
+ class = std::enable_if_t<
+ std::is_convertible_v<OtherElementType (*)[], element_type (*)[]> &&
+ std::is_constructible_v<NestedAccessor, OtherNestedAccessor>>>
+ KOKKOS_FUNCTION constexpr ReferenceCountedAccessor(
+ const ReferenceCountedAccessor<OtherElementType, MemorySpace,
+ OtherNestedAccessor>&) {}
+
+ template <
+ class OtherElementType, class OtherSpace, class OtherNestedAccessor,
+ class = std::enable_if_t<
+ std::is_convertible_v<OtherElementType (*)[], element_type (*)[]> &&
+ (std::is_same_v<OtherSpace, AnonymousSpace> ||
+ std::is_same_v<memory_space, AnonymousSpace>) &&
+ std::is_constructible_v<NestedAccessor, OtherNestedAccessor>>>
+ KOKKOS_FUNCTION constexpr ReferenceCountedAccessor(
+ const ReferenceCountedAccessor<OtherElementType, OtherSpace,
+ OtherNestedAccessor>&) {}
+
+ template <class OtherElementType,
+ class = std::enable_if_t<std::is_convertible_v<
+ OtherElementType (*)[], element_type (*)[]>>>
+ KOKKOS_FUNCTION constexpr ReferenceCountedAccessor(
+ const default_accessor<OtherElementType>&) {}
+
+ template <class DstAccessor,
+ typename = std::enable_if_t<
+ !IsReferenceCountedAccessor<DstAccessor>::value &&
+ std::is_convertible_v<NestedAccessor, DstAccessor>>>
+ KOKKOS_FUNCTION operator DstAccessor() const {
+ return m_nested_acc;
+ }
+
+ KOKKOS_FUNCTION
+ constexpr reference access(data_handle_type p, size_t i) const {
+ return m_nested_acc.access(p.get(), i);
+ }
+
+ KOKKOS_FUNCTION
+ constexpr data_handle_type offset(data_handle_type p, size_t i) const {
+ return data_handle_type(p, m_nested_acc.offset(p.get(), i));
+ }
+
+ KOKKOS_FUNCTION
+ constexpr auto nested_accessor() const { return m_nested_acc; }
+
+ private:
+#ifdef _MDSPAN_NO_UNIQUE_ADDRESS
+ _MDSPAN_NO_UNIQUE_ADDRESS
+#else
+ [[no_unique_address]]
+#endif
+ NestedAccessor m_nested_acc;
+};
+
+template <class ElementType, class MemorySpace>
+using CheckedReferenceCountedAccessor =
+ SpaceAwareAccessor<MemorySpace,
+ ReferenceCountedAccessor<ElementType, MemorySpace,
+ default_accessor<ElementType>>>;
+
+template <class ElementType, class MemorySpace,
+ class MemoryScope = desul::MemoryScopeDevice>
+using CheckedRelaxedAtomicAccessor =
+ SpaceAwareAccessor<MemorySpace, AtomicAccessorRelaxed<ElementType>>;
+
+template <class ElementType, class MemorySpace,
+ class MemoryScope = desul::MemoryScopeDevice>
+using CheckedReferenceCountedRelaxedAtomicAccessor = SpaceAwareAccessor<
+ MemorySpace, ReferenceCountedAccessor<ElementType, MemorySpace,
+ AtomicAccessorRelaxed<ElementType>>>;
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_EXPERIMENTAL_MDSPAN_EXTENTS_HPP
+#define KOKKOS_EXPERIMENTAL_MDSPAN_EXTENTS_HPP
+
+#include "Kokkos_MDSpan_Header.hpp"
+
+namespace Kokkos::Impl {
+
+// Forward declarations from impl/Kokkos_ViewMapping.hpp
+// We cannot include it directly since ViewMapping is used elsewhere in View.
+// After View is fully moved to mdspan we can include it only from here.
+template <class DataType>
+struct ViewArrayAnalysis;
+
+template <std::size_t... Vals>
+struct ViewDimension;
+
+template <class T, class Dim>
+struct ViewDataType;
+
+// A few things to note:
+// - mdspan allows for 0-rank extents similarly to View, so we don't need
+//   special handling of this case
+// - View dynamic dimensions must appear before static dimensions. This isn't
+//   a requirement in mdspan but won't cause an issue here
+template <std::size_t N>
+struct ExtentFromDimension {
+ static constexpr std::size_t value = N;
+};
+
+// Kokkos uses a dimension of '0' to denote a dynamic dimension.
+template <>
+struct ExtentFromDimension<std::size_t{0}> {
+ static constexpr std::size_t value = dynamic_extent;
+};
+
+template <std::size_t N>
+struct DimensionFromExtent {
+ static constexpr std::size_t value = N;
+};
+
+template <>
+struct DimensionFromExtent<dynamic_extent> {
+ static constexpr std::size_t value = std::size_t{0};
+};
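+// Round-trip sanity sketch of the two encodings:
+//
+//   static_assert(ExtentFromDimension<0>::value == dynamic_extent);
+//   static_assert(DimensionFromExtent<dynamic_extent>::value == 0);
+//   static_assert(ExtentFromDimension<5>::value == 5);
+//   static_assert(DimensionFromExtent<5>::value == 5);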
+
+template <class IndexType, class Dimension, class Indices>
+struct ExtentsFromDimension;
+
+template <class IndexType, class Dimension, std::size_t... Indices>
+struct ExtentsFromDimension<IndexType, Dimension,
+ std::index_sequence<Indices...>> {
+ using type =
+ extents<IndexType,
+ ExtentFromDimension<Dimension::static_extent(Indices)>::value...>;
+};
+
+template <class Extents, class Indices>
+struct DimensionsFromExtent;
+
+template <class Extents, std::size_t... Indices>
+struct DimensionsFromExtent<Extents, std::index_sequence<Indices...>> {
+ using type = ::Kokkos::Impl::ViewDimension<
+ DimensionFromExtent<Extents::static_extent(Indices)>::value...>;
+};
+
+template <class IndexType, class DataType>
+struct ExtentsFromDataType {
+ using array_analysis = ::Kokkos::Impl::ViewArrayAnalysis<DataType>;
+ using dimension_type = typename array_analysis::dimension;
+
+ using type = typename ExtentsFromDimension<
+ IndexType, dimension_type,
+ std::make_index_sequence<dimension_type::rank>>::type;
+};
+
+template <class T, class Extents>
+struct DataTypeFromExtents {
+ using extents_type = Extents;
+ using dimension_type = typename DimensionsFromExtent<
+ Extents, std::make_index_sequence<extents_type::rank()>>::type;
+
+ // Will cause a compile error if it is malformed (i.e. dynamic after static)
+ using type = typename ::Kokkos::Impl::ViewDataType<T, dimension_type>::type;
+};
+
+template <class Extents, class VM, std::size_t... Indices>
+constexpr KOKKOS_INLINE_FUNCTION auto extents_from_view_mapping_impl(
+ const VM &view_mapping, std::index_sequence<Indices...>) {
+ return Extents{view_mapping.extent(Indices)...};
+}
+
+template <class Extents, class VM>
+constexpr KOKKOS_INLINE_FUNCTION auto extents_from_view_mapping(
+ const VM &view_mapping) {
+ static_assert(Extents::rank() == VM::Rank);
+ return extents_from_view_mapping_impl<Extents>(
+ view_mapping, std::make_index_sequence<Extents::rank()>{});
+}
+} // namespace Kokkos::Impl
+
+#endif // KOKKOS_EXPERIMENTAL_MDSPAN_EXTENTS_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_EXPERIMENTAL_MDSPAN_HPP
+#define KOKKOS_EXPERIMENTAL_MDSPAN_HPP
+
+// Look for the right mdspan
+#if __cplusplus >= 202002L
+#include <version>
+#endif
+
+// Only use standard library mdspan if we are not running Cuda or HIP.
+// Likely these implementations won't be supported on device, so we should use
+// our own device-compatible version for now.
+#if (__cpp_lib_mdspan >= 202207L) && !defined(KOKKOS_ENABLE_CUDA) && \
+ !defined(KOKKOS_ENABLE_HIP)
+#include <mdspan>
+namespace Kokkos {
+using std::default_accessor;
+using std::dextents;
+using std::dynamic_extent;
+using std::extents;
+using std::layout_left;
+using std::layout_right;
+using std::layout_stride;
+using std::mdspan;
+} // namespace Kokkos
+#else
+#include <mdspan/mdspan.hpp>
+#endif
+
+#endif // KOKKOS_EXPERIMENTAL_MDSPAN_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_EXPERIMENTAL_MDSPAN_LAYOUT_HPP
+#define KOKKOS_EXPERIMENTAL_MDSPAN_LAYOUT_HPP
+
+#include "Kokkos_MDSpan_Extents.hpp"
+#include <View/Kokkos_ViewDataAnalysis.hpp>
+
+// The difference between a legacy Kokkos array layout and an
+// mdspan layout is that the array layouts can have state, but don't have the
+// nested mapping. This file provides interoperability helpers.
+
+namespace Kokkos::Impl {
+
+template <class ArrayLayout>
+struct LayoutFromArrayLayout;
+
+template <>
+struct LayoutFromArrayLayout<Kokkos::LayoutLeft> {
+ using type = Kokkos::Experimental::layout_left_padded<dynamic_extent>;
+};
+
+template <>
+struct LayoutFromArrayLayout<Kokkos::LayoutRight> {
+ using type = Kokkos::Experimental::layout_right_padded<dynamic_extent>;
+};
+
+template <>
+struct LayoutFromArrayLayout<Kokkos::LayoutStride> {
+ using type = layout_stride;
+};
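+// e.g. (a sketch): a LayoutLeft View corresponds to mdspan's padded left
+// layout, so padding/stride information survives the conversion:
+//
+//   static_assert(std::is_same_v<
+//       LayoutFromArrayLayout<Kokkos::LayoutLeft>::type,
+//       Kokkos::Experimental::layout_left_padded<dynamic_extent>>);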
+
+template <class ArrayLayout, class MDSpanType>
+KOKKOS_INLINE_FUNCTION auto array_layout_from_mapping(
+ const typename MDSpanType::mapping_type &mapping) {
+ using mapping_type = typename MDSpanType::mapping_type;
+ using extents_type = typename mapping_type::extents_type;
+
+ constexpr auto rank = extents_type::rank();
+ const auto &ext = mapping.extents();
+
+ static_assert(rank <= ARRAY_LAYOUT_MAX_RANK,
+ "Unsupported rank for mdspan (must be <= 8)");
+
+ if constexpr (std::is_same_v<ArrayLayout, LayoutStride>) {
+ return Kokkos::LayoutStride{
+ rank > 0 ? ext.extent(0) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 0 ? mapping.stride(0) : 0,
+ rank > 1 ? ext.extent(1) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 1 ? mapping.stride(1) : 0,
+ rank > 2 ? ext.extent(2) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 2 ? mapping.stride(2) : 0,
+ rank > 3 ? ext.extent(3) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 3 ? mapping.stride(3) : 0,
+ rank > 4 ? ext.extent(4) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 4 ? mapping.stride(4) : 0,
+ rank > 5 ? ext.extent(5) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 5 ? mapping.stride(5) : 0,
+ rank > 6 ? ext.extent(6) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 6 ? mapping.stride(6) : 0,
+ rank > 7 ? ext.extent(7) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 7 ? mapping.stride(7) : 0,
+ };
+ } else {
+ ArrayLayout layout{rank > 0 ? ext.extent(0) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 1 ? ext.extent(1) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 2 ? ext.extent(2) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 3 ? ext.extent(3) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 4 ? ext.extent(4) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 5 ? ext.extent(5) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 6 ? ext.extent(6) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+ rank > 7 ? ext.extent(7) : KOKKOS_IMPL_CTOR_DEFAULT_ARG};
+
+ if constexpr (rank > 1 &&
+ std::is_same_v<typename mapping_type::layout_type,
+ Kokkos::Experimental::layout_left_padded<
+ dynamic_extent>>) {
+ layout.stride = mapping.stride(1);
+ }
+ if constexpr (std::is_same_v<typename mapping_type::layout_type,
+ Kokkos::Experimental::layout_right_padded<
+ dynamic_extent>>) {
+ if constexpr (rank == 2) {
+ layout.stride = mapping.stride(0);
+ }
+ if constexpr (rank > 2) {
+ if (mapping.stride(rank - 2) != mapping.extents().extent(rank - 1))
+ Kokkos::abort(
+ "Invalid conversion from layout_right_padded to LayoutRight");
+ }
+ }
+ return layout;
+ }
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
+
+template <class MappingType, class ArrayLayout, size_t... Idx>
+KOKKOS_INLINE_FUNCTION auto mapping_from_array_layout_impl(
+ ArrayLayout layout, std::index_sequence<Idx...>) {
+ using index_type = typename MappingType::index_type;
+ using extents_type = typename MappingType::extents_type;
+ if constexpr (std::is_same_v<typename MappingType::layout_type,
+ layout_left> ||
+ std::is_same_v<typename MappingType::layout_type,
+ layout_right>) {
+ return MappingType{
+ extents_type{dextents<index_type, MappingType::extents_type::rank()>{
+ layout.dimension[Idx]...}}};
+ } else {
+ if (layout.stride == KOKKOS_IMPL_CTOR_DEFAULT_ARG ||
+ extents_type::rank() < 2) {
+ return MappingType{
+ extents_type{dextents<index_type, MappingType::extents_type::rank()>{
+ layout.dimension[Idx]...}}};
+ } else {
+ if constexpr (std::is_same_v<ArrayLayout, LayoutRight> &&
+ extents_type::rank() > 2) {
+ size_t product_of_dimensions = 1;
+ for (size_t r = 1; r < extents_type::rank(); r++)
+ product_of_dimensions *= layout.dimension[r];
+ if (product_of_dimensions != layout.stride)
+ Kokkos::abort(
+ "Invalid conversion from LayoutRight to layout_right_padded");
+ // The stride matches the dimensions, so the mapping is the unpadded one.
+ return MappingType{
+ extents_type{dextents<index_type, MappingType::extents_type::rank()>{
+ layout.dimension[Idx]...}}};
+ } else {
+ return MappingType{
+ extents_type{
+ dextents<index_type, MappingType::extents_type::rank()>{
+ layout.dimension[Idx]...}},
+ layout.stride};
+ }
+ }
+ }
+}
+template <class MappingType, size_t... Idx>
+KOKKOS_INLINE_FUNCTION auto mapping_from_array_layout_impl(
+ LayoutStride layout, std::index_sequence<Idx...>) {
+ static_assert(
+ std::is_same_v<typename MappingType::layout_type, layout_stride>);
+ using index_type = typename MappingType::index_type;
+ index_type strides[MappingType::extents_type::rank()] = {
+ layout.stride[Idx]...};
+ return MappingType{
+ mdspan_non_standard_tag(),
+ static_cast<typename MappingType::extents_type>(
+ dextents<index_type, MappingType::extents_type::rank()>{
+ layout.dimension[Idx]...}),
+ strides};
+}
+
+// specialization for rank 0 to avoid empty array
+template <class MappingType>
+KOKKOS_INLINE_FUNCTION auto mapping_from_array_layout_impl(
+ LayoutStride, std::index_sequence<>) {
+ return MappingType{};
+}
+
+template <class MappingType, class ArrayLayout>
+KOKKOS_INLINE_FUNCTION auto mapping_from_array_layout(ArrayLayout layout) {
+ return mapping_from_array_layout_impl<MappingType>(
+ layout, std::make_index_sequence<MappingType::extents_type::rank()>());
+}
+
+template <class MDSpanType, class VM>
+KOKKOS_INLINE_FUNCTION auto mapping_from_view_mapping(const VM &view_mapping) {
+ using mapping_type = typename MDSpanType::mapping_type;
+ using extents_type = typename mapping_type::extents_type;
+
+ // std::span is not available in C++17 (our current minimum requirement),
+ // so we need to use the std::array constructor for layout mappings.
+ // FIXME When C++20 is available, we can use std::span here instead.
+ std::size_t strides[VM::Rank];
+ view_mapping.stride_fill(&strides[0]);
+ if constexpr (std::is_same_v<typename mapping_type::layout_type,
+ Kokkos::layout_stride>) {
+ return mapping_type(Kokkos::mdspan_non_standard,
+ extents_from_view_mapping<extents_type>(view_mapping),
+ strides);
+ } else if constexpr (VM::Rank > 1 &&
+ std::is_same_v<typename mapping_type::layout_type,
+ Kokkos::Experimental::layout_left_padded<
+ Kokkos::dynamic_extent>>) {
+ return mapping_type(extents_from_view_mapping<extents_type>(view_mapping),
+ strides[1]);
+ } else if constexpr (VM::Rank > 1 &&
+ std::is_same_v<typename mapping_type::layout_type,
+ Kokkos::Experimental::layout_right_padded<
+ Kokkos::dynamic_extent>>) {
+ return mapping_type(extents_from_view_mapping<extents_type>(view_mapping),
+ strides[VM::Rank - 2]);
+ } else {
+ return mapping_type(extents_from_view_mapping<extents_type>(view_mapping));
+ }
+#ifdef KOKKOS_COMPILER_INTEL
+ __builtin_unreachable();
+#endif
+}
+
+} // namespace Kokkos::Impl
+
+#endif // KOKKOS_EXPERIMENTAL_MDSPAN_LAYOUT_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_CUDA_HPP
+#define KOKKOS_DECLARE_CUDA_HPP
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda.hpp>
+#include <Cuda/Kokkos_Cuda_Half_Impl_Type.hpp>
+#include <Cuda/Kokkos_Cuda_Half_Conversion.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_MDRange.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_Range.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_Team.hpp>
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_Instance.hpp>
+#include <Cuda/Kokkos_Cuda_View.hpp>
+#include <Cuda/Kokkos_Cuda_Team.hpp>
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#include <Cuda/Kokkos_Cuda_Task.hpp>
+#endif
+#include <Cuda/Kokkos_Cuda_MDRangePolicy.hpp>
+#include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
+#include <Cuda/Kokkos_Cuda_ZeroMemset.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_HIP_HPP
+#define KOKKOS_DECLARE_HIP_HPP
+
+#if defined(KOKKOS_ENABLE_HIP)
+#include <HIP/Kokkos_HIP.hpp>
+#include <HIP/Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_DeepCopy.hpp>
+#include <HIP/Kokkos_HIP_Half_Impl_Type.hpp>
+#include <HIP/Kokkos_HIP_Half_Conversion.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_MDRangePolicy.hpp>
+#include <HIP/Kokkos_HIP_ParallelFor_Range.hpp>
+#include <HIP/Kokkos_HIP_ParallelFor_MDRange.hpp>
+#include <HIP/Kokkos_HIP_ParallelFor_Team.hpp>
+#include <HIP/Kokkos_HIP_ParallelReduce_Range.hpp>
+#include <HIP/Kokkos_HIP_ParallelReduce_MDRange.hpp>
+#include <HIP/Kokkos_HIP_ParallelReduce_Team.hpp>
+#include <HIP/Kokkos_HIP_ParallelScan_Range.hpp>
+#include <HIP/Kokkos_HIP_SharedAllocationRecord.hpp>
+#include <HIP/Kokkos_HIP_UniqueToken.hpp>
+#include <HIP/Kokkos_HIP_ZeroMemset.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+using HIPSpace = ::Kokkos::HIPSpace;
+using HIPHostPinnedSpace = ::Kokkos::HIPHostPinnedSpace;
+using HIPManagedSpace = ::Kokkos::HIPManagedSpace;
+using HIP = ::Kokkos::HIP;
+} // namespace Experimental
+} // namespace Kokkos
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_HPX_HPP
+#define KOKKOS_DECLARE_HPX_HPP
+
+#if defined(KOKKOS_ENABLE_HPX)
+#include <HPX/Kokkos_HPX.hpp>
+#include <HPX/Kokkos_HPX_MDRangePolicy.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_OPENACC_HPP
+#define KOKKOS_DECLARE_OPENACC_HPP
+
+#if defined(KOKKOS_ENABLE_OPENACC)
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <OpenACC/Kokkos_OpenACC_DeepCopy.hpp>
+#include <OpenACC/Kokkos_OpenACC_SharedAllocationRecord.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelFor_Range.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelReduce_Range.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelScan_Range.hpp>
+#include <OpenACC/Kokkos_OpenACC_MDRangePolicy.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelFor_MDRange.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelReduce_MDRange.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelFor_Team.hpp>
+#include <OpenACC/Kokkos_OpenACC_ParallelReduce_Team.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_OPENMP_HPP
+#define KOKKOS_DECLARE_OPENMP_HPP
+
+#if defined(KOKKOS_ENABLE_OPENMP)
+#include <OpenMP/Kokkos_OpenMP.hpp>
+#include <OpenMP/Kokkos_OpenMP_MDRangePolicy.hpp>
+#include <OpenMP/Kokkos_OpenMP_UniqueToken.hpp>
+#include <OpenMP/Kokkos_OpenMP_Parallel_For.hpp>
+#include <OpenMP/Kokkos_OpenMP_Parallel_Reduce.hpp>
+#include <OpenMP/Kokkos_OpenMP_Parallel_Scan.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_OPENMPTARGET_HPP
+#define KOKKOS_DECLARE_OPENMPTARGET_HPP
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET)
+#include <OpenMPTarget/Kokkos_OpenMPTarget.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTargetSpace.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Reducer.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_MDRangePolicy.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelFor_Range.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelFor_Team.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelReduce_Range.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelReduce_Team.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelScan_Range.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_ParallelScan_Team.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_SERIAL_HPP
+#define KOKKOS_DECLARE_SERIAL_HPP
+
+#if defined(KOKKOS_ENABLE_SERIAL)
+#include <Serial/Kokkos_Serial.hpp>
+#include <Serial/Kokkos_Serial_MDRangePolicy.hpp>
+#include <Serial/Kokkos_Serial_ZeroMemset.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_SYCL_HPP
+#define KOKKOS_DECLARE_SYCL_HPP
+
+#if defined(KOKKOS_ENABLE_SYCL)
+#include <SYCL/Kokkos_SYCL.hpp>
+#ifdef SYCL_EXT_ONEAPI_GRAPH
+#include <SYCL/Kokkos_SYCL_GraphNodeKernel.hpp>
+#endif
+#include <SYCL/Kokkos_SYCL_Half_Impl_Type.hpp>
+#include <SYCL/Kokkos_SYCL_Half_Conversion.hpp>
+#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
+#include <SYCL/Kokkos_SYCL_MDRangePolicy.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelFor_Range.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelFor_MDRange.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelFor_Team.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelReduce_Range.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelReduce_MDRange.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelReduce_Team.hpp>
+#include <SYCL/Kokkos_SYCL_ParallelScan_Range.hpp>
+#include <SYCL/Kokkos_SYCL_UniqueToken.hpp>
+#include <SYCL/Kokkos_SYCL_ZeroMemset.hpp>
+
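+// The SYCL execution and memory space types live directly in namespace
+// Kokkos (see the forward declarations elsewhere in this patch); the aliases
+// below appear to keep the older Kokkos::Experimental:: spellings working
+// for existing code.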
+namespace Kokkos {
+namespace Experimental {
+using SYCLDeviceUSMSpace = ::Kokkos::SYCLDeviceUSMSpace;
+using SYCLHostUSMSpace = ::Kokkos::SYCLHostUSMSpace;
+using SYCLSharedUSMSpace = ::Kokkos::SYCLSharedUSMSpace;
+using SYCL = ::Kokkos::SYCL;
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DECLARE_THREADS_HPP
+#define KOKKOS_DECLARE_THREADS_HPP
+
+#if defined(KOKKOS_ENABLE_THREADS)
+#include <Threads/Kokkos_Threads.hpp>
+#include <Threads/Kokkos_Threads_Instance.hpp>
+#include <Threads/Kokkos_Threads_MDRangePolicy.hpp>
+#include <Threads/Kokkos_Threads_ParallelFor_Range.hpp>
+#include <Threads/Kokkos_Threads_ParallelFor_MDRange.hpp>
+#include <Threads/Kokkos_Threads_ParallelFor_Team.hpp>
+#include <Threads/Kokkos_Threads_ParallelReduce_Range.hpp>
+#include <Threads/Kokkos_Threads_ParallelReduce_MDRange.hpp>
+#include <Threads/Kokkos_Threads_ParallelReduce_Team.hpp>
+#include <Threads/Kokkos_Threads_ParallelScan_Range.hpp>
+#include <Threads/Kokkos_Threads_Team.hpp>
+#include <Threads/Kokkos_Threads_UniqueToken.hpp>
+#endif
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_FWD_HPP_
+#define KOKKOS_CUDA_FWD_HPP_
+#if defined(KOKKOS_ENABLE_CUDA)
+namespace Kokkos {
+
+class CudaSpace; ///< Memory space on Cuda GPU
+class CudaUVMSpace; ///< Memory space on Cuda GPU with UVM
+class CudaHostPinnedSpace; ///< Memory space on Host accessible to Cuda GPU
+class Cuda; ///< Execution space for Cuda GPU
+
+namespace Impl {
+
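+// The generic overload below is a no-op fallback for non-CUDA execution
+// spaces; the Cuda-specific overload declared after it performs the actual
+// prefetch and is expected to be defined in the CUDA backend sources.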
+template <class ExecSpace>
+void cuda_prefetch_pointer(const ExecSpace& /*space*/, const void* /*ptr*/,
+ size_t /*bytes*/, bool /*to_device*/) {}
+
+void cuda_prefetch_pointer(const Cuda& space, const void* ptr, size_t bytes,
+ bool to_device);
+
+} // namespace Impl
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HIP_FWD_HPP_
+#define KOKKOS_HIP_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_HIP)
+namespace Kokkos {
+class HIPSpace; ///< Memory space on HIP GPU
+class HIPHostPinnedSpace; ///< Memory space on Host accessible to HIP GPU
+class HIPManagedSpace;     ///< Memory space migratable between Host and HIP GPU
+class HIP; ///< Execution space for HIP GPU
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HPX_FWD_HPP_
+#define KOKKOS_HPX_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_HPX)
+namespace Kokkos {
+namespace Experimental {
+class HPX; ///< Execution space with HPX back-end.
+} // namespace Experimental
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENACC_FWD_HPP_
+#define KOKKOS_OPENACC_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENACC)
+namespace Kokkos {
+namespace Experimental {
+class OpenACC; ///< OpenACC execution space.
+class OpenACCSpace;
+} // namespace Experimental
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMP_FWD_HPP_
+#define KOKKOS_OPENMP_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENMP)
+namespace Kokkos {
+class OpenMP; ///< OpenMP execution space.
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_OPENMPTARGET_FWD_HPP_
+#define KOKKOS_OPENMPTARGET_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET)
+namespace Kokkos {
+namespace Experimental {
+class OpenMPTarget; ///< OpenMPTarget execution space.
+class OpenMPTargetSpace;
+} // namespace Experimental
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SERIAL_FWD_HPP_
+#define KOKKOS_SERIAL_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_SERIAL)
+namespace Kokkos {
+class Serial;  ///< Execution space running in the main process on the CPU.
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SYCL_FWD_HPP_
+#define KOKKOS_SYCL_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_SYCL)
+namespace Kokkos {
+class SYCLDeviceUSMSpace; ///< Memory space on SYCL device, not accessible from
+ ///< the host
+class SYCLSharedUSMSpace; ///< Memory space accessible from both the SYCL
+ ///< device and the host
+class SYCLHostUSMSpace; ///< Memory space accessible from both the SYCL
+ ///< device and the host (host pinned)
+class SYCL; ///< Execution space for SYCL
+} // namespace Kokkos
+#endif
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_THREADS_FWD_HPP_
+#define KOKKOS_THREADS_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_THREADS)
+namespace Kokkos {
+class Threads; ///< Execution space with C++11 threads back-end.
+} // namespace Kokkos
+#endif
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HOST_EXP_ITERATE_TILE_HPP
#define KOKKOS_HOST_EXP_ITERATE_TILE_HPP
// Temporary, for testing new loop macros
#define KOKKOS_ENABLE_NEW_LOOP_MACROS 1
-#define LOOP_1L(type, tile) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
+#define KOKKOS_IMPL_LOOP_1L(type, tile) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
for (type i0 = 0; i0 < static_cast<type>(tile[0]); ++i0)
-#define LOOP_2L(type, tile) \
- for (type i1 = 0; i1 < static_cast<type>(tile[1]); ++i1) LOOP_1L(type, tile)
+#define KOKKOS_IMPL_LOOP_2L(type, tile) \
+ for (type i1 = 0; i1 < static_cast<type>(tile[1]); ++i1) \
+ KOKKOS_IMPL_LOOP_1L(type, tile)
-#define LOOP_3L(type, tile) \
- for (type i2 = 0; i2 < static_cast<type>(tile[2]); ++i2) LOOP_2L(type, tile)
+#define KOKKOS_IMPL_LOOP_3L(type, tile) \
+ for (type i2 = 0; i2 < static_cast<type>(tile[2]); ++i2) \
+ KOKKOS_IMPL_LOOP_2L(type, tile)
-#define LOOP_4L(type, tile) \
- for (type i3 = 0; i3 < static_cast<type>(tile[3]); ++i3) LOOP_3L(type, tile)
+#define KOKKOS_IMPL_LOOP_4L(type, tile) \
+ for (type i3 = 0; i3 < static_cast<type>(tile[3]); ++i3) \
+ KOKKOS_IMPL_LOOP_3L(type, tile)
-#define LOOP_5L(type, tile) \
- for (type i4 = 0; i4 < static_cast<type>(tile[4]); ++i4) LOOP_4L(type, tile)
+#define KOKKOS_IMPL_LOOP_5L(type, tile) \
+ for (type i4 = 0; i4 < static_cast<type>(tile[4]); ++i4) \
+ KOKKOS_IMPL_LOOP_4L(type, tile)
-#define LOOP_6L(type, tile) \
- for (type i5 = 0; i5 < static_cast<type>(tile[5]); ++i5) LOOP_5L(type, tile)
+#define KOKKOS_IMPL_LOOP_6L(type, tile) \
+ for (type i5 = 0; i5 < static_cast<type>(tile[5]); ++i5) \
+ KOKKOS_IMPL_LOOP_5L(type, tile)
-#define LOOP_7L(type, tile) \
- for (type i6 = 0; i6 < static_cast<type>(tile[6]); ++i6) LOOP_6L(type, tile)
+#define KOKKOS_IMPL_LOOP_7L(type, tile) \
+ for (type i6 = 0; i6 < static_cast<type>(tile[6]); ++i6) \
+ KOKKOS_IMPL_LOOP_6L(type, tile)
-#define LOOP_8L(type, tile) \
- for (type i7 = 0; i7 < static_cast<type>(tile[7]); ++i7) LOOP_7L(type, tile)
+#define KOKKOS_IMPL_LOOP_8L(type, tile) \
+ for (type i7 = 0; i7 < static_cast<type>(tile[7]); ++i7) \
+ KOKKOS_IMPL_LOOP_7L(type, tile)
-#define LOOP_1R(type, tile) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
+#define KOKKOS_IMPL_LOOP_1R(type, tile) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
for (type i0 = 0; i0 < static_cast<type>(tile[0]); ++i0)
-#define LOOP_2R(type, tile) \
- LOOP_1R(type, tile) \
+#define KOKKOS_IMPL_LOOP_2R(type, tile) \
+ KOKKOS_IMPL_LOOP_1R(type, tile) \
for (type i1 = 0; i1 < static_cast<type>(tile[1]); ++i1)
-#define LOOP_3R(type, tile) \
- LOOP_2R(type, tile) \
+#define KOKKOS_IMPL_LOOP_3R(type, tile) \
+ KOKKOS_IMPL_LOOP_2R(type, tile) \
for (type i2 = 0; i2 < static_cast<type>(tile[2]); ++i2)
-#define LOOP_4R(type, tile) \
- LOOP_3R(type, tile) \
+#define KOKKOS_IMPL_LOOP_4R(type, tile) \
+ KOKKOS_IMPL_LOOP_3R(type, tile) \
for (type i3 = 0; i3 < static_cast<type>(tile[3]); ++i3)
-#define LOOP_5R(type, tile) \
- LOOP_4R(type, tile) \
+#define KOKKOS_IMPL_LOOP_5R(type, tile) \
+ KOKKOS_IMPL_LOOP_4R(type, tile) \
for (type i4 = 0; i4 < static_cast<type>(tile[4]); ++i4)
-#define LOOP_6R(type, tile) \
- LOOP_5R(type, tile) \
+#define KOKKOS_IMPL_LOOP_6R(type, tile) \
+ KOKKOS_IMPL_LOOP_5R(type, tile) \
for (type i5 = 0; i5 < static_cast<type>(tile[5]); ++i5)
-#define LOOP_7R(type, tile) \
- LOOP_6R(type, tile) \
+#define KOKKOS_IMPL_LOOP_7R(type, tile) \
+ KOKKOS_IMPL_LOOP_6R(type, tile) \
for (type i6 = 0; i6 < static_cast<type>(tile[6]); ++i6)
-#define LOOP_8R(type, tile) \
- LOOP_7R(type, tile) \
+#define KOKKOS_IMPL_LOOP_8R(type, tile) \
+ KOKKOS_IMPL_LOOP_7R(type, tile) \
for (type i7 = 0; i7 < static_cast<type>(tile[7]); ++i7)
-#define LOOP_ARGS_1 i0 + m_offset[0]
-#define LOOP_ARGS_2 LOOP_ARGS_1, i1 + m_offset[1]
-#define LOOP_ARGS_3 LOOP_ARGS_2, i2 + m_offset[2]
-#define LOOP_ARGS_4 LOOP_ARGS_3, i3 + m_offset[3]
-#define LOOP_ARGS_5 LOOP_ARGS_4, i4 + m_offset[4]
-#define LOOP_ARGS_6 LOOP_ARGS_5, i5 + m_offset[5]
-#define LOOP_ARGS_7 LOOP_ARGS_6, i6 + m_offset[6]
-#define LOOP_ARGS_8 LOOP_ARGS_7, i7 + m_offset[7]
+#define KOKKOS_IMPL_LOOP_ARGS_1 i0 + m_offset[0]
+#define KOKKOS_IMPL_LOOP_ARGS_2 KOKKOS_IMPL_LOOP_ARGS_1, i1 + m_offset[1]
+#define KOKKOS_IMPL_LOOP_ARGS_3 KOKKOS_IMPL_LOOP_ARGS_2, i2 + m_offset[2]
+#define KOKKOS_IMPL_LOOP_ARGS_4 KOKKOS_IMPL_LOOP_ARGS_3, i3 + m_offset[3]
+#define KOKKOS_IMPL_LOOP_ARGS_5 KOKKOS_IMPL_LOOP_ARGS_4, i4 + m_offset[4]
+#define KOKKOS_IMPL_LOOP_ARGS_6 KOKKOS_IMPL_LOOP_ARGS_5, i5 + m_offset[5]
+#define KOKKOS_IMPL_LOOP_ARGS_7 KOKKOS_IMPL_LOOP_ARGS_6, i6 + m_offset[6]
+#define KOKKOS_IMPL_LOOP_ARGS_8 KOKKOS_IMPL_LOOP_ARGS_7, i7 + m_offset[7]
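+// Illustrative expansion (not part of the build): the KOKKOS_IMPL_LOOP_ARGS_N
+// macros build the comma-separated argument list for a rank-N functor call,
+// e.g. KOKKOS_IMPL_LOOP_ARGS_3 expands to
+//   i0 + m_offset[0], i1 + m_offset[1], i2 + m_offset[2]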
// New Loop Macros...
// parallel_for, non-tagged
-#define APPLY(func, ...) func(__VA_ARGS__);
+#define KOKKOS_IMPL_APPLY(func, ...) func(__VA_ARGS__);
// LayoutRight
// d = 0 to start
-#define LOOP_R_1(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_R_1(func, type, m_offset, extent, d, ...) \
KOKKOS_ENABLE_IVDEP_MDRANGE \
for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- APPLY(func, __VA_ARGS__, i0 + m_offset[d]) \
+ KOKKOS_IMPL_APPLY(func, __VA_ARGS__, i0 + m_offset[d]) \
}
-#define LOOP_R_2(func, type, m_offset, extent, d, ...) \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- LOOP_R_1(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i1 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_2(func, type, m_offset, extent, d, ...) \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+ KOKKOS_IMPL_LOOP_R_1(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i1 + m_offset[d]) \
}
-#define LOOP_R_3(func, type, m_offset, extent, d, ...) \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- LOOP_R_2(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i2 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_3(func, type, m_offset, extent, d, ...) \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+ KOKKOS_IMPL_LOOP_R_2(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i2 + m_offset[d]) \
}
-#define LOOP_R_4(func, type, m_offset, extent, d, ...) \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- LOOP_R_3(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i3 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_4(func, type, m_offset, extent, d, ...) \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+ KOKKOS_IMPL_LOOP_R_3(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i3 + m_offset[d]) \
}
-#define LOOP_R_5(func, type, m_offset, extent, d, ...) \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- LOOP_R_4(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i4 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_5(func, type, m_offset, extent, d, ...) \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+ KOKKOS_IMPL_LOOP_R_4(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i4 + m_offset[d]) \
}
-#define LOOP_R_6(func, type, m_offset, extent, d, ...) \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- LOOP_R_5(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i5 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_6(func, type, m_offset, extent, d, ...) \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+ KOKKOS_IMPL_LOOP_R_5(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i5 + m_offset[d]) \
}
-#define LOOP_R_7(func, type, m_offset, extent, d, ...) \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- LOOP_R_6(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i6 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_7(func, type, m_offset, extent, d, ...) \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+ KOKKOS_IMPL_LOOP_R_6(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i6 + m_offset[d]) \
}
-#define LOOP_R_8(func, type, m_offset, extent, d, ...) \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- LOOP_R_7(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i7 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_8(func, type, m_offset, extent, d, ...) \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+ KOKKOS_IMPL_LOOP_R_7(func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+ i7 + m_offset[d]) \
}
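+// Illustrative expansion (not part of the build): reached through the
+// Left vs Right dispatch below, a rank-2 LayoutRight traversal reduces to
+//   for (type i1 = 0; i1 < extent[0]; ++i1)
+//     for (type i0 = 0; i0 < extent[1]; ++i0)
+//       func(i1 + m_offset[0], i0 + m_offset[1]);
+// i.e. the last (rightmost) index varies fastest, matching LayoutRight.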
// LayoutLeft
// d = rank-1 to start
-#define LOOP_L_1(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_1(func, type, m_offset, extent, d, ...) \
KOKKOS_ENABLE_IVDEP_MDRANGE \
for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- APPLY(func, i0 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_APPLY(func, i0 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_2(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_2(func, type, m_offset, extent, d, ...) \
for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- LOOP_L_1(func, type, m_offset, extent, d - 1, i1 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_1(func, type, m_offset, extent, d - 1, \
+ i1 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_3(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_3(func, type, m_offset, extent, d, ...) \
for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- LOOP_L_2(func, type, m_offset, extent, d - 1, i2 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_2(func, type, m_offset, extent, d - 1, \
+ i2 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_4(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_4(func, type, m_offset, extent, d, ...) \
for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- LOOP_L_3(func, type, m_offset, extent, d - 1, i3 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_3(func, type, m_offset, extent, d - 1, \
+ i3 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_5(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_5(func, type, m_offset, extent, d, ...) \
for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- LOOP_L_4(func, type, m_offset, extent, d - 1, i4 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_4(func, type, m_offset, extent, d - 1, \
+ i4 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_6(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_6(func, type, m_offset, extent, d, ...) \
for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- LOOP_L_5(func, type, m_offset, extent, d - 1, i5 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_5(func, type, m_offset, extent, d - 1, \
+ i5 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_7(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_7(func, type, m_offset, extent, d, ...) \
for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- LOOP_L_6(func, type, m_offset, extent, d - 1, i6 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_6(func, type, m_offset, extent, d - 1, \
+ i6 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_8(func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_LOOP_L_8(func, type, m_offset, extent, d, ...) \
for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- LOOP_L_7(func, type, m_offset, extent, d - 1, i7 + m_offset[d], \
- __VA_ARGS__) \
+ KOKKOS_IMPL_LOOP_L_7(func, type, m_offset, extent, d - 1, \
+ i7 + m_offset[d], __VA_ARGS__) \
}
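+// Illustrative expansion (not part of the build): the LayoutLeft chain
+// prepends each new index instead of appending it, so a rank-2 traversal
+// reduces to
+//   for (type i1 = 0; i1 < extent[1]; ++i1)
+//     for (type i0 = 0; i0 < extent[0]; ++i0)
+//       func(i0 + m_offset[0], i1 + m_offset[1]);
+// i.e. the first (leftmost) index varies fastest, matching LayoutLeft.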
// Left vs Right
// TODO: rank need not be passed through; the values could be hardcoded
-#define LOOP_LAYOUT_1(func, type, is_left, m_offset, extent, rank) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
- APPLY(func, i0 + m_offset[0]) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_1(func, type, is_left, m_offset, extent, rank) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
+ KOKKOS_IMPL_APPLY(func, i0 + m_offset[0]) \
}
-#define LOOP_LAYOUT_2(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
- LOOP_L_1(func, type, m_offset, extent, rank - 2, \
- i1 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) { \
- LOOP_R_1(func, type, m_offset, extent, 1, i1 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_2(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
+ KOKKOS_IMPL_LOOP_L_1(func, type, m_offset, extent, rank - 2, \
+ i1 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) { \
+ KOKKOS_IMPL_LOOP_R_1(func, type, m_offset, extent, 1, i1 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_3(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
- LOOP_L_2(func, type, m_offset, extent, rank - 2, \
- i2 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) { \
- LOOP_R_2(func, type, m_offset, extent, 1, i2 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_3(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
+ KOKKOS_IMPL_LOOP_L_2(func, type, m_offset, extent, rank - 2, \
+ i2 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) { \
+ KOKKOS_IMPL_LOOP_R_2(func, type, m_offset, extent, 1, i2 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_4(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
- LOOP_L_3(func, type, m_offset, extent, rank - 2, \
- i3 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) { \
- LOOP_R_3(func, type, m_offset, extent, 1, i3 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_4(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
+ KOKKOS_IMPL_LOOP_L_3(func, type, m_offset, extent, rank - 2, \
+ i3 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) { \
+ KOKKOS_IMPL_LOOP_R_3(func, type, m_offset, extent, 1, i3 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_5(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
- LOOP_L_4(func, type, m_offset, extent, rank - 2, \
- i4 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) { \
- LOOP_R_4(func, type, m_offset, extent, 1, i4 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_5(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
+ KOKKOS_IMPL_LOOP_L_4(func, type, m_offset, extent, rank - 2, \
+ i4 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) { \
+ KOKKOS_IMPL_LOOP_R_4(func, type, m_offset, extent, 1, i4 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_6(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
- LOOP_L_5(func, type, m_offset, extent, rank - 2, \
- i5 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) { \
- LOOP_R_5(func, type, m_offset, extent, 1, i5 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_6(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
+ KOKKOS_IMPL_LOOP_L_5(func, type, m_offset, extent, rank - 2, \
+ i5 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) { \
+ KOKKOS_IMPL_LOOP_R_5(func, type, m_offset, extent, 1, i5 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_7(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
- LOOP_L_6(func, type, m_offset, extent, rank - 2, \
- i6 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) { \
- LOOP_R_6(func, type, m_offset, extent, 1, i6 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_7(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
+ KOKKOS_IMPL_LOOP_L_6(func, type, m_offset, extent, rank - 2, \
+ i6 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) { \
+ KOKKOS_IMPL_LOOP_R_6(func, type, m_offset, extent, 1, i6 + m_offset[0]) \
+ } \
}
-#define LOOP_LAYOUT_8(func, type, is_left, m_offset, extent, rank) \
- if (is_left) { \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
- LOOP_L_7(func, type, m_offset, extent, rank - 2, \
- i7 + m_offset[rank - 1]) \
- } \
- } else { \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) { \
- LOOP_R_7(func, type, m_offset, extent, 1, i7 + m_offset[0]) \
- } \
+#define KOKKOS_IMPL_LOOP_LAYOUT_8(func, type, is_left, m_offset, extent, rank) \
+ if (is_left) { \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
+ KOKKOS_IMPL_LOOP_L_7(func, type, m_offset, extent, rank - 2, \
+ i7 + m_offset[rank - 1]) \
+ } \
+ } else { \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) { \
+ KOKKOS_IMPL_LOOP_R_7(func, type, m_offset, extent, 1, i7 + m_offset[0]) \
+ } \
}
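+// Illustrative note: each KOKKOS_IMPL_LOOP_LAYOUT_N macro branches only once,
+// at the outermost loop; is_left selects whether the remaining N-1 levels
+// come from the KOKKOS_IMPL_LOOP_L_* chain (dimension 0 innermost) or the
+// KOKKOS_IMPL_LOOP_R_* chain (dimension N-1 innermost).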
// Partial vs Full Tile
-#define TILE_LOOP_1(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_1(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_2(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_2(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_3(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_3(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_4(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_4(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_5(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_5(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_6(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_6(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_7(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_7(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
-#define TILE_LOOP_8(func, type, is_left, cond, m_offset, extent_full, \
- extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_partial, rank) \
+#define KOKKOS_IMPL_TILE_LOOP_8(func, type, is_left, cond, m_offset, \
+ extent_full, extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_full, \
+ rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_partial, \
+ rank) \
}
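+// Illustrative note: cond distinguishes interior (full) tiles from boundary
+// tiles that stick out past the iteration range; each KOKKOS_IMPL_TILE_LOOP_N
+// expands to the same KOKKOS_IMPL_LOOP_LAYOUT_N loop nest either way,
+// iterating over extent_full when cond is true and over the clipped
+// extent_partial otherwise.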
// parallel_reduce, non-tagged
// Reduction version
-#define APPLY_REDUX(val, func, ...) func(__VA_ARGS__, val);
+#define KOKKOS_IMPL_APPLY_REDUX(val, func, ...) func(__VA_ARGS__, val);
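+// Illustrative expansion (not part of the build): the reduction variants
+// thread the accumulator through as the trailing functor argument, e.g.
+//   KOKKOS_IMPL_APPLY_REDUX(val, func, i0, i1)  =>  func(i0, i1, val);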
// LayoutRight
// d = 0 to start
-#define LOOP_R_1_REDUX(val, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- APPLY_REDUX(val, func, __VA_ARGS__, i0 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_1_REDUX(val, func, type, m_offset, extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_APPLY_REDUX(val, func, __VA_ARGS__, i0 + m_offset[d]) \
}
-#define LOOP_R_2_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- LOOP_R_1_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i1 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_2_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+ KOKKOS_IMPL_LOOP_R_1_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i1 + m_offset[d]) \
}
-#define LOOP_R_3_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- LOOP_R_2_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i2 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_3_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+ KOKKOS_IMPL_LOOP_R_2_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i2 + m_offset[d]) \
}
-#define LOOP_R_4_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- LOOP_R_3_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i3 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_4_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+ KOKKOS_IMPL_LOOP_R_3_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i3 + m_offset[d]) \
}
-#define LOOP_R_5_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- LOOP_R_4_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i4 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_5_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+ KOKKOS_IMPL_LOOP_R_4_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i4 + m_offset[d]) \
}
-#define LOOP_R_6_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- LOOP_R_5_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i5 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_6_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+ KOKKOS_IMPL_LOOP_R_5_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i5 + m_offset[d]) \
}
-#define LOOP_R_7_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- LOOP_R_6_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i6 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_7_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+ KOKKOS_IMPL_LOOP_R_6_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i6 + m_offset[d]) \
}
-#define LOOP_R_8_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- LOOP_R_7_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i7 + m_offset[d]) \
+#define KOKKOS_IMPL_LOOP_R_8_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+ KOKKOS_IMPL_LOOP_R_7_REDUX(val, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i7 + m_offset[d]) \
}
// LayoutLeft
// d = rank-1 to start
-#define LOOP_L_1_REDUX(val, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- APPLY_REDUX(val, func, i0 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_1_REDUX(val, func, type, m_offset, extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_APPLY_REDUX(val, func, i0 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_2_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- LOOP_L_1_REDUX(val, func, type, m_offset, extent, d - 1, i1 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_2_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+ KOKKOS_IMPL_LOOP_L_1_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i1 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_3_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- LOOP_L_2_REDUX(val, func, type, m_offset, extent, d - 1, i2 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_3_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+ KOKKOS_IMPL_LOOP_L_2_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i2 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_4_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- LOOP_L_3_REDUX(val, func, type, m_offset, extent, d - 1, i3 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_4_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+ KOKKOS_IMPL_LOOP_L_3_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i3 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_5_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- LOOP_L_4_REDUX(val, func, type, m_offset, extent, d - 1, i4 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_5_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+ KOKKOS_IMPL_LOOP_L_4_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i4 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_6_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- LOOP_L_5_REDUX(val, func, type, m_offset, extent, d - 1, i5 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_6_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+ KOKKOS_IMPL_LOOP_L_5_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i5 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_7_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- LOOP_L_6_REDUX(val, func, type, m_offset, extent, d - 1, i6 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_7_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+ KOKKOS_IMPL_LOOP_L_6_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i6 + m_offset[d], __VA_ARGS__) \
}
-#define LOOP_L_8_REDUX(val, func, type, m_offset, extent, d, ...) \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- LOOP_L_7_REDUX(val, func, type, m_offset, extent, d - 1, i7 + m_offset[d], \
- __VA_ARGS__) \
+#define KOKKOS_IMPL_LOOP_L_8_REDUX(val, func, type, m_offset, extent, d, ...) \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+ KOKKOS_IMPL_LOOP_L_7_REDUX(val, func, type, m_offset, extent, d - 1, \
+ i7 + m_offset[d], __VA_ARGS__) \
}
// Left vs Right
-#define LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent, rank) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
- APPLY_REDUX(val, func, i0 + m_offset[0]) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
+ KOKKOS_IMPL_APPLY_REDUX(val, func, i0 + m_offset[0]) \
}
-#define LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
- LOOP_L_1_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i1 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_1_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i1 + m_offset[rank - 1]) \
} \
} else { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) { \
- LOOP_R_1_REDUX(val, func, type, m_offset, extent, 1, i1 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_1_REDUX(val, func, type, m_offset, extent, 1, \
+ i1 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
- LOOP_L_2_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i2 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_2_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i2 + m_offset[rank - 1]) \
} \
} else { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) { \
- LOOP_R_2_REDUX(val, func, type, m_offset, extent, 1, i2 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_2_REDUX(val, func, type, m_offset, extent, 1, \
+ i2 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
- LOOP_L_3_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i3 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_3_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i3 + m_offset[rank - 1]) \
} \
} else { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) { \
- LOOP_R_3_REDUX(val, func, type, m_offset, extent, 1, i3 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_3_REDUX(val, func, type, m_offset, extent, 1, \
+ i3 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
- LOOP_L_4_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i4 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_4_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i4 + m_offset[rank - 1]) \
} \
} else { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) { \
- LOOP_R_4_REDUX(val, func, type, m_offset, extent, 1, i4 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_4_REDUX(val, func, type, m_offset, extent, 1, \
+ i4 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
- LOOP_L_5_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i5 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_5_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i5 + m_offset[rank - 1]) \
} \
} else { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) { \
- LOOP_R_5_REDUX(val, func, type, m_offset, extent, 1, i5 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_5_REDUX(val, func, type, m_offset, extent, 1, \
+ i5 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
- LOOP_L_6_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i6 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_6_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i6 + m_offset[rank - 1]) \
} \
} else { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) { \
- LOOP_R_6_REDUX(val, func, type, m_offset, extent, 1, i6 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_6_REDUX(val, func, type, m_offset, extent, 1, \
+ i6 + m_offset[0]) \
} \
}
-#define LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
- LOOP_L_7_REDUX(val, func, type, m_offset, extent, rank - 2, \
- i7 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_LOOP_L_7_REDUX(val, func, type, m_offset, extent, rank - 2, \
+ i7 + m_offset[rank - 1]) \
} \
} else { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) { \
- LOOP_R_7_REDUX(val, func, type, m_offset, extent, 1, i7 + m_offset[0]) \
+ KOKKOS_IMPL_LOOP_R_7_REDUX(val, func, type, m_offset, extent, 1, \
+ i7 + m_offset[0]) \
} \
}
// Partial vs Full Tile
-#define TILE_LOOP_1_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_1_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_2_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_2_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_3_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_3_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_4_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_4_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_5_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_5_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_6_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_6_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_7_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_7_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TILE_LOOP_8_REDUX(val, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
- } else { \
- LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TILE_LOOP_8_REDUX(val, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
// end New Loop Macros
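+// Summary of the layering above: KOKKOS_IMPL_APPLY* performs the functor
+// call, KOKKOS_IMPL_LOOP_{L,R}_* build the per-layout loop nests,
+// KOKKOS_IMPL_LOOP_LAYOUT_* dispatch between them, and
+// KOKKOS_IMPL_TILE_LOOP_* pick full vs. partial tile extents, each for
+// ranks 1 through 8.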
// tagged macros
-#define TAGGED_APPLY(tag, func, ...) func(tag, __VA_ARGS__);
+#define KOKKOS_IMPL_TAGGED_APPLY(tag, func, ...) func(tag, __VA_ARGS__);
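+// Illustrative expansion (not part of the build): the tagged variants pass
+// the work tag as the leading argument so the tagged operator() overload is
+// selected, e.g. (with MyTag as a placeholder work-tag type)
+//   KOKKOS_IMPL_TAGGED_APPLY(MyTag(), func, i0)  =>  func(MyTag(), i0);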
// LayoutRight
// d = 0 to start
-#define TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- TAGGED_APPLY(tag, func, __VA_ARGS__, i0 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY(tag, func, __VA_ARGS__, i0 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d, ...) \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i1 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d, ...) \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i1 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d, ...) \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i2 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d, ...) \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i2 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d, ...) \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i3 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d, ...) \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i3 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d, ...) \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i4 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d, ...) \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i4 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d, ...) \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i5 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d, ...) \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i5 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d, ...) \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i6 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d, ...) \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i6 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_8(tag, func, type, m_offset, extent, d, ...) \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
- i7 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_8(tag, func, type, m_offset, extent, d, ...) \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+ KOKKOS_IMPL_TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d + 1, \
+ __VA_ARGS__, i7 + m_offset[d]) \
}
// LayoutLeft
// d = rank-1 at the outermost invocation; each level recurses with d - 1
-#define TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- TAGGED_APPLY(tag, func, i0 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY(tag, func, i0 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d, ...) \
- for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d - 1, \
- i1 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d, ...) \
+ for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d - 1, \
+ i1 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d, ...) \
- for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d - 1, \
- i2 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d, ...) \
+ for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d - 1, \
+ i2 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d, ...) \
- for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d - 1, \
- i3 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d, ...) \
+ for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d - 1, \
+ i3 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d, ...) \
- for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d - 1, \
- i4 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d, ...) \
+ for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d - 1, \
+ i4 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d, ...) \
- for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d - 1, \
- i5 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d, ...) \
+ for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d - 1, \
+ i5 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d, ...) \
- for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d - 1, \
- i6 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d, ...) \
+ for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d - 1, \
+ i6 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_8(tag, func, type, m_offset, extent, d, ...) \
- for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d - 1, \
- i7 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_8(tag, func, type, m_offset, extent, d, ...) \
+ for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+ KOKKOS_IMPL_TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d - 1, \
+ i7 + m_offset[d], __VA_ARGS__) \
}
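// Taken together, the two families differ only in traversal order: the _R_
// macros recurse with d + 1 and append each new index, so the innermost
// loop runs over the last (stride-1 for LayoutRight) extent, while the _L_
// macros recurse with d - 1 and prepend, so the innermost loop runs over
// the first (stride-1 for LayoutLeft) extent. Either way the functor sees
// its indices in declaration order, e.g. for rank 3:
//   func(tag, i0 + off[0], i1 + off[1], i2 + off[2])   // LayoutLeft
//   func(tag, i2 + off[0], i1 + off[1], i0 + off[2])   // LayoutRight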
// Left vs Right
// TODO: rank not necessary to pass through, can hardcode the values
-#define TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent, rank) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
- TAGGED_APPLY(tag, func, i0 + m_offset[0]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, \
+ extent, rank) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY(tag, func, i0 + m_offset[0]) \
}
-#define TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
- TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, rank - 2, \
- i1 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, rank - 2, \
+ i1 + m_offset[rank - 1]) \
} \
} else { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) { \
- TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, 1, i1 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, 1, \
+ i1 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
- TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, rank - 2, \
- i2 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, rank - 2, \
+ i2 + m_offset[rank - 1]) \
} \
} else { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) { \
- TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, 1, i2 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, 1, \
+ i2 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
- TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, rank - 2, \
- i3 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, rank - 2, \
+ i3 + m_offset[rank - 1]) \
} \
} else { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) { \
- TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, 1, i3 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, 1, \
+ i3 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
- TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, rank - 2, \
- i4 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, rank - 2, \
+ i4 + m_offset[rank - 1]) \
} \
} else { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) { \
- TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, 1, i4 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, 1, \
+ i4 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
- TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, rank - 2, \
- i5 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, rank - 2, \
+ i5 + m_offset[rank - 1]) \
} \
} else { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) { \
- TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, 1, i5 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, 1, \
+ i5 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
- TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, rank - 2, \
- i6 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, rank - 2, \
+ i6 + m_offset[rank - 1]) \
} \
} else { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) { \
- TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, 1, i6 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, 1, \
+ i6 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, \
+ extent, rank) \
if (is_left) { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
- TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, rank - 2, \
- i7 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, rank - 2, \
+ i7 + m_offset[rank - 1]) \
} \
} else { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) { \
- TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, 1, i7 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, 1, \
+ i7 + m_offset[0]) \
} \
}
// Partial vs Full Tile
-#define TAGGED_TILE_LOOP_1(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_1(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_2(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_2(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_3(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_3(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_4(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_4(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_5(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_5(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_6(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_6(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_7(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_7(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
-#define TAGGED_TILE_LOOP_8(tag, func, type, is_left, cond, m_offset, \
- extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent_full, \
- rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent_partial, \
- rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_8(tag, func, type, is_left, cond, \
+ m_offset, extent_full, extent_partial, \
+ rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, \
+ extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, \
+ extent_partial, rank) \
}
// parallel_reduce, tagged
// Reduction version
-#define TAGGED_APPLY_REDUX(val, tag, func, ...) func(tag, __VA_ARGS__, val);
+#define KOKKOS_IMPL_TAGGED_APPLY_REDUX(val, tag, func, ...) \
+ func(tag, __VA_ARGS__, val);
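// As a hedged illustration (the functor shape is assumed, not prescribed
// here), the _REDUX variants simply thread the reduction variable through
// as the trailing argument, so a tagged rank-1 reduction tile ultimately
// invokes
//   func(MyTag(), i0 + m_offset[0], val);
// where `val` is the caller's thread-local partial result.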
// LayoutRight
// d = 0 at the outermost invocation; each level recurses with d + 1
-#define TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- TAGGED_APPLY_REDUX(val, tag, func, __VA_ARGS__, i0 + m_offset[d]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY_REDUX(val, tag, func, __VA_ARGS__, \
+ i0 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i1 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i1 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i2 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i2 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i3 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i3 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i4 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i4 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i5 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i5 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i6 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i6 + m_offset[d]) \
}
-#define TAGGED_LOOP_R_8_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_R_8_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, d + 1, \
- __VA_ARGS__, i7 + m_offset[d]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, \
+ d + 1, __VA_ARGS__, i7 + m_offset[d]) \
}
// LayoutLeft
// d = rank-1 at the outermost invocation; each level recurses with d - 1
-#define TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
- TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[d], __VA_ARGS__) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[d], \
+ __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
- TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i1 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i1 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
- TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i2 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i2 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
- TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i3 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i3 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
- TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i4 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i4 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
- TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i5 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i5 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
- TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i6 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i6 + m_offset[d], __VA_ARGS__) \
}
-#define TAGGED_LOOP_L_8_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+#define KOKKOS_IMPL_TAGGED_LOOP_L_8_REDUX(val, tag, func, type, m_offset, \
+ extent, d, ...) \
for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
- TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, d - 1, \
- i7 + m_offset[d], __VA_ARGS__) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, \
+ d - 1, i7 + m_offset[d], __VA_ARGS__) \
}
// Left vs Right
-#define TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
- KOKKOS_ENABLE_IVDEP_MDRANGE \
- for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
- TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[0]) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
+ KOKKOS_ENABLE_IVDEP_MDRANGE \
+ for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
+ KOKKOS_IMPL_TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[0]) \
}
-#define TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
- TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i1 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i1 + m_offset[rank - 1]) \
} \
} else { \
for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) { \
- TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i1 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i1 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
- TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i2 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i2 + m_offset[rank - 1]) \
} \
} else { \
for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) { \
- TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i2 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i2 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
- TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i3 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i3 + m_offset[rank - 1]) \
} \
} else { \
for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) { \
- TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i3 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i3 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
- TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i4 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i4 + m_offset[rank - 1]) \
} \
} else { \
for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) { \
- TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i4 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i4 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
- TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i5 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i5 + m_offset[rank - 1]) \
} \
} else { \
for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) { \
- TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i5 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i5 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
- TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i6 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i6 + m_offset[rank - 1]) \
} \
} else { \
for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) { \
- TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i6 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i6 + m_offset[0]) \
} \
}
-#define TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset, \
- extent, rank) \
+#define KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent, rank) \
if (is_left) { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
- TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
- i7 + m_offset[rank - 1]) \
+ KOKKOS_IMPL_TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, \
+ extent, rank - 2, \
+ i7 + m_offset[rank - 1]) \
} \
} else { \
for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) { \
- TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, 1, \
- i7 + m_offset[0]) \
+ KOKKOS_IMPL_TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, \
+ extent, 1, i7 + m_offset[0]) \
} \
}
// Partial vs Full Tile
-#define TAGGED_TILE_LOOP_1_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_2_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_3_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_4_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_5_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_6_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_7_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
- }
-
-#define TAGGED_TILE_LOOP_8_REDUX(val, tag, func, type, is_left, cond, \
- m_offset, extent_full, extent_partial, rank) \
- if (cond) { \
- TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_full, rank) \
- } else { \
- TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset, \
- extent_partial, rank) \
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_1_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_2_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_3_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_4_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_5_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_6_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_7_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
+ }
+
+#define KOKKOS_IMPL_TAGGED_TILE_LOOP_8_REDUX(val, tag, func, type, is_left, \
+ cond, m_offset, extent_full, \
+ extent_partial, rank) \
+ if (cond) { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_full, rank) \
+ } else { \
+ KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, \
+ m_offset, extent_partial, rank) \
}
// end tagged macros
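// The Tile_Loop_Type specializations below route a compile-time rank to the
// matching macro; the tagged specializations (selected when Tagged is not
// void) default-construct the tag. A hedged usage sketch, names assumed for
// illustration only:
//   Tile_Loop_Type<2, /*IsLeft=*/true, int, void>::apply(f, cond, off,
//                                                        full, part);
// forwards to KOKKOS_IMPL_TILE_LOOP_2(f, int, true, cond, off, full,
// part, 2).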
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_1(func, IType, IsLeft, cond, offset, a, b, 1);
+ KOKKOS_IMPL_TILE_LOOP_1(func, IType, IsLeft, cond, offset, a, b, 1);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_1_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 1);
+ KOKKOS_IMPL_TILE_LOOP_1_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 1);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_2(func, IType, IsLeft, cond, offset, a, b, 2);
+ KOKKOS_IMPL_TILE_LOOP_2(func, IType, IsLeft, cond, offset, a, b, 2);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_2_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 2);
+ KOKKOS_IMPL_TILE_LOOP_2_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 2);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_3(func, IType, IsLeft, cond, offset, a, b, 3);
+ KOKKOS_IMPL_TILE_LOOP_3(func, IType, IsLeft, cond, offset, a, b, 3);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_3_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 3);
+ KOKKOS_IMPL_TILE_LOOP_3_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 3);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_4(func, IType, IsLeft, cond, offset, a, b, 4);
+ KOKKOS_IMPL_TILE_LOOP_4(func, IType, IsLeft, cond, offset, a, b, 4);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_4_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 4);
+ KOKKOS_IMPL_TILE_LOOP_4_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 4);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_5(func, IType, IsLeft, cond, offset, a, b, 5);
+ KOKKOS_IMPL_TILE_LOOP_5(func, IType, IsLeft, cond, offset, a, b, 5);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_5_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 5);
+ KOKKOS_IMPL_TILE_LOOP_5_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 5);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_6(func, IType, IsLeft, cond, offset, a, b, 6);
+ KOKKOS_IMPL_TILE_LOOP_6(func, IType, IsLeft, cond, offset, a, b, 6);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_6_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 6);
+ KOKKOS_IMPL_TILE_LOOP_6_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 6);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_7(func, IType, IsLeft, cond, offset, a, b, 7);
+ KOKKOS_IMPL_TILE_LOOP_7(func, IType, IsLeft, cond, offset, a, b, 7);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_7_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 7);
+ KOKKOS_IMPL_TILE_LOOP_7_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 7);
}
};
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_8(func, IType, IsLeft, cond, offset, a, b, 8);
+ KOKKOS_IMPL_TILE_LOOP_8(func, IType, IsLeft, cond, offset, a, b, 8);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TILE_LOOP_8_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 8);
+ KOKKOS_IMPL_TILE_LOOP_8_REDUX(value, func, IType, IsLeft, cond, offset, a,
+ b, 8);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<1, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_1(Tagged(), func, IType, IsLeft, cond, offset, a, b, 1);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_1(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 1);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_1_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 1);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_1_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 1);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<2, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_2(Tagged(), func, IType, IsLeft, cond, offset, a, b, 2);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_2(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 2);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_2_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 2);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_2_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 2);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<3, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_3(Tagged(), func, IType, IsLeft, cond, offset, a, b, 3);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_3(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 3);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_3_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 3);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_3_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 3);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<4, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_4(Tagged(), func, IType, IsLeft, cond, offset, a, b, 4);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_4(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 4);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_4_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 4);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_4_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 4);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<5, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_5(Tagged(), func, IType, IsLeft, cond, offset, a, b, 5);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_5(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 5);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_5_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 5);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_5_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 5);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<6, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_6(Tagged(), func, IType, IsLeft, cond, offset, a, b, 6);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_6(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 6);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_6_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 6);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_6_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 6);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<7, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_7(Tagged(), func, IType, IsLeft, cond, offset, a, b, 7);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_7(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 7);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_7_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 7);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_7_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 7);
}
};
template <bool IsLeft, typename IType, typename Tagged>
struct Tile_Loop_Type<8, IsLeft, IType, Tagged,
- std::enable_if_t<!std::is_void<Tagged>::value>> {
+ std::enable_if_t<!std::is_void_v<Tagged>>> {
template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
static void apply(Func const& func, bool cond, Offset const& offset,
ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_8(Tagged(), func, IType, IsLeft, cond, offset, a, b, 8);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_8(Tagged(), func, IType, IsLeft, cond, offset,
+ a, b, 8);
}
template <typename ValType, typename Func, typename Offset, typename ExtentA,
typename ExtentB>
static void apply(ValType& value, Func const& func, bool cond,
Offset const& offset, ExtentA const& a, ExtentB const& b) {
- TAGGED_TILE_LOOP_8_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
- a, b, 8);
+ KOKKOS_IMPL_TAGGED_TILE_LOOP_8_REDUX(value, Tagged(), func, IType, IsLeft,
+ cond, offset, a, b, 8);
}
};
// end Structs for calling loops
// For ParallelFor
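// (The ValueType template parameter doubles as the dispatch switch:
// parallel_for selects this void specialization, parallel_reduce the
// non-void one further down.)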
template <typename RP, typename Functor, typename Tag, typename ValueType>
struct HostIterateTile<RP, Functor, Tag, ValueType,
- std::enable_if_t<std::is_void<ValueType>::value>> {
+ std::enable_if_t<std::is_void_v<ValueType>>> {
using index_type = typename RP::index_type;
using point_type = typename RP::point_type;
} else {
is_full_tile = false;
partial_tile[i] =
- (m_rp.m_upper[i] - 1 - offset[i]) == 0
- ? 1
- : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
- ? (m_rp.m_upper[i] - offset[i])
- : (m_rp.m_upper[i] -
- m_rp.m_lower[i]); // when single tile encloses range
+ (m_rp.m_upper[i] - 1 - offset[i]) == 0 ? 1
+ : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+ ? (m_rp.m_upper[i] - offset[i])
+ : (m_rp.m_upper[i] -
+ m_rp.m_lower[i]); // when single tile encloses range
}
}
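// Worked example of the partial-tile extent above (numbers illustrative):
// for a 1-D range [0, 10) with tile size 4, the tile at offset 8 is partial
// and gets extent upper - offset = 10 - 8 = 2; if instead a single tile
// covers the whole range (upper - tile <= 0), the extent falls back to
// upper - lower.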
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Right
#endif
template <typename... Args>
- std::enable_if_t<(sizeof...(Args) == RP::rank && std::is_void<Tag>::value),
- void>
+ std::enable_if_t<(sizeof...(Args) == RP::rank && std::is_void_v<Tag>), void>
apply(Args&&... args) const {
m_func(args...);
}
template <typename... Args>
- std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
- void>
+ std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void_v<Tag>), void>
apply(Args&&... args) const {
m_func(m_tag, args...);
}
- RP const& m_rp;
- Functor const& m_func;
- std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+ RP const m_rp;
+ Functor const m_func;
+ std::conditional_t<std::is_void_v<Tag>, int, Tag> m_tag;
};
// For ParallelReduce
// ValueType - scalar: For reductions
template <typename RP, typename Functor, typename Tag, typename ValueType>
struct HostIterateTile<RP, Functor, Tag, ValueType,
- std::enable_if_t<!std::is_void<ValueType>::value &&
- !std::is_array<ValueType>::value>> {
+ std::enable_if_t<!std::is_void_v<ValueType> &&
+ !std::is_array_v<ValueType>>> {
using index_type = typename RP::index_type;
using point_type = typename RP::point_type;
using value_type = ValueType;
- inline HostIterateTile(RP const& rp, Functor const& func, value_type& v)
+ inline HostIterateTile(RP const& rp, Functor const& func)
: m_rp(rp) // Cuda 7.0 does not like braces...
,
- m_func(func),
- m_v(v) // use with non-void ValueType struct
- {
+ m_func(func) {
// Errors due to braces rather than parentheses for init (with cuda 7.0)
// /home/ndellin/kokkos/core/src/impl/KokkosExp_Host_IterateTile.hpp:1216:98:
// error: too many braces around initializer for ‘int’ [-fpermissive]
} else {
is_full_tile = false;
partial_tile[i] =
- (m_rp.m_upper[i] - 1 - offset[i]) == 0
- ? 1
- : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
- ? (m_rp.m_upper[i] - offset[i])
- : (m_rp.m_upper[i] -
- m_rp.m_lower[i]); // when single tile encloses range
+ (m_rp.m_upper[i] - 1 - offset[i]) == 0 ? 1
+ : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+ ? (m_rp.m_upper[i] - offset[i])
+ : (m_rp.m_upper[i] -
+ m_rp.m_lower[i]); // when single tile encloses range
}
}
#if KOKKOS_ENABLE_NEW_LOOP_MACROS
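+  // With the new loop macros the reduction value is threaded through
+  // operator() for each tile instead of being held as the member reference
+  // m_v, so the iterate-tile object can be captured by value.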
template <typename IType>
- inline void operator()(IType tile_idx) const {
+ inline void operator()(IType tile_idx, value_type& val) const {
point_type m_offset;
point_type m_tiledims;
const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
Tile_Loop_Type<RP::rank, (RP::inner_direction == Iterate::Left), index_type,
- Tag>::apply(m_v, m_func, full_tile, m_offset, m_rp.m_tile,
- m_tiledims);
+ Tag>::apply(val, m_func.get_functor(), full_tile, m_offset,
+ m_rp.m_tile, m_tiledims);
}
#else
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Right
} // end op() rank == 8
-#endif
template <typename... Args>
std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
                 void>
apply(Args&&... args) const {
m_func(m_tag, args..., m_v);
}
+#endif
- RP const& m_rp;
- Functor const& m_func;
- value_type& m_v;
- std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+ RP const m_rp;
+ Functor const m_func;
+ std::conditional_t<std::is_void_v<Tag>, int, Tag> m_tag;
};
// For ParallelReduce
// ValueType[]: For array reductions
template <typename RP, typename Functor, typename Tag, typename ValueType>
struct HostIterateTile<RP, Functor, Tag, ValueType,
- std::enable_if_t<!std::is_void<ValueType>::value &&
- std::is_array<ValueType>::value>> {
+ std::enable_if_t<!std::is_void_v<ValueType> &&
+ std::is_array_v<ValueType>>> {
using index_type = typename RP::index_type;
using point_type = typename RP::point_type;
using value_type =
    std::remove_extent_t<ValueType>;  // strip the 'array-ness' [], only the
                                      // underlying type remains
- inline HostIterateTile(
- RP const& rp, Functor const& func,
- value_type* v) // v should be an array; treat as pointer for
- // compatibility since size is not known nor needed here
- : m_rp(rp) // Cuda 7.0 does not like braces...
+ inline HostIterateTile(RP const& rp, Functor const& func)
+ : m_rp(rp) // Cuda 7.0 does not like braces...
,
- m_func(func),
- m_v(v) // use with non-void ValueType struct
- {}
+ m_func(func) {}
inline bool check_iteration_bounds(point_type& partial_tile,
point_type& offset) const {
} else {
is_full_tile = false;
partial_tile[i] =
- (m_rp.m_upper[i] - 1 - offset[i]) == 0
- ? 1
- : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
- ? (m_rp.m_upper[i] - offset[i])
- : (m_rp.m_upper[i] -
- m_rp.m_lower[i]); // when single tile encloses range
+ (m_rp.m_upper[i] - 1 - offset[i]) == 0 ? 1
+ : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+ ? (m_rp.m_upper[i] - offset[i])
+ : (m_rp.m_upper[i] -
+ m_rp.m_lower[i]); // when single tile encloses range
}
}
#if KOKKOS_ENABLE_NEW_LOOP_MACROS
template <typename IType>
- inline void operator()(IType tile_idx) const {
+ inline void operator()(IType tile_idx, value_type* val) const {
point_type m_offset;
point_type m_tiledims;
const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
Tile_Loop_Type<RP::rank, (RP::inner_direction == Iterate::Left), index_type,
- Tag>::apply(m_v, m_func, full_tile, m_offset, m_rp.m_tile,
+ Tag>::apply(val, m_func, full_tile, m_offset, m_rp.m_tile,
m_tiledims);
}
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
} else {
// #pragma simd
- LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+ KOKKOS_IMPL_LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
} else {
// #pragma simd
- LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+ KOKKOS_IMPL_LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
} else {
// #pragma simd
- LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+ KOKKOS_IMPL_LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
} else {
// #pragma simd
- LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+ KOKKOS_IMPL_LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
} else {
// #pragma simd
- LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+ KOKKOS_IMPL_LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
} else {
// #pragma simd
- LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+ KOKKOS_IMPL_LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
}
} // end Iterate::Right
if (RP::inner_direction == Iterate::Left) {
if (full_tile) {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Left
else {
if (full_tile) {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
} else {
// #pragma simd
- LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+ KOKKOS_IMPL_LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
}
} // end Iterate::Right
} // end op() rank == 8
-#endif
-
template <typename... Args>
std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
void>
apply(Args&&... args) const {
m_func(m_tag, args..., m_v);
}
+#endif
- RP const& m_rp;
- Functor const& m_func;
- value_type* m_v;
- std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+ RP const m_rp;
+ Functor const m_func;
+ std::conditional_t<std::is_void_v<Tag>, int, Tag> m_tag;
};
// ------------------------------------------------------------------ //
#undef KOKKOS_ENABLE_NEW_LOOP_MACROS
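+// The KOKKOS_IMPL_* loop helpers are private to this header; undefine them
+// all so they cannot leak into including translation units.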
+#undef KOKKOS_IMPL_LOOP_1L
+#undef KOKKOS_IMPL_LOOP_2L
+#undef KOKKOS_IMPL_LOOP_3L
+#undef KOKKOS_IMPL_LOOP_4L
+#undef KOKKOS_IMPL_LOOP_5L
+#undef KOKKOS_IMPL_LOOP_6L
+#undef KOKKOS_IMPL_LOOP_7L
+#undef KOKKOS_IMPL_LOOP_8L
+#undef KOKKOS_IMPL_LOOP_1R
+#undef KOKKOS_IMPL_LOOP_2R
+#undef KOKKOS_IMPL_LOOP_3R
+#undef KOKKOS_IMPL_LOOP_4R
+#undef KOKKOS_IMPL_LOOP_5R
+#undef KOKKOS_IMPL_LOOP_6R
+#undef KOKKOS_IMPL_LOOP_7R
+#undef KOKKOS_IMPL_LOOP_8R
+#undef KOKKOS_IMPL_LOOP_ARGS_1
+#undef KOKKOS_IMPL_LOOP_ARGS_2
+#undef KOKKOS_IMPL_LOOP_ARGS_3
+#undef KOKKOS_IMPL_LOOP_ARGS_4
+#undef KOKKOS_IMPL_LOOP_ARGS_5
+#undef KOKKOS_IMPL_LOOP_ARGS_6
+#undef KOKKOS_IMPL_LOOP_ARGS_7
+#undef KOKKOS_IMPL_LOOP_ARGS_8
+#undef KOKKOS_IMPL_APPLY
+#undef KOKKOS_IMPL_LOOP_R_1
+#undef KOKKOS_IMPL_LOOP_R_2
+#undef KOKKOS_IMPL_LOOP_R_3
+#undef KOKKOS_IMPL_LOOP_R_4
+#undef KOKKOS_IMPL_LOOP_R_5
+#undef KOKKOS_IMPL_LOOP_R_6
+#undef KOKKOS_IMPL_LOOP_R_7
+#undef KOKKOS_IMPL_LOOP_R_8
+#undef KOKKOS_IMPL_LOOP_L_1
+#undef KOKKOS_IMPL_LOOP_L_2
+#undef KOKKOS_IMPL_LOOP_L_3
+#undef KOKKOS_IMPL_LOOP_L_4
+#undef KOKKOS_IMPL_LOOP_L_5
+#undef KOKKOS_IMPL_LOOP_L_6
+#undef KOKKOS_IMPL_LOOP_L_7
+#undef KOKKOS_IMPL_LOOP_L_8
+#undef KOKKOS_IMPL_LOOP_LAYOUT_1
+#undef KOKKOS_IMPL_LOOP_LAYOUT_2
+#undef KOKKOS_IMPL_LOOP_LAYOUT_3
+#undef KOKKOS_IMPL_LOOP_LAYOUT_4
+#undef KOKKOS_IMPL_LOOP_LAYOUT_5
+#undef KOKKOS_IMPL_LOOP_LAYOUT_6
+#undef KOKKOS_IMPL_LOOP_LAYOUT_7
+#undef KOKKOS_IMPL_LOOP_LAYOUT_8
+#undef KOKKOS_IMPL_TILE_LOOP_1
+#undef KOKKOS_IMPL_TILE_LOOP_2
+#undef KOKKOS_IMPL_TILE_LOOP_3
+#undef KOKKOS_IMPL_TILE_LOOP_4
+#undef KOKKOS_IMPL_TILE_LOOP_5
+#undef KOKKOS_IMPL_TILE_LOOP_6
+#undef KOKKOS_IMPL_TILE_LOOP_7
+#undef KOKKOS_IMPL_TILE_LOOP_8
+#undef KOKKOS_IMPL_APPLY_REDUX
+#undef KOKKOS_IMPL_LOOP_R_1_REDUX
+#undef KOKKOS_IMPL_LOOP_R_2_REDUX
+#undef KOKKOS_IMPL_LOOP_R_3_REDUX
+#undef KOKKOS_IMPL_LOOP_R_4_REDUX
+#undef KOKKOS_IMPL_LOOP_R_5_REDUX
+#undef KOKKOS_IMPL_LOOP_R_6_REDUX
+#undef KOKKOS_IMPL_LOOP_R_7_REDUX
+#undef KOKKOS_IMPL_LOOP_R_8_REDUX
+#undef KOKKOS_IMPL_LOOP_L_1_REDUX
+#undef KOKKOS_IMPL_LOOP_L_2_REDUX
+#undef KOKKOS_IMPL_LOOP_L_3_REDUX
+#undef KOKKOS_IMPL_LOOP_L_4_REDUX
+#undef KOKKOS_IMPL_LOOP_L_5_REDUX
+#undef KOKKOS_IMPL_LOOP_L_6_REDUX
+#undef KOKKOS_IMPL_LOOP_L_7_REDUX
+#undef KOKKOS_IMPL_LOOP_L_8_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_1_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_2_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_3_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_4_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_5_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_6_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_7_REDUX
+#undef KOKKOS_IMPL_LOOP_LAYOUT_8_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_1_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_2_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_3_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_4_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_5_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_6_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_7_REDUX
+#undef KOKKOS_IMPL_TILE_LOOP_8_REDUX
+#undef KOKKOS_IMPL_TAGGED_APPLY
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_1
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_2
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_3
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_4
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_5
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_6
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_7
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_8
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_1
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_2
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_3
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_4
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_5
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_6
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_7
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_8
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_1
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_2
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_3
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_4
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_5
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_6
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_7
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_8
+#undef KOKKOS_IMPL_TAGGED_APPLY_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_1_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_2_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_3_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_4_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_5_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_6_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_7_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_R_8_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_1_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_2_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_3_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_4_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_5_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_6_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_7_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_L_8_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_1_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_2_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_3_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_4_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_5_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_6_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_7_REDUX
+#undef KOKKOS_IMPL_TAGGED_LOOP_LAYOUT_8_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_1_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_2_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_3_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_4_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_5_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_6_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_7_REDUX
+#undef KOKKOS_IMPL_TAGGED_TILE_LOOP_8_REDUX
} // namespace Impl
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EXP_ITERATE_TILE_GPU_HPP
#define KOKKOS_EXP_ITERATE_TILE_GPU_HPP
template <class Tag, class Functor, class... Args>
KOKKOS_IMPL_FORCEINLINE_FUNCTION std::enable_if_t<std::is_void<Tag>::value>
_tag_invoke(Functor const& f, Args&&... args) {
- f((Args &&) args...);
+ f((Args&&)args...);
}
template <class Tag, class Functor, class... Args>
KOKKOS_IMPL_FORCEINLINE_FUNCTION std::enable_if_t<!std::is_void<Tag>::value>
_tag_invoke(Functor const& f, Args&&... args) {
- f(Tag{}, (Args &&) args...);
+ f(Tag{}, (Args&&)args...);
}
template <class Tag, class Functor, class T, size_t N, size_t... Idxs,
          class... Args>
KOKKOS_IMPL_FORCEINLINE_FUNCTION void _tag_invoke_array_helper(
Functor const& f, T (&vals)[N], std::integer_sequence<size_t, Idxs...>,
Args&&... args) {
- _tag_invoke<Tag>(f, vals[Idxs]..., (Args &&) args...);
+ _tag_invoke<Tag>(f, vals[Idxs]..., (Args&&)args...);
}
template <class Tag, class Functor, class T, size_t N, class... Args>
KOKKOS_IMPL_FORCEINLINE_FUNCTION void _tag_invoke_array(Functor const& f,
                                                        T (&vals)[N],
Args&&... args) {
_tag_invoke_array_helper<Tag>(f, vals, std::make_index_sequence<N>{},
- (Args &&) args...);
+ (Args&&)args...);
}
// ------------------------------------------------------------------ //
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
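+// Defining KOKKOS_IMPL_PUBLIC_INCLUDE up front lets this translation unit
+// include private impl/ headers without tripping the public-include guard.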
+
+#include <cstdlib>
+#include <iostream>
+#include <Kokkos_Abort.hpp>
+#include <impl/Kokkos_Stacktrace.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
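+// Minimal host-side abort: print the message, append a demangled backtrace
+// when KOKKOS_IMPL_ENABLE_STACKTRACE is defined, then call ::abort().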
+void host_abort(const char *const message) {
+ std::cerr << message;
+
+#ifdef KOKKOS_IMPL_ENABLE_STACKTRACE
+ std::cerr << "\nBacktrace:\n";
+ save_stacktrace();
+ print_demangled_saved_stacktrace(std::cerr);
+#else
+ std::cerr << "\nTraceback functionality not available\n";
+#endif
+
+ ::abort();
+}
+
+} // namespace Impl
+} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_ANALYZE_POLICY_HPP
#define KOKKOS_IMPL_ANALYZE_POLICY_HPP
static constexpr auto trigger_error_message =
show_name_of_invalid_execution_policy_trait<Trait>{};
static_assert(
- /* always false: */ std::is_void<Trait>::value,
+ /* always false: */ std::is_void_v<Trait>,
"Unknown execution policy trait. Search compiler output for "
"'show_name_of_invalid_execution_policy_trait' to see the type of the "
"invalid trait.");
//------------------------------------------------------------------------------
-constexpr bool warn_if_deprecated(std::false_type) { return true; }
-KOKKOS_DEPRECATED_WITH_COMMENT(
- "Invalid WorkTag template argument in execution policy!!")
-constexpr bool warn_if_deprecated(std::true_type) { return true; }
-#define KOKKOS_IMPL_STATIC_WARNING(...) \
- static_assert( \
- warn_if_deprecated(std::integral_constant<bool, __VA_ARGS__>()), "")
-
template <typename... Traits>
struct PolicyTraits
: ExecPolicyTraitsWithDefaults<AnalyzeExecPolicy<void, Traits...>> {
using base_t =
ExecPolicyTraitsWithDefaults<AnalyzeExecPolicy<void, Traits...>>;
using base_t::base_t;
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
- KOKKOS_IMPL_STATIC_WARNING(!std::is_empty<typename base_t::work_tag>::value &&
- !std::is_void<typename base_t::work_tag>::value);
-#endif
};
-#undef KOKKOS_IMPL_STATIC_WARNING
-
} // namespace Impl
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_BITOPS_HPP
#define KOKKOS_BITOPS_HPP
#include <cstdint>
#include <climits>
-#ifdef KOKKOS_COMPILER_INTEL
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
#include <immintrin.h>
#endif
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
return shift - __clz(i);
-#elif defined(KOKKOS_COMPILER_INTEL)
+#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return _bit_scan_reverse(i);
#else
return int_log2_fallback(i);
KOKKOS_IMPL_HOST_FUNCTION
inline int int_log2_host(unsigned i) {
// duplicating shift to avoid unused warning in else branch
-#if defined(KOKKOS_COMPILER_INTEL)
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
(void)shift;
return _bit_scan_reverse(i);
constexpr unsigned full = ~0u;
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
return full != i ? __ffs(~i) - 1 : -1;
-#elif defined(KOKKOS_COMPILER_INTEL)
+#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return full != i ? _bit_scan_forward(~i) : -1;
#else
(void)full;
KOKKOS_IMPL_HOST_FUNCTION
inline int bit_first_zero_host(unsigned i) noexcept {
constexpr unsigned full = ~0u;
-#if defined(KOKKOS_COMPILER_INTEL)
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return full != i ? _bit_scan_forward(~i) : -1;
#elif defined(KOKKOS_COMPILER_CRAYC)
return full != i ? _popcnt(i ^ (i + 1)) - 1 : -1;
KOKKOS_IMPL_DEVICE_FUNCTION inline int bit_scan_forward_device(unsigned i) {
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
return __ffs(i) - 1;
-#elif defined(KOKKOS_COMPILER_INTEL)
+#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return _bit_scan_forward(i);
#else
return bit_scan_forward_fallback(i);
}
KOKKOS_IMPL_HOST_FUNCTION inline int bit_scan_forward_host(unsigned i) {
-#if defined(KOKKOS_COMPILER_INTEL)
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return _bit_scan_forward(i);
#elif defined(KOKKOS_COMPILER_CRAYC)
return i ? _popcnt(~i & (i - 1)) : -1;
KOKKOS_IMPL_DEVICE_FUNCTION inline int bit_count_device(unsigned i) {
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
return __popc(i);
-#elif defined(KOKKOS_COMPILER_INTEL)
+#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return _popcnt32(i);
#else
return bit_count_fallback(i);
}
KOKKOS_IMPL_HOST_FUNCTION inline int bit_count_host(unsigned i) {
-#if defined(KOKKOS_COMPILER_INTEL)
+#if defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_INTEL_LLVM)
return _popcnt32(i);
#elif defined(KOKKOS_COMPILER_CRAYC)
return _popcnt(i);
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <impl/Kokkos_CPUDiscovery.hpp>
+
+#include <cstdlib> // getenv
+#include <string>
+
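+// Per-node MPI layout is inferred from launcher-specific environment
+// variables; each probe falls through to -1 when no supported launcher is
+// detected.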
+int Kokkos::Impl::mpi_ranks_per_node() {
+ for (char const* env_var : {
+ "OMPI_COMM_WORLD_LOCAL_SIZE", // OpenMPI
+ "MV2_COMM_WORLD_LOCAL_SIZE", // MVAPICH2
+ "MPI_LOCALNRANKS", // MPICH
+ // SLURM???
+ "PMI_LOCAL_SIZE", // PMI
+ }) {
+ char const* str = std::getenv(env_var);
+ if (str) {
+ return std::stoi(str);
+ }
+ }
+ return -1;
+}
+
+int Kokkos::Impl::mpi_local_rank_on_node() {
+ for (char const* env_var : {
+ "OMPI_COMM_WORLD_LOCAL_RANK", // OpenMPI
+ "MV2_COMM_WORLD_LOCAL_RANK", // MVAPICH2
+ "MPI_LOCALRANKID", // MPICH
+ "SLURM_LOCALID", // SLURM
+ "PMI_LOCAL_RANK", // PMI
+ }) {
+ char const* str = std::getenv(env_var);
+ if (str) {
+ return std::stoi(str);
+ }
+ }
+ return -1;
+}
+
+bool Kokkos::Impl::mpi_detected() { return mpi_local_rank_on_node() != -1; }
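+
+// Usage sketch (hypothetical caller; num_devices is assumed to be provided
+// by the backend): derive a device id from the local rank for round-robin
+// device pinning.
+//
+//   const int local  = Kokkos::Impl::mpi_local_rank_on_node();
+//   const int device = (local >= 0) ? local % num_devices : 0;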
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+namespace Kokkos {
+namespace Impl {
+
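+// Both queries return -1 when no supported MPI launcher is detected.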
+int mpi_ranks_per_node();
+int mpi_local_rank_on_node();
+// Returns true if an MPI execution environment is detected, false otherwise.
+bool mpi_detected();
+
+} // namespace Impl
+} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#include <impl/Kokkos_LinkedListNode.hpp> // KOKKOS_EXPECTS
#include <Kokkos_Atomic.hpp> // atomic_compare_exchange, atomic_fence
-#include "Kokkos_LIFO.hpp"
+#include <impl/Kokkos_LIFO.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
non_owning_variable_size_circular_buffer(
non_owning_variable_size_circular_buffer const&) = delete;
non_owning_variable_size_circular_buffer(
- non_owning_variable_size_circular_buffer&&) = default;
- non_owning_variable_size_circular_buffer& operator =(
+ non_owning_variable_size_circular_buffer&&) = default;
+ non_owning_variable_size_circular_buffer& operator=(
non_owning_variable_size_circular_buffer const&) = delete;
- non_owning_variable_size_circular_buffer& operator =(
+ non_owning_variable_size_circular_buffer& operator=(
non_owning_variable_size_circular_buffer&&) = default;
- ~non_owning_variable_size_circular_buffer() = default;
+ ~non_owning_variable_size_circular_buffer() = default;
KOKKOS_FORCEINLINE_FUNCTION
constexpr size_type size() const noexcept { return m_size; }
public:
template <class _ignore = void,
class = std::enable_if_t<
- std::is_default_constructible<CircularBufferT>::value>>
+ std::is_default_constructible_v<CircularBufferT>>>
ChaseLevDeque() : m_array() {}
explicit ChaseLevDeque(CircularBufferT buffer) : m_array(std::move(buffer)) {}
#ifdef _WIN32
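+        // atomic_compare_exchange returns the value observed at m_top, so
+        // comparing it against the expected t reconstructs the boolean
+        // result of the removed atomic_compare_exchange_strong call.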
Kokkos::memory_fence();
bool const success =
- Kokkos::atomic_compare_exchange_strong(&m_top, t, t + 1);
+ (t == Kokkos::atomic_compare_exchange(&m_top, t, t + 1));
Kokkos::memory_fence();
if (!success) {
return_value = nullptr;
}
#else
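+    // Memory orders are now spelled as desul tag types, e.g.
+    // desul::MemoryOrderSeqCst(), rather than the legacy Kokkos
+    // memory_order_* constants.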
if (!Impl::atomic_compare_exchange_strong(
- &m_top, t, t + 1, memory_order_seq_cst, memory_order_relaxed)) {
+ &m_top, t, t + 1, desul::MemoryOrderSeqCst(),
+ desul::MemoryOrderRelaxed())) {
/* failed race, someone else stole it */
return_value = nullptr;
}
KOKKOS_INLINE_FUNCTION
bool push(node_type& node) {
auto b = m_bottom; // memory order relaxed
- auto t = Impl::atomic_load(&m_top, memory_order_acquire);
+ auto t = Impl::atomic_load(&m_top, desul::MemoryOrderAcquire());
auto& a = m_array;
if (b - t > a.size() - 1) {
/* queue is full, resize */
return false;
}
a[b] = &node; // relaxed
- Impl::atomic_store(&m_bottom, b + 1, memory_order_release);
+ Impl::atomic_store(&m_bottom, b + 1, desul::MemoryOrderRelease());
return true;
}
auto t = m_top; // TODO @tasking @memory_order DSH: atomic load acquire
Kokkos::memory_fence(); // seq_cst fence, so why does the above need to be
// acquire?
- auto b = Impl::atomic_load(&m_bottom, memory_order_acquire);
+ auto b = Impl::atomic_load(&m_bottom, desul::MemoryOrderAcquire());
OptionalRef<T> return_value;
if (t < b) {
/* Non-empty queue */
#ifdef _WIN32
Kokkos::memory_fence();
bool const success =
- Kokkos::atomic_compare_exchange_strong(&m_top, t, t + 1);
+ (t == Kokkos::atomic_compare_exchange(&m_top, t, t + 1));
Kokkos::memory_fence();
if (!success) {
return_value = nullptr;
}
#else
- if (!Impl::atomic_compare_exchange_strong(
- &m_top, t, t + 1, memory_order_seq_cst, memory_order_relaxed)) {
+ if (!Impl::atomic_compare_exchange_strong(&m_top, t, t + 1,
+ desul::MemoryOrderSeqCst(),
+ desul::MemoryOrderRelaxed())) {
return_value = nullptr;
}
#endif
// essentially using the memory order in this version as a fence, which
// may be unnecessary
auto buffer_ptr = (node_type***)&m_array.buffer;
- auto a = Impl::atomic_load(buffer_ptr, memory_order_acquire);
+ auto a = Impl::atomic_load(buffer_ptr, desul::MemoryOrderAcquire());
  // technically consume ordered, but acquire should be fine
  return_value = *static_cast<T*>(a[t % m_array->size]);
  // relaxed; we'd have to replace the m_array->size if we ever allow growth
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CHECKED_INTEGER_OPS_HPP
+#define KOKKOS_CHECKED_INTEGER_OPS_HPP
+
+#include <type_traits>
+
+#include <impl/Kokkos_Error.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+#if defined(__has_builtin)
+#if __has_builtin(__builtin_mul_overflow)
+#define KOKKOS_IMPL_USE_MUL_OVERFLOW_BUILTIN
+#endif
+#endif
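+
+// Prefer the compiler builtin when available: it reports overflow directly
+// instead of relying on the division check in the portable fallback below.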
+
+template <typename T>
+std::enable_if_t<std::is_integral_v<T>, bool> constexpr multiply_overflow(
+ T a, T b, T& res) {
+ static_assert(std::is_unsigned_v<T>,
+ "Operation not implemented for signed integers.");
+
+#if defined(KOKKOS_IMPL_USE_MUL_OVERFLOW_BUILTIN)
+ return __builtin_mul_overflow(a, b, &res);
+#else
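+  // Portable fallback: unsigned multiplication wraps modulo 2^N, so the
+  // product overflowed exactly when dividing it by a nonzero operand fails
+  // to recover the other operand.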
+ auto product = a * b;
+ if ((a == 0) || (b == 0) || (a == product / b)) {
+ res = product;
+ return false;
+ } else {
+ return true;
+ }
+#endif
+}
+
+#undef KOKKOS_IMPL_USE_MUL_OVERFLOW_BUILTIN
+
+template <typename T>
+T multiply_overflow_abort(T a, T b) {
+ T result;
+ if (multiply_overflow(a, b, result))
+ Kokkos::abort("Arithmetic overflow detected.");
+
+ return result;
+}
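+
+// Usage sketch (illustrative; `extent` stands for any untrusted std::size_t
+// count): computing an allocation size without silent wraparound.
+//
+//   std::size_t bytes =
+//       Kokkos::Impl::multiply_overflow_abort(extent, sizeof(double));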
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // KOKKOS_CHECKED_INTEGER_OPS_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CLOCKTIC_HPP
#define KOKKOS_CLOCKTIC_HPP
// To use OpenCL(TM) built-in intrinsics inside kernels, we have to
// forward-declare their prototype, also see
// https://github.com/intel/pti-gpu/blob/master/chapters/binary_instrumentation/OpenCLBuiltIn.md
-#if defined(KOKKOS_ENABLE_SYCL) && defined(KOKKOS_ARCH_INTEL_GPU) && \
- defined(__SYCL_DEVICE_ONLY__)
+#if defined(KOKKOS_ENABLE_SYCL) && \
+ defined(KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE) && \
+ defined(KOKKOS_ARCH_INTEL_GPU) && defined(__SYCL_DEVICE_ONLY__)
extern SYCL_EXTERNAL unsigned long __attribute__((overloadable))
intel_get_cycle_counter();
#endif
// Return value of 64-bit hi-res clock register.
return clock64();
-#elif defined(KOKKOS_ENABLE_SYCL) && defined(KOKKOS_ARCH_INTEL_GPU) && \
- defined(__SYCL_DEVICE_ONLY__)
+// FIXME_SYCL We can only return something useful for Intel GPUs and with RDC
+#elif defined(KOKKOS_ENABLE_SYCL) && \
+ defined(KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE) && \
+ defined(KOKKOS_ARCH_INTEL_GPU) && defined(__SYCL_DEVICE_ONLY__)
return intel_get_cycle_counter();
return ((uint64_t)a) | (((uint64_t)d) << 32);
-#elif defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__POWERPC__) || defined(__ppc__) || defined(__ppc64__)
+#elif defined(__powerpc64__) || defined(__ppc64__)
- unsigned int cycles = 0;
+ unsigned long cycles = 0;
asm volatile("mftb %0" : "=r"(cycles));
return (uint64_t)cycles;
+#elif defined(__ppc__)
+ // see : pages.cs.wisc.edu/~legault/miniproj-736.pdf or
+ // cmssdt.cern.ch/lxr/source/FWCore/Utilities/interface/HRRealTime.h
+
+ uint64_t result = 0;
+ uint32_t upper, lower, tmp;
+
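+  // Read the 64-bit timebase as two 32-bit halves: upper (mftbu), then
+  // lower (mftb), then upper again; branch back when the upper half changed
+  // between the two reads (the lower half wrapped mid-sequence).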
+ __asm__ volatile(
+ "0: \n"
+ "\tmftbu %0 \n"
+ "\tmftb %1 \n"
+ "\tmftbu %2 \n"
+ "\tcmpw %2, %0 \n"
+ "\tbne 0b \n"
+ : "=r"(upper), "=r"(lower), "=r"(tmp));
+ result = upper;
+ result = result << 32;
+ result = result | lower;
+
+ return (result);
+
#else
return std::chrono::high_resolution_clock::now().time_since_epoch().count();
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_COMBINED_REDUCER_HPP
#define KOKKOS_COMBINED_REDUCER_HPP
#include <Kokkos_Parallel_Reduce.hpp>
#include <Kokkos_ExecPolicy.hpp>
#include <Kokkos_AnonymousSpace.hpp>
-#include <impl/Kokkos_Utilities.hpp> // comma operator fold emulation
#include <utility>
std::move(arg_values))... {}
template <size_t Idx, class ValueType>
- KOKKOS_INLINE_FUNCTION ValueType& get() & noexcept {
+ KOKKOS_INLINE_FUNCTION ValueType& get() & noexcept {
return this->CombinedReducerValueItemImpl<Idx, ValueType>::ref();
}
template <size_t Idx, class ValueType>
// model Reducer
KOKKOS_INLINE_FUNCTION
- constexpr _fold_comma_emulation_return _init(value_type& val) const {
- m_reducer.init(val);
- return _fold_comma_emulation_return{};
- }
+ constexpr void _init(value_type& val) const { m_reducer.init(val); }
- KOKKOS_INLINE_FUNCTION constexpr _fold_comma_emulation_return _join(
- value_type& dest, value_type const& src) const {
+ KOKKOS_INLINE_FUNCTION constexpr void _join(value_type& dest,
+ value_type const& src) const {
m_reducer.join(dest, src);
- return _fold_comma_emulation_return{};
}
};
KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl(
CombinedReducerImpl const&) = default;
KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl(
- CombinedReducerImpl&&) = default;
+ CombinedReducerImpl&&) = default;
KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl& operator=(
CombinedReducerImpl const&) = default;
KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl& operator=(
template <class... ReducersDeduced>
KOKKOS_FUNCTION constexpr explicit CombinedReducerImpl(
value_type& value, ReducersDeduced&&... reducers) noexcept
- : CombinedReducerStorageImpl<Idxs, Reducers>((ReducersDeduced &&)
- reducers)...,
+ : CombinedReducerStorageImpl<Idxs, Reducers>(
+ (ReducersDeduced&&)reducers)...,
m_value_view(&value) {}
KOKKOS_FUNCTION constexpr void join(value_type& dest,
value_type const& src) const noexcept {
- emulate_fold_comma_operator(
- this->CombinedReducerStorageImpl<Idxs, Reducers>::_join(
- dest.template get<Idxs, typename Reducers::value_type>(),
- src.template get<Idxs, typename Reducers::value_type>())...);
+ (this->CombinedReducerStorageImpl<Idxs, Reducers>::_join(
+ dest.template get<Idxs, typename Reducers::value_type>(),
+ src.template get<Idxs, typename Reducers::value_type>()),
+ ...);
}
KOKKOS_FUNCTION constexpr void init(value_type& dest) const noexcept {
- emulate_fold_comma_operator(
- this->CombinedReducerStorageImpl<Idxs, Reducers>::_init(
- dest.template get<Idxs, typename Reducers::value_type>())...);
+ (this->CombinedReducerStorageImpl<Idxs, Reducers>::_init(
+ dest.template get<Idxs, typename Reducers::value_type>()),
+ ...);
}
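+  // (expression, ...) above is a C++17 fold over the comma operator: it
+  // expands the per-reducer _init/_join calls in place of the old
+  // emulate_fold_comma_operator helper from <impl/Kokkos_Utilities.hpp>.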
+ KOKKOS_FUNCTION auto& reference() const { return *m_value_view.data(); }
+
// TODO figure out if we also need to call through to final
KOKKOS_FUNCTION
static void write_value_back_to_original_references(
const ExecutionSpace& exec_space, value_type const& value,
Reducers const&... reducers_that_reference_original_values) noexcept {
- emulate_fold_comma_operator(
- (write_one_value_back<ExecutionSpace, Idxs>(
- exec_space, reducers_that_reference_original_values.view(),
- value.template get<Idxs, typename Reducers::value_type>()),
- 0)...);
+ (write_one_value_back<ExecutionSpace, Idxs>(
+ exec_space, reducers_that_reference_original_values.view(),
+ value.template get<Idxs, typename Reducers::value_type>()),
+     ...);
+ }
+
+ template <int Idx, class View>
+ KOKKOS_FUNCTION static void write_one_value_back_on_device(
+ View const& inputView, typename View::const_value_type& value) noexcept {
+ *inputView.data() = value;
+ }
+
+ template <typename... CombinedReducers>
+ KOKKOS_FUNCTION void write_value_back_to_original_references_on_device(
+ value_type const& value,
+ CombinedReducers const&... reducers_that_reference_original_values) noexcept {
+ (write_one_value_back_on_device<Idxs>(
+ reducers_that_reference_original_values.view(),
+ value.template get<Idxs, typename CombinedReducers::value_type>()),
+ ...);
}
};
IndexOrMemberOrTagType1&& arg_first,
IndexOrMemberTypesThenValueType&&... args) const {
this->template _call_op_impl<IndexOrMemberOrTagType1&&>(
- (IndexOrMemberOrTagType1 &&) arg_first,
- (IndexOrMemberTypesThenValueType &&) args...);
+ (IndexOrMemberOrTagType1&&)arg_first,
+ (IndexOrMemberTypesThenValueType&&)args...);
}
// </editor-fold> end call operator }}}2
template <class... IdxOrMemberTypes, class IdxOrMemberType1,
class... IdxOrMemberTypesThenValueType>
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
- !std::is_same<remove_cvref_t<IdxOrMemberType1>, value_type>::value>
+ !std::is_same_v<remove_cvref_t<IdxOrMemberType1>, value_type>>
_call_op_impl(IdxOrMemberTypes&&... idxs, IdxOrMemberType1&& idx,
IdxOrMemberTypesThenValueType&&... args) const {
this->template _call_op_impl<IdxOrMemberTypes&&..., IdxOrMemberType1&&>(
- (IdxOrMemberTypes &&) idxs..., (IdxOrMemberType1 &&) idx,
- (IdxOrMemberTypesThenValueType &&) args...);
+ (IdxOrMemberTypes&&)idxs..., (IdxOrMemberType1&&)idx,
+ (IdxOrMemberTypesThenValueType&&)args...);
}
// base case
template <class... IdxOrMemberTypes>
KOKKOS_FORCEINLINE_FUNCTION void _call_op_impl(IdxOrMemberTypes&&... idxs,
value_type& out) const {
- m_functor((IdxOrMemberTypes &&) idxs...,
+ m_functor((IdxOrMemberTypes&&)idxs...,
out.template get<Idxs, typename Reducers::value_type>()...);
}
};
typename _reducer_from_arg_t<Space,
ReferencesOrViewsOrReducers>::value_type...>{
      // Note: after refactoring, this helper's name no longer matches what it
      // does.
- _get_value_from_combined_reducer_ctor_arg((ReferencesOrViewsOrReducers &&)
- args)...};
+ _get_value_from_combined_reducer_ctor_arg(
+ (ReferencesOrViewsOrReducers&&)args)...};
//----------------------------------------
}
Space, _reducer_from_arg_t<Space, ReferencesOrViewsOrReducers>...>;
return reducer_type(value,
_reducer_from_arg_t<Space, ReferencesOrViewsOrReducers>{
- (ReferencesOrViewsOrReducers &&) args}...);
+ (ReferencesOrViewsOrReducers&&)args}...);
//----------------------------------------
}
-template <class Functor, class Space, class... ReferencesOrViewsOrReducers>
+template <class Space, class Functor, class... ReferencesOrViewsOrReducers>
KOKKOS_INLINE_FUNCTION constexpr auto make_wrapped_combined_functor(
- Functor const& functor, Space, ReferencesOrViewsOrReducers&&...) {
+ Functor const& functor, ReferencesOrViewsOrReducers&&...) {
//----------------------------------------
return CombinedReductionFunctorWrapper<
Functor, Space,
template <typename FunctorType>
using functor_has_value_t = typename FunctorType::value_type;
+
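+// Shared implementation for the team-level parallel_reduce overloads taking
+// multiple result references/reducers: build the combined value, the wrapped
+// functor, and the combined reducer, run the nested reduction, then write
+// each partial result back to its original destination on the device.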
+template <typename MemberType, typename BoundaryStructType, typename Functor,
+ typename ReturnType1, typename ReturnType2, typename... ReturnTypes>
+KOKKOS_INLINE_FUNCTION void parallel_reduce_combined_reducers_impl(
+ BoundaryStructType const& boundaries, Functor const& functor,
+ ReturnType1&& returnType1, ReturnType2&& returnType2,
+ ReturnTypes&&... returnTypes) noexcept {
+ using mem_space_type = typename MemberType::execution_space::memory_space;
+
+ decltype(Impl::make_combined_reducer_value<mem_space_type>(
+ returnType1, returnType2, returnTypes...)) combined_value;
+
+ auto combined_functor = Impl::make_wrapped_combined_functor<mem_space_type>(
+ functor, returnType1, returnType2, returnTypes...);
+
+ auto combined_reducer = Impl::make_combined_reducer<mem_space_type>(
+ combined_value, returnType1, returnType2, returnTypes...);
+
+ parallel_reduce(boundaries, combined_functor, combined_reducer);
+
+ combined_reducer.write_value_back_to_original_references_on_device(
+ combined_value, Impl::_make_reducer_from_arg<mem_space_type>(returnType1),
+ Impl::_make_reducer_from_arg<mem_space_type>(returnType2),
+ Impl::_make_reducer_from_arg<mem_space_type>(returnTypes)...);
+}
+
} // end namespace Impl
//==============================================================================
// directly
using space_type = Kokkos::DefaultHostExecutionSpace::memory_space;
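+  // Only the type produced by make_combined_reducer_value is needed below;
+  // wrapping the call in decltype avoids evaluating it and leaves the
+  // combined reducer responsible for initializing the value (the presumed
+  // motivation for declaring rather than assigning here).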
- auto value = Impl::make_combined_reducer_value<space_type>(
- returnType1, returnType2, returnTypes...);
+ decltype(Impl::make_combined_reducer_value<space_type>(
+ returnType1, returnType2, returnTypes...)) value;
using combined_reducer_type = Impl::CombinedReducer<
space_type, Impl::_reducer_from_arg_t<space_type, ReturnType1>,
auto combined_reducer = Impl::make_combined_reducer<space_type>(
value, returnType1, returnType2, returnTypes...);
- auto combined_functor = Impl::make_wrapped_combined_functor(
- functor, space_type{}, returnType1, returnType2, returnTypes...);
+ auto combined_functor = Impl::make_wrapped_combined_functor<space_type>(
+ functor, returnType1, returnType2, returnTypes...);
using combined_functor_type = decltype(combined_functor);
static_assert(
//------------------------------------------------------------------------------
// <editor-fold desc="Team overloads"> {{{2
-// Copied three times because that's the best way we have right now to match
-// Impl::TeamThreadRangeBoundariesStruct,
-// Impl::ThreadVectorRangeBoundariesStruct, and
-// Impl::TeamVectorRangeBoundariesStruct.
-// TODO make these work after restructuring
-
-// template <class iType, class MemberType, class Functor, class ReturnType1,
-// class ReturnType2, class... ReturnTypes>
-// KOKKOS_INLINE_FUNCTION void parallel_reduce(
-// std::string const& label,
-// Impl::TeamThreadRangeBoundariesStruct<iType, MemberType> const&
-// boundaries, Functor const& functor, ReturnType1&& returnType1,
-// ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
-// const auto combined_reducer =
-// Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
-// returnType1, returnType2, returnTypes...);
-//
-// auto combined_functor = Impl::make_wrapped_combined_functor(
-// functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
-// returnTypes...);
-//
-// parallel_reduce(label, boundaries, combined_functor, combined_reducer);
-//}
-//
-// template <class iType, class MemberType, class Functor, class ReturnType1,
-// class ReturnType2, class... ReturnTypes>
-// KOKKOS_INLINE_FUNCTION void parallel_reduce(
-// std::string const& label,
-// Impl::ThreadVectorRangeBoundariesStruct<iType, MemberType> const&
-// boundaries,
-// Functor const& functor, ReturnType1&& returnType1,
-// ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
-// const auto combined_reducer =
-// Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
-// returnType1, returnType2, returnTypes...);
-//
-// auto combined_functor = Impl::make_wrapped_combined_functor(
-// functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
-// returnTypes...);
-//
-// parallel_reduce(label, boundaries, combined_functor, combined_reducer);
-//}
-
-// template <class iType, class MemberType, class Functor, class ReturnType1,
-// class ReturnType2, class... ReturnTypes>
-// KOKKOS_INLINE_FUNCTION void parallel_reduce(
-// std::string const& label,
-// Impl::TeamVectorRangeBoundariesStruct<iType, MemberType> const&
-// boundaries, Functor const& functor, ReturnType1&& returnType1,
-// ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
-// const auto combined_reducer =
-// Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
-// returnType1, returnType2, returnTypes...);
-//
-// auto combined_functor = Impl::make_wrapped_combined_functor(
-// functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
-// returnTypes...);
-//
-// parallel_reduce(label, boundaries, combined_functor, combined_reducer);
-//}
+template <class iType, class MemberType, class Functor, class ReturnType1,
+ class ReturnType2, class... ReturnTypes>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ Impl::TeamThreadRangeBoundariesStruct<iType, MemberType> const& boundaries,
+ Functor const& functor, ReturnType1&& returnType1,
+ ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+ Impl::parallel_reduce_combined_reducers_impl<MemberType>(
+ boundaries, functor, returnType1, returnType2, returnTypes...);
+}
+
+template <class iType, class MemberType, class Functor, class ReturnType1,
+ class ReturnType2, class... ReturnTypes>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ Impl::ThreadVectorRangeBoundariesStruct<iType, MemberType> const&
+ boundaries,
+ Functor const& functor, ReturnType1&& returnType1,
+ ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+ Impl::parallel_reduce_combined_reducers_impl<MemberType>(
+ boundaries, functor, returnType1, returnType2, returnTypes...);
+}
+
+template <class iType, class MemberType, class Functor, class ReturnType1,
+ class ReturnType2, class... ReturnTypes>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+ Impl::TeamVectorRangeBoundariesStruct<iType, MemberType> const& boundaries,
+ Functor const& functor, ReturnType1&& returnType1,
+ ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+ Impl::parallel_reduce_combined_reducers_impl<MemberType>(
+ boundaries, functor, returnType1, returnType2, returnTypes...);
+}
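+// Minimal usage sketch for the overloads above (illustrative only; `team`,
+// `n`, and `x` are assumed to be in scope):
+//   double sum = 0; int count = 0;
+//   parallel_reduce(TeamThreadRange(team, n),
+//                   [=](int i, double& s, int& c) { s += x(i); ++c; },
+//                   sum, count);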
// </editor-fold> end Team overloads }}}2
//------------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_COMMAND_LINE_PARSING_HPP
+#define KOKKOS_COMMAND_LINE_PARSING_HPP
+
+#include <string>
+#include <regex>
+
+namespace Kokkos {
+namespace Impl {
+bool is_unsigned_int(const char* str);
+bool check_arg(char const* arg, char const* expected);
+bool check_arg_bool(char const* arg, char const* name, bool& val);
+bool check_arg_int(char const* arg, char const* name, int& val);
+bool check_arg_str(char const* arg, char const* name, std::string& val);
+bool check_env_bool(char const* name, bool& val);
+bool check_env_int(char const* name, int& val);
+void warn_deprecated_environment_variable(std::string deprecated);
+void warn_deprecated_environment_variable(std::string deprecated,
+ std::string use_instead);
+void warn_deprecated_command_line_argument(std::string deprecated);
+void warn_deprecated_command_line_argument(std::string deprecated,
+ std::string use_instead);
+void warn_not_recognized_command_line_argument(std::string not_recognized);
+void do_not_warn_not_recognized_command_line_argument(std::regex ignore);
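+// Illustrative usage sketch (assumed semantics): given
+//   char const* arg = "--kokkos-device-id=3";
+//   int id;
+//   bool matched = check_arg_int(arg, "--kokkos-device-id", id);
+// `matched` would be true and `id` would be 3.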
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CONCURRENTBITSET_HPP
#define KOKKOS_CONCURRENTBITSET_HPP
// when is full at the atomic_fetch_add(+1)
// then a release occurs before the atomic_fetch_add(-1).
- const uint32_t state = (uint32_t)Kokkos::atomic_fetch_add(
- reinterpret_cast<volatile int *>(buffer), 1);
+ const uint32_t state =
+ Kokkos::atomic_fetch_add(const_cast<uint32_t *>(buffer), 1);
const uint32_t state_error = state_header != (state & state_header_mask);
const uint32_t state_bit_used = state & state_used_mask;
if (state_error || (bit_bound <= state_bit_used)) {
- Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+ Kokkos::atomic_fetch_sub(const_cast<uint32_t *>(buffer), 1);
return state_error ? type(-2, -2) : type(-1, -1);
}
while (1) {
const uint32_t word = bit >> bits_per_int_lg2;
const uint32_t mask = 1u << (bit & bits_per_int_mask);
- const uint32_t prev = Kokkos::atomic_fetch_or(buffer + word + 1, mask);
+ const uint32_t prev = Kokkos::atomic_fetch_or(
+ const_cast<uint32_t *>(buffer) + word + 1, mask);
if (!(prev & mask)) {
// Successfully claimed 'result.first' by
// when is full at the atomic_fetch_add(+1)
// then a release occurs before the atomic_fetch_add(-1).
- const uint32_t state = (uint32_t)Kokkos::atomic_fetch_add(
- reinterpret_cast<volatile int *>(buffer), 1);
+ const uint32_t state =
+ Kokkos::atomic_fetch_add(const_cast<uint32_t *>(buffer), 1);
const uint32_t state_error = state_header != (state & state_header_mask);
const uint32_t state_bit_used = state & state_used_mask;
if (state_error || (bit_bound <= state_bit_used)) {
- Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+ Kokkos::atomic_fetch_sub(const_cast<uint32_t *>(buffer), 1);
return state_error ? type(-2, -2) : type(-1, -1);
}
while (1) {
const uint32_t word = bit >> bits_per_int_lg2;
const uint32_t mask = 1u << (bit & bits_per_int_mask);
- const uint32_t prev = Kokkos::atomic_fetch_or(buffer + word + 1, mask);
+ const uint32_t prev = Kokkos::atomic_fetch_or(
+ const_cast<uint32_t *>(buffer) + word + 1, mask);
if (!(prev & mask)) {
// Successfully claimed 'result.first' by
}
const uint32_t mask = 1u << (bit & bits_per_int_mask);
- const uint32_t prev =
- Kokkos::atomic_fetch_and(buffer + (bit >> bits_per_int_lg2) + 1, ~mask);
+ const uint32_t prev = Kokkos::atomic_fetch_and(
+ const_cast<uint32_t *>(buffer) + (bit >> bits_per_int_lg2) + 1, ~mask);
if (!(prev & mask)) {
return -1;
Kokkos::memory_fence();
const int count =
- Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+ Kokkos::atomic_fetch_sub(const_cast<uint32_t *>(buffer), 1);
// Flush the store-release
Kokkos::memory_fence();
}
const uint32_t mask = 1u << (bit & bits_per_int_mask);
- const uint32_t prev =
- Kokkos::atomic_fetch_or(buffer + (bit >> bits_per_int_lg2) + 1, mask);
+ const uint32_t prev = Kokkos::atomic_fetch_or(
+ const_cast<uint32_t *>(buffer) + (bit >> bits_per_int_lg2) + 1, mask);
if (!(prev & mask)) {
return -1;
Kokkos::memory_fence();
const int count =
- Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+ Kokkos::atomic_fetch_sub(const_cast<uint32_t *>(buffer), 1);
return (count & state_used_mask) - 1;
}
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <impl/Kokkos_ParseCommandLineArgumentsAndEnvironmentVariables.hpp>
#include <impl/Kokkos_DeviceManagement.hpp>
#include <impl/Kokkos_ExecSpaceManager.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
#include <algorithm>
#include <cctype>
KOKKOS_IMPL_COMBINE_SETTING(num_threads);
KOKKOS_IMPL_COMBINE_SETTING(map_device_id_by);
KOKKOS_IMPL_COMBINE_SETTING(device_id);
- KOKKOS_IMPL_COMBINE_SETTING(num_devices);
- KOKKOS_IMPL_COMBINE_SETTING(skip_device);
KOKKOS_IMPL_COMBINE_SETTING(disable_warnings);
+ KOKKOS_IMPL_COMBINE_SETTING(print_configuration);
KOKKOS_IMPL_COMBINE_SETTING(tune_internals);
KOKKOS_IMPL_COMBINE_SETTING(tools_help);
KOKKOS_IMPL_COMBINE_SETTING(tools_libs);
int get_device_count() {
#if defined(KOKKOS_ENABLE_CUDA)
- return Kokkos::Cuda::detect_device_count();
+ int count;
+ KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceCount(&count));
+ return count;
#elif defined(KOKKOS_ENABLE_HIP)
- return Kokkos::Experimental::HIP::detect_device_count();
+ int count;
+ KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceCount(&count));
+ return count;
#elif defined(KOKKOS_ENABLE_SYCL)
- return sycl::device::get_devices(sycl::info::device_type::gpu).size();
+ return Kokkos::Impl::get_sycl_devices().size();
#elif defined(KOKKOS_ENABLE_OPENACC)
return acc_get_num_devices(
Kokkos::Experimental::Impl::OpenACC_Traits::dev_type);
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+ return omp_get_num_devices();
#else
Kokkos::abort("implementation bug");
return -1;
} // namespace
+std::vector<int> const& Kokkos::Impl::get_visible_devices() {
+ static auto devices = get_visible_devices(get_device_count());
+ return devices;
+}
+
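+// Returns the position of the backend's active device within the list of
+// visible devices, or -1 when no GPU backend is enabled; aborts if the active
+// device is not among the visible ones.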
+[[nodiscard]] int Kokkos::device_id() noexcept {
+#if defined(KOKKOS_ENABLE_CUDA)
+ int device = Cuda().cuda_device();
+#elif defined(KOKKOS_ENABLE_HIP)
+ int device = HIP().hip_device();
+#elif defined(KOKKOS_ENABLE_OPENACC)
+ int device = Experimental::OpenACC().acc_device_number();
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+ int device = omp_get_default_device(); // FIXME_OPENMPTARGET
+#elif defined(KOKKOS_ENABLE_SYCL)
+ int device = Impl::SYCLInternal::m_syclDev;
+#else
+ int device = -1;
+ return device;
+#endif
+ auto const& visible_devices = Impl::get_visible_devices();
+ for (std::size_t i = 0; i < visible_devices.size(); ++i) {
+ if (visible_devices[i] == device) {
+ return i;
+ }
+ }
+ Kokkos::abort("Unexpected error: cannot determine device id");
+ return -1;
+}
+
+[[nodiscard]] int Kokkos::num_devices() noexcept {
+ if constexpr (std::is_same_v<DefaultExecutionSpace,
+ DefaultHostExecutionSpace>) {
+ return -1; // no GPU backend enabled
+ } else {
+ return Impl::get_visible_devices().size();
+ }
+}
+
+[[nodiscard]] int Kokkos::num_threads() noexcept {
+ return DefaultHostExecutionSpace().concurrency();
+}
+
Kokkos::Impl::ExecSpaceManager& Kokkos::Impl::ExecSpaceManager::get_instance() {
static ExecSpaceManager space_initializer = {};
return space_initializer;
}
}
-int Kokkos::Impl::get_ctest_gpu(const char* local_rank_str) {
+int Kokkos::Impl::get_ctest_gpu(int local_rank) {
auto const* ctest_kokkos_device_type =
std::getenv("CTEST_KOKKOS_DEVICE_TYPE");
if (!ctest_kokkos_device_type) {
// Make sure rank is within bounds of resource groups specified by CTest
auto resource_group_count = std::stoi(ctest_resource_group_count_str);
- auto local_rank = std::stoi(local_rank_str);
+ assert(local_rank >= 0);
if (local_rank >= resource_group_count) {
std::ostringstream ss;
ss << "Error: local rank " << local_rank
<< " is outside the bounds of resource groups provided by CTest. Raised"
<< " by Kokkos::Impl::get_ctest_gpu().";
- throw_runtime_exception(ss.str());
+ abort(ss.str().c_str());
}
// Get the resource types allocated to this resource group
std::ostringstream ss;
ss << "Error: " << ctest_resource_group_name << " is not specified. Raised"
<< " by Kokkos::Impl::get_ctest_gpu().";
- throw_runtime_exception(ss.str());
+ abort(ss.str().c_str());
}
// Look for the device type specified in CTEST_KOKKOS_DEVICE_TYPE
ss << "Error: device type '" << ctest_kokkos_device_type
<< "' not included in " << ctest_resource_group_name
<< ". Raised by Kokkos::Impl::get_ctest_gpu().";
- throw_runtime_exception(ss.str());
+ abort(ss.str().c_str());
}
// Get the device ID
std::ostringstream ss;
ss << "Error: " << ctest_resource_group_id_name
<< " is not specified. Raised by Kokkos::Impl::get_ctest_gpu().";
- throw_runtime_exception(ss.str());
+ abort(ss.str().c_str());
}
auto const* comma = std::strchr(resource_str, ',');
std::ostringstream ss;
ss << "Error: invalid value of " << ctest_resource_group_id_name << ": '"
<< resource_str << "'. Raised by Kokkos::Impl::get_ctest_gpu().";
- throw_runtime_exception(ss.str());
+ abort(ss.str().c_str());
}
std::string id(resource_str + 3, comma - resource_str - 3);
return std::stoi(id.c_str());
}
-std::vector<int> Kokkos::Impl::get_visible_devices(
- Kokkos::InitializationSettings const& settings, int device_count) {
+std::vector<int> Kokkos::Impl::get_visible_devices(int device_count) {
std::vector<int> visible_devices;
char* env_visible_devices = std::getenv("KOKKOS_VISIBLE_DEVICES");
if (env_visible_devices) {
}
}
} else {
- int num_devices =
- settings.has_num_devices() ? settings.get_num_devices() : device_count;
- if (num_devices > device_count) {
- std::stringstream ss;
- ss << "Error: Specified number of devices '" << num_devices
- << "' exceeds the actual number of GPUs available for execution '"
- << device_count << "'."
- << " Raised by Kokkos::initialize().\n";
- Kokkos::abort(ss.str().c_str());
- }
- for (int i = 0; i < num_devices; ++i) {
+ for (int i = 0; i < device_count; ++i) {
visible_devices.push_back(i);
}
- if (settings.has_skip_device()) {
- if (visible_devices.size() == 1 && settings.get_skip_device() == 0) {
- Kokkos::abort(
- "Error: skipping the only GPU available for execution.\n"
- " Raised by Kokkos::initialize().\n");
- }
- visible_devices.erase(
- std::remove(visible_devices.begin(), visible_devices.end(),
- settings.get_skip_device()),
- visible_devices.end());
- }
}
if (visible_devices.empty()) {
Kokkos::abort(
return visible_devices;
}
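+// Selects the GPU for this process: an explicitly requested device id wins,
+// then the CTest-assigned device, otherwise a round-robin assignment over the
+// visible devices by local MPI rank; std::nullopt is returned when the local
+// rank cannot be detected, delegating the choice to the backend.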
-int Kokkos::Impl::get_gpu(const InitializationSettings& settings) {
- std::vector<int> visible_devices =
- get_visible_devices(settings, get_device_count());
- int const num_devices = visible_devices.size();
+std::optional<int> Kokkos::Impl::get_gpu(
+ const InitializationSettings& settings) {
+ std::vector<int> visible_devices = get_visible_devices(get_device_count());
+ int const num_devices = visible_devices.size();
// device_id is provided
if (settings.has_device_id()) {
int const id = settings.get_device_id();
Kokkos::abort("implementation bug");
}
- auto const* local_rank_str =
- std::getenv("OMPI_COMM_WORLD_LOCAL_RANK"); // OpenMPI
- if (!local_rank_str)
- local_rank_str = std::getenv("MV2_COMM_WORLD_LOCAL_RANK"); // MVAPICH2
- if (!local_rank_str) local_rank_str = std::getenv("SLURM_LOCALID"); // SLURM
+ int const mpi_local_rank = mpi_local_rank_on_node();
- // use first GPU available for execution if unable to detect local MPI rank
- if (!local_rank_str) {
+  // If the local MPI rank cannot be detected, return nullopt to delegate
+  // device selection to the backend.
+ if (mpi_local_rank < 0) {
if (settings.has_map_device_id_by()) {
std::cerr << "Warning: unable to detect local MPI rank."
<< " Falling back to the first GPU available for execution."
<< " Raised by Kokkos::initialize()." << std::endl;
}
- return visible_devices[0];
+ return std::nullopt;
}
// use device assigned by CTest when resource allocation is activated
if (std::getenv("CTEST_KOKKOS_DEVICE_TYPE") &&
std::getenv("CTEST_RESOURCE_GROUP_COUNT")) {
- return get_ctest_gpu(local_rank_str);
+ return get_ctest_gpu(mpi_local_rank);
}
- return visible_devices[std::stoi(local_rank_str) % visible_devices.size()];
+ return visible_devices[mpi_local_rank % visible_devices.size()];
}
namespace {
void initialize_backends(const Kokkos::InitializationSettings& settings) {
-// This is an experimental setting
-// For KNL in Flat mode this variable should be set, so that
-// memkind allocates high bandwidth memory correctly.
-#ifdef KOKKOS_ENABLE_HBWSPACE
- setenv("MEMKIND_HBW_NODES", "1", 0);
-#endif
-
Kokkos::Impl::ExecSpaceManager::get_instance().initialize_spaces(settings);
}
std::to_string(KOKKOS_COMPILER_GNU));
declare_configuration_metadata("tools_only", "compiler_family", "gnu");
#endif
-#ifdef KOKKOS_COMPILER_IBM
- declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_IBM",
- std::to_string(KOKKOS_COMPILER_IBM));
- declare_configuration_metadata("tools_only", "compiler_family", "ibm");
-#endif
#ifdef KOKKOS_COMPILER_INTEL
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_INTEL",
std::to_string(KOKKOS_COMPILER_INTEL));
declare_configuration_metadata("tools_only", "compiler_family", "intel");
#endif
+#ifdef KOKKOS_COMPILER_INTEL_LLVM
+ declare_configuration_metadata("compiler_version",
+ "KOKKOS_COMPILER_INTEL_LLVM",
+ std::to_string(KOKKOS_COMPILER_INTEL_LLVM));
+ declare_configuration_metadata("tools_only", "compiler_family", "intel_llvm");
+#endif
#ifdef KOKKOS_COMPILER_NVCC
declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_NVCC",
std::to_string(KOKKOS_COMPILER_NVCC));
declare_configuration_metadata("tools_only", "compiler_family", "nvcc");
#endif
-#ifdef KOKKOS_COMPILER_PGI
- declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_PGI",
- std::to_string(KOKKOS_COMPILER_PGI));
+#ifdef KOKKOS_COMPILER_NVHPC
+ declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_NVHPC",
+ std::to_string(KOKKOS_COMPILER_NVHPC));
declare_configuration_metadata("tools_only", "compiler_family", "pgi");
#endif
#ifdef KOKKOS_COMPILER_MSVC
declare_configuration_metadata("tools_only", "compiler_family", "msvc");
#endif
-#ifdef KOKKOS_ENABLE_GNU_ATOMICS
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "yes");
-#else
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "no");
-#endif
-#ifdef KOKKOS_ENABLE_INTEL_ATOMICS
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
- "yes");
-#else
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
- "no");
-#endif
-#ifdef KOKKOS_ENABLE_WINDOWS_ATOMICS
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
- "yes");
-#else
- declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
- "no");
-#endif
-
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_IVDEP",
"yes");
declare_configuration_metadata("vectorization",
"KOKKOS_ENABLE_PRAGMA_LOOPCOUNT", "no");
#endif
-#ifdef KOKKOS_ENABLE_PRAGMA_SIMD
- declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
- "yes");
-#else
- declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
- "no");
-#endif
#ifdef KOKKOS_ENABLE_PRAGMA_UNROLL
declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_UNROLL",
"yes");
"no");
#endif
-#ifdef KOKKOS_ENABLE_HBWSPACE
- declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "yes");
-#else
- declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "no");
-#endif
-#ifdef KOKKOS_ENABLE_INTEL_MM_ALLOC
- declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
- "yes");
-#else
- declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
- "no");
-#endif
-
#ifdef KOKKOS_ENABLE_ASM
declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "no");
#endif
-#ifdef KOKKOS_ENABLE_CXX14
- declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "yes");
-#else
- declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "no");
-#endif
#ifdef KOKKOS_ENABLE_CXX17
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX17", "yes");
#else
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX20", "no");
#endif
+#ifdef KOKKOS_ENABLE_CXX23
+ declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX23", "yes");
+#else
+ declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX23", "no");
+#endif
+#ifdef KOKKOS_ENABLE_CXX26
+ declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX26", "yes");
+#else
+ declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX26", "no");
+#endif
#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
declare_configuration_metadata("options", "KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK",
"yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_HWLOC", "no");
#endif
-#ifdef KOKKOS_ENABLE_LIBRT
- declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "yes");
-#else
- declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "no");
-#endif
#ifdef KOKKOS_ENABLE_LIBDL
declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBDL", "yes");
#else
declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBDL", "no");
#endif
+
declare_configuration_metadata("architecture", "Default Device",
- typeid(Kokkos::DefaultExecutionSpace).name());
+ Kokkos::DefaultExecutionSpace::name());
+
+#if defined(KOKKOS_ARCH_A64FX)
+ declare_configuration_metadata("architecture", "CPU architecture", "A64FX");
+#elif defined(KOKKOS_ARCH_AMDAVX)
+ declare_configuration_metadata("architecture", "CPU architecture", "AMDAVX");
+#elif defined(KOKKOS_ARCH_ARMV80)
+ declare_configuration_metadata("architecture", "CPU architecture", "ARMV80");
+#elif defined(KOKKOS_ARCH_ARMV81)
+ declare_configuration_metadata("architecture", "CPU architecture", "ARMV81");
+#elif defined(KOKKOS_ARCH_ARMV8_THUNDERX)
+ declare_configuration_metadata("architecture", "CPU architecture",
+ "ARMV8_THUNDERX");
+#elif defined(KOKKOS_ARCH_ARMV8_THUNDERX2)
+ declare_configuration_metadata("architecture", "CPU architecture",
+ "ARMV8_THUNDERX2");
+#elif defined(KOKKOS_ARCH_BDW)
+ declare_configuration_metadata("architecture", "CPU architecture", "BDW");
+#elif defined(KOKKOS_ARCH_HSW)
+ declare_configuration_metadata("architecture", "CPU architecture", "HSW");
+#elif defined(KOKKOS_ARCH_ICL)
+ declare_configuration_metadata("architecture", "CPU architecture", "ICL");
+#elif defined(KOKKOS_ARCH_ICX)
+ declare_configuration_metadata("architecture", "CPU architecture", "ICX");
+#elif defined(KOKKOS_ARCH_KNC)
+ declare_configuration_metadata("architecture", "CPU architecture", "KNC");
+#elif defined(KOKKOS_ARCH_KNL)
+ declare_configuration_metadata("architecture", "CPU architecture", "KNL");
+#elif defined(KOKKOS_ARCH_NATIVE)
+ declare_configuration_metadata("architecture", "CPU architecture", "NATIVE");
+#elif defined(KOKKOS_ARCH_POWER8)
+ declare_configuration_metadata("architecture", "CPU architecture", "POWER8");
+#elif defined(KOKKOS_ARCH_POWER9)
+ declare_configuration_metadata("architecture", "CPU architecture", "POWER9");
+#elif defined(KOKKOS_ARCH_SKL)
+ declare_configuration_metadata("architecture", "CPU architecture", "SKL");
+#elif defined(KOKKOS_ARCH_SKX)
+ declare_configuration_metadata("architecture", "CPU architecture", "SKX");
+#elif defined(KOKKOS_ARCH_SNB)
+ declare_configuration_metadata("architecture", "CPU architecture", "SNB");
+#elif defined(KOKKOS_ARCH_SPR)
+ declare_configuration_metadata("architecture", "CPU architecture", "SPR");
+#elif defined(KOKKOS_ARCH_AMD_ZEN)
+ declare_configuration_metadata("architecture", "CPU architecture", "AMD_ZEN");
+#elif defined(KOKKOS_ARCH_AMD_ZEN2)
+ declare_configuration_metadata("architecture", "CPU architecture",
+ "AMD_ZEN2");
+#elif defined(KOKKOS_ARCH_AMD_ZEN3)
+ declare_configuration_metadata("architecture", "CPU architecture",
+ "AMD_ZEN3");
+#elif defined(KOKKOS_ARCH_RISCV_SG2042)
+  declare_configuration_metadata("architecture", "CPU architecture",
+                                 "SG2042 (RISC-V)");
+#elif defined(KOKKOS_ARCH_RISCV_RVA22V)
+  declare_configuration_metadata("architecture", "CPU architecture",
+                                 "RVA22V (RISC-V)");
+#else
+ declare_configuration_metadata("architecture", "CPU architecture", "none");
+#endif
+
+#if defined(KOKKOS_ARCH_INTEL_GEN)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_GEN");
+#elif defined(KOKKOS_ARCH_INTEL_DG1)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_DG1");
+#elif defined(KOKKOS_ARCH_INTEL_GEN9)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_GEN9");
+#elif defined(KOKKOS_ARCH_INTEL_GEN11)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_GEN11");
+#elif defined(KOKKOS_ARCH_INTEL_GEN12LP)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_GEN12LP");
+#elif defined(KOKKOS_ARCH_INTEL_XEHP)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_XEHP");
+#elif defined(KOKKOS_ARCH_INTEL_PVC)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "INTEL_PVC");
+
+#elif defined(KOKKOS_ARCH_KEPLER30)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "KEPLER30");
+#elif defined(KOKKOS_ARCH_KEPLER32)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "KEPLER32");
+#elif defined(KOKKOS_ARCH_KEPLER35)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "KEPLER35");
+#elif defined(KOKKOS_ARCH_KEPLER37)
+ declare_configuration_metadata("architecture", "GPU architecture",
+                                 "KEPLER37");
+#elif defined(KOKKOS_ARCH_MAXWELL50)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "MAXWELL50");
+#elif defined(KOKKOS_ARCH_MAXWELL52)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "MAXWELL52");
+#elif defined(KOKKOS_ARCH_MAXWELL53)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "MAXWELL53");
+#elif defined(KOKKOS_ARCH_PASCAL60)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "PASCAL60");
+#elif defined(KOKKOS_ARCH_PASCAL61)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "PASCAL61");
+#elif defined(KOKKOS_ARCH_VOLTA70)
+ declare_configuration_metadata("architecture", "GPU architecture", "VOLTA70");
+#elif defined(KOKKOS_ARCH_VOLTA72)
+ declare_configuration_metadata("architecture", "GPU architecture", "VOLTA72");
+#elif defined(KOKKOS_ARCH_TURING75)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "TURING75");
+#elif defined(KOKKOS_ARCH_AMPERE80)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMPERE80");
+#elif defined(KOKKOS_ARCH_AMPERE86)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMPERE86");
+#elif defined(KOKKOS_ARCH_ADA89)
+ declare_configuration_metadata("architecture", "GPU architecture", "ADA89");
+#elif defined(KOKKOS_ARCH_HOPPER90)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "HOPPER90");
+#elif defined(KOKKOS_ARCH_AMD_GFX906)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX906");
+#elif defined(KOKKOS_ARCH_AMD_GFX908)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX908");
+#elif defined(KOKKOS_ARCH_AMD_GFX90A)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX90A");
+#elif defined(KOKKOS_ARCH_AMD_GFX1030)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX1030");
+#elif defined(KOKKOS_ARCH_AMD_GFX1100)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX1100");
+#elif defined(KOKKOS_ARCH_AMD_GFX1103)
+ declare_configuration_metadata("architecture", "GPU architecture",
+ "AMD_GFX1103");
+
+#else
+ declare_configuration_metadata("architecture", "GPU architecture", "none");
+#endif
+
+#ifdef KOKKOS_IMPL_32BIT
+ declare_configuration_metadata("architecture", "platform", "32bit");
+#else
+ declare_configuration_metadata("architecture", "platform", "64bit");
+#endif
}
void post_initialize_internal(const Kokkos::InitializationSettings& settings) {
}
void initialize_internal(const Kokkos::InitializationSettings& settings) {
+  // Tool initialization is only done in post_initialize_internal. Pause tools
+  // here so that callbacks set programmatically are not invoked during the
+  // backend initialization, before tool initialization has happened.
+ Kokkos::Tools::Experimental::pause_tools();
pre_initialize_internal(settings);
initialize_backends(settings);
+ Kokkos::Tools::Experimental::resume_tools();
post_initialize_internal(settings);
}
-void finalize_internal() {
- typename decltype(finalize_hooks)::size_type numSuccessfulCalls = 0;
+// declared noexcept so that std::terminate is called if any of the registered
+// functions throws
+void call_registered_finalize_hook_functions() noexcept {
while (!finalize_hooks.empty()) {
- auto f = finalize_hooks.top();
- try {
- f();
- } catch (...) {
- std::cerr << "Kokkos::finalize: A finalize hook (set via "
- "Kokkos::push_finalize_hook) threw an exception that it did "
- "not catch."
- " Per std::atexit rules, this results in std::terminate. "
- "This is "
- "finalize hook number "
- << numSuccessfulCalls
- << " (1-based indexing) "
- "out of "
- << finalize_hooks.size()
- << " to call. Remember that "
- "Kokkos::finalize calls finalize hooks in reverse order "
- "from how they "
- "were pushed."
- << std::endl;
- std::terminate();
- }
+ auto const& func = finalize_hooks.top();
+ func();
finalize_hooks.pop();
- ++numSuccessfulCalls;
}
+}
+void pre_finalize_internal() {
+ call_registered_finalize_hook_functions();
Kokkos::Profiling::finalize();
+}
- Kokkos::Impl::ExecSpaceManager::get_instance().finalize_spaces();
-
+void post_finalize_internal() {
g_is_initialized = false;
g_is_finalized = true;
g_show_warnings = true;
int num_threads;
int device_id;
- int num_devices; // deprecated
- int skip_device; // deprecated
std::string map_device_id_by;
bool disable_warnings;
bool print_configuration;
bool tune_internals;
- auto get_flag = [](std::string s) -> std::string {
- return s.erase(s.find('='));
- };
-
bool help_flag = false;
int iarg = 0;
while (iarg < argc) {
bool remove_flag = false;
- if (check_arg(argv[iarg], "--kokkos-numa") ||
- check_arg(argv[iarg], "--numa")) {
- warn_deprecated_command_line_argument(get_flag(argv[iarg]));
- // remove flag if prefixed with '--kokkos-'
- remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
- } else if (check_arg_int(argv[iarg], "--kokkos-num-threads", num_threads) ||
- check_arg_int(argv[iarg], "--num-threads", num_threads) ||
- check_arg_int(argv[iarg], "--kokkos-threads", num_threads) ||
- check_arg_int(argv[iarg], "--threads", num_threads)) {
- if (get_flag(argv[iarg]) != "--kokkos-num-threads") {
- warn_deprecated_command_line_argument(get_flag(argv[iarg]),
- "--kokkos-num-threads");
- }
+ if (check_arg_int(argv[iarg], "--kokkos-num-threads", num_threads)) {
if (!is_valid_num_threads(num_threads)) {
std::stringstream ss;
ss << "Error: command line argument '" << argv[iarg] << "' is invalid."
Kokkos::abort(ss.str().c_str());
}
settings.set_num_threads(num_threads);
- remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
- } else if (check_arg_int(argv[iarg], "--kokkos-device-id", device_id) ||
- check_arg_int(argv[iarg], "--device-id", device_id) ||
- check_arg_int(argv[iarg], "--kokkos-device", device_id) ||
- check_arg_int(argv[iarg], "--device", device_id)) {
- if (get_flag(argv[iarg]) != "--kokkos-device-id") {
- warn_deprecated_command_line_argument(get_flag(argv[iarg]),
- "--kokkos-device-id");
- }
+ remove_flag = true;
+ } else if (check_arg_int(argv[iarg], "--kokkos-device-id", device_id)) {
if (!is_valid_device_id(device_id)) {
std::stringstream ss;
ss << "Error: command line argument '" << argv[iarg] << "' is invalid."
Kokkos::abort(ss.str().c_str());
}
settings.set_device_id(device_id);
- remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
- } else if (check_arg(argv[iarg], "--kokkos-num-devices") ||
- check_arg(argv[iarg], "--num-devices") ||
- check_arg(argv[iarg], "--kokkos-ndevices") ||
- check_arg(argv[iarg], "--ndevices")) {
- if (check_arg(argv[iarg], "--num-devices")) {
- warn_deprecated_command_line_argument("--num-devices",
- "--kokkos-num-devices");
- }
- if (check_arg(argv[iarg], "--ndevices")) {
- warn_deprecated_command_line_argument("--ndevices",
- "--kokkos-num-devices");
- }
- if (check_arg(argv[iarg], "--kokkos-ndevices")) {
- warn_deprecated_command_line_argument("--kokkos-ndevices",
- "--kokkos-num-devices");
- }
- warn_deprecated_command_line_argument(
- "--kokkos-num-devices", "--kokkos-map-device-id-by=mpi_rank");
- // Find the number of device (expecting --device=XX)
- if (!((strncmp(argv[iarg], "--kokkos-num-devices=", 21) == 0) ||
- (strncmp(argv[iarg], "--num-devices=", 14) == 0) ||
- (strncmp(argv[iarg], "--kokkos-ndevices=", 18) == 0) ||
- (strncmp(argv[iarg], "--ndevices=", 11) == 0)))
- throw_runtime_exception(
- "Error: expecting an '=INT[,INT]' after command line argument "
- "'--kokkos-num-devices'."
- " Raised by Kokkos::initialize().");
-
- char* num1 = strchr(argv[iarg], '=') + 1;
- char* num2 = strpbrk(num1, ",");
- int num1_len = num2 == nullptr ? strlen(num1) : num2 - num1;
- char* num1_only = new char[num1_len + 1];
- strncpy(num1_only, num1, num1_len);
- num1_only[num1_len] = '\0';
-
- if (!is_unsigned_int(num1_only) || (strlen(num1_only) == 0)) {
- throw_runtime_exception(
- "Error: expecting an integer number after command line argument "
- "'--kokkos-num-devices'."
- " Raised by Kokkos::initialize().");
- }
- if (check_arg(argv[iarg], "--kokkos-num-devices") ||
- check_arg(argv[iarg], "--kokkos-ndevices")) {
- num_devices = std::stoi(num1_only);
- settings.set_num_devices(num_devices);
- settings.set_map_device_id_by("mpi_rank");
- }
- delete[] num1_only;
-
- if (num2 != nullptr) {
- if ((!is_unsigned_int(num2 + 1)) || (strlen(num2) == 1))
- throw_runtime_exception(
- "Error: expecting an integer number after command line argument "
- "'--kokkos-num-devices=XX,'."
- " Raised by Kokkos::initialize().");
-
- if (check_arg(argv[iarg], "--kokkos-num-devices") ||
- check_arg(argv[iarg], "--kokkos-ndevices")) {
- skip_device = std::stoi(num2 + 1);
- settings.set_skip_device(skip_device);
- }
- }
- remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+ remove_flag = true;
} else if (check_arg_bool(argv[iarg], "--kokkos-disable-warnings",
disable_warnings)) {
settings.set_disable_warnings(disable_warnings);
Tools::Impl::parse_environment_variables(tools_init_arguments);
if (init_result.result ==
Tools::Impl::InitializationStatus::environment_argument_mismatch) {
- Impl::throw_runtime_exception(init_result.error_message);
+ Kokkos::abort(init_result.error_message.c_str());
}
combine(settings, tools_init_arguments);
- if (std::getenv("KOKKOS_NUMA")) {
- warn_deprecated_environment_variable("KOKKOS_NUMA");
- }
int num_threads;
if (check_env_int("KOKKOS_NUM_THREADS", num_threads)) {
if (!is_valid_num_threads(num_threads)) {
}
settings.set_device_id(device_id);
}
- int num_devices;
- int rand_devices;
- bool has_num_devices = check_env_int("KOKKOS_NUM_DEVICES", num_devices);
- bool has_rand_devices = check_env_int("KOKKOS_RAND_DEVICES", rand_devices);
- if (has_rand_devices && has_num_devices) {
- Impl::throw_runtime_exception(
- "Error: cannot specify both KOKKOS_NUM_DEVICES and "
- "KOKKOS_RAND_DEVICES."
- " Raised by Kokkos::initialize().");
- }
- if (has_num_devices) {
- warn_deprecated_environment_variable("KOKKOS_NUM_DEVICES",
- "KOKKOS_MAP_DEVICE_ID_BY=mpi_rank");
- settings.set_map_device_id_by("mpi_rank");
- settings.set_num_devices(num_devices);
- }
- if (has_rand_devices) {
- warn_deprecated_environment_variable("KOKKOS_RAND_DEVICES",
- "KOKKOS_MAP_DEVICE_ID_BY=random");
- settings.set_map_device_id_by("random");
- settings.set_num_devices(rand_devices);
- }
- if (has_num_devices || has_rand_devices) {
- int skip_device;
- if (check_env_int("KOKKOS_SKIP_DEVICE", skip_device)) {
- settings.set_skip_device(skip_device);
- }
- }
bool disable_warnings;
if (check_env_bool("KOKKOS_DISABLE_WARNINGS", disable_warnings)) {
settings.set_disable_warnings(disable_warnings);
}
//----------------------------------------------------------------------------
+namespace {
+bool kokkos_initialize_was_called() {
+ return Kokkos::is_initialized() || Kokkos::is_finalized();
+}
+bool kokkos_finalize_was_called() { return Kokkos::is_finalized(); }
+} // namespace
void Kokkos::initialize(int& argc, char* argv[]) {
+ if (kokkos_initialize_was_called()) {
+ Kokkos::abort(
+ "Error: Kokkos::initialize() has already been called."
+ " Kokkos can be initialized at most once.\n");
+ }
InitializationSettings settings;
Impl::parse_environment_variables(settings);
Impl::parse_command_line_arguments(argc, argv, settings);
}
void Kokkos::initialize(InitializationSettings const& settings) {
+ if (kokkos_initialize_was_called()) {
+ Kokkos::abort(
+ "Error: Kokkos::initialize() has already been called."
+ " Kokkos can be initialized at most once.\n");
+ }
InitializationSettings tmp;
Impl::parse_environment_variables(tmp);
combine(tmp, settings);
post_initialize_internal(settings);
}
+void Kokkos::Impl::pre_finalize() { pre_finalize_internal(); }
+
+void Kokkos::Impl::post_finalize() { post_finalize_internal(); }
+
void Kokkos::push_finalize_hook(std::function<void()> f) {
finalize_hooks.push(f);
}
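+// Hooks are kept on a stack, so they run in reverse order of registration
+// during Kokkos::finalize(), e.g. (illustrative):
+//   Kokkos::push_finalize_hook([] { std::puts("goodbye"); });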
-void Kokkos::finalize() { finalize_internal(); }
-
-#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
-KOKKOS_DEPRECATED void Kokkos::finalize_all() { finalize_internal(); }
-#endif
+void Kokkos::finalize() {
+ if (!kokkos_initialize_was_called()) {
+ Kokkos::abort(
+ "Error: Kokkos::finalize() may only be called after Kokkos has been "
+ "initialized.\n");
+ }
+ if (kokkos_finalize_was_called()) {
+ Kokkos::abort("Error: Kokkos::finalize() has already been called.\n");
+ }
+ pre_finalize_internal();
+ Impl::ExecSpaceManager::get_instance().finalize_spaces();
+ post_finalize_internal();
+}
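+// The intended lifecycle is thus strictly initialize-once / finalize-once
+// (illustrative sketch; all views must be destroyed before finalize):
+//   Kokkos::initialize(argc, argv);
+//   { /* ... Kokkos work ... */ }
+//   Kokkos::finalize();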
#ifdef KOKKOS_COMPILER_INTEL
void Kokkos::fence() { fence("Kokkos::fence: Unnamed Global Fence"); }
void print_helper(std::ostream& os,
const std::map<std::string, std::string>& print_me) {
for (const auto& kv : print_me) {
- os << kv.first << ": " << kv.second << '\n';
+ os << " " << kv.first << ": " << kv.second << '\n';
}
}
} // namespace
Impl::ExecSpaceManager::get_instance().print_configuration(os, verbose);
}
-KOKKOS_ATTRIBUTE_NODISCARD bool Kokkos::is_initialized() noexcept {
+[[nodiscard]] bool Kokkos::is_initialized() noexcept {
return g_is_initialized;
}
-KOKKOS_ATTRIBUTE_NODISCARD bool Kokkos::is_finalized() noexcept {
- return g_is_finalized;
-}
+[[nodiscard]] bool Kokkos::is_finalized() noexcept { return g_is_finalized; }
bool Kokkos::show_warnings() noexcept { return g_show_warnings; }
bool Kokkos::tune_internals() noexcept { return g_tune_internals; }
-
-namespace Kokkos {
-
-#ifdef KOKKOS_COMPILER_PGI
-namespace Impl {
-// Bizzarely, an extra jump instruction forces the PGI compiler to not have a
-// bug related to (probably?) empty base optimization and/or aggregate
-// construction.
-void _kokkos_pgi_compiler_bug_workaround() {}
-} // end namespace Impl
-#endif
-} // namespace Kokkos
-
-Kokkos::Impl::InitializationSettingsHelper<std::string>::storage_type const
- Kokkos::Impl::InitializationSettingsHelper<std::string>::unspecified =
- "some string we don't expect user would ever provide";
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_HOST_GRAPHNODEKERNEL_HPP
#define KOKKOS_KOKKOS_HOST_GRAPHNODEKERNEL_HPP
// TODO @graphs decide if this should use vtable or intrusive erasure via
// function pointers like in the rest of the graph interface
virtual void execute_kernel() = 0;
+
+ GraphNodeKernelDefaultImpl() = default;
+
+ explicit GraphNodeKernelDefaultImpl(ExecutionSpace exec)
+ : m_execution_space(std::move(exec)) {}
+
+ ExecutionSpace m_execution_space;
};
// TODO Indicate that this kernel specialization is only for the Host somehow?
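+// Note: the base class order below now puts GraphNodeKernelDefaultImpl first,
+// presumably so that its execution space member is constructed before the
+// pattern implementation base (base classes initialize in declaration order).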
template <class ExecutionSpace, class PolicyType, class Functor,
class PatternTag, class... Args>
class GraphNodeKernelImpl
- : public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
- Args..., ExecutionSpace>::type,
- public GraphNodeKernelDefaultImpl<ExecutionSpace> {
+ : public GraphNodeKernelDefaultImpl<ExecutionSpace>,
+ public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+ Args..., ExecutionSpace>::type {
public:
using base_t =
typename PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
// TODO @graph kernel name info propagation
template <class PolicyDeduced, class... ArgsDeduced>
- GraphNodeKernelImpl(std::string const&, ExecutionSpace const&,
- Functor arg_functor, PolicyDeduced&& arg_policy,
- ArgsDeduced&&... args)
- : base_t(std::move(arg_functor), (PolicyDeduced &&) arg_policy,
- (ArgsDeduced &&) args...),
- execute_kernel_vtable_base_t() {}
+ GraphNodeKernelImpl(std::string const &, ExecutionSpace const &,
+ Functor arg_functor, PolicyDeduced &&arg_policy,
+ ArgsDeduced &&...args)
+ : execute_kernel_vtable_base_t(arg_policy.space()),
+ base_t(std::move(arg_functor), (PolicyDeduced &&)arg_policy,
+ (ArgsDeduced &&)args...) {}
// FIXME @graph Forward through the instance once that works in the backends
template <class PolicyDeduced, class... ArgsDeduced>
- GraphNodeKernelImpl(ExecutionSpace const& ex, Functor arg_functor,
- PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
+ GraphNodeKernelImpl(ExecutionSpace const &ex, Functor arg_functor,
+ PolicyDeduced &&arg_policy, ArgsDeduced &&...args)
: GraphNodeKernelImpl("", ex, std::move(arg_functor),
- (PolicyDeduced &&) arg_policy,
- (ArgsDeduced &&) args...) {}
+ (PolicyDeduced &&)arg_policy,
+ (ArgsDeduced &&)args...) {
+    // FIXME This constructor seems to be unused.
+ }
- void execute_kernel() final { this->base_t::execute(); }
+ void execute_kernel() override final { this->base_t::execute(); }
};
// </editor-fold> end GraphNodeKernelImpl }}}1
using is_graph_kernel = std::true_type;
};
using graph_kernel = GraphNodeAggregateKernelDefaultImpl;
- void execute_kernel() final {}
+ void execute_kernel() override final {}
};
} // end namespace Impl
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_HOST_GRAPHNODE_IMPL_HPP
#define KOKKOS_KOKKOS_HOST_GRAPHNODE_IMPL_HPP
GraphNodeBackendSpecificDetails(GraphNodeBackendSpecificDetails&&) noexcept =
delete;
- GraphNodeBackendSpecificDetails& operator =(
+ GraphNodeBackendSpecificDetails& operator=(
GraphNodeBackendSpecificDetails const&) = delete;
- GraphNodeBackendSpecificDetails& operator =(
+ GraphNodeBackendSpecificDetails& operator=(
GraphNodeBackendSpecificDetails&&) noexcept = delete;
~GraphNodeBackendSpecificDetails() = default;
m_is_aggregate = true;
}
+ // A node is awaitable if it can execute a kernel.
+ // A root node or an aggregate node cannot be waited for, because it does
+ // not launch anything.
+ bool awaitable() const { return (!m_is_root) && (!m_is_aggregate); }
+
+  // Retrieve the execution space instance that was passed to the kernel
+  // at construction.
+ const ExecutionSpace& get_execution_space() const {
+ KOKKOS_EXPECTS(m_kernel_ptr != nullptr)
+ return m_kernel_ptr->m_execution_space;
+ }
+
void set_predecessor(
std::shared_ptr<GraphNodeBackendSpecificDetails<ExecutionSpace>>
arg_pred_impl) {
m_predecessors.push_back(std::move(arg_pred_impl));
}
- void execute_node() {
+ void execute_node(const ExecutionSpace& exec) {
// This node could have already been executed as the predecessor of some
  // other node.
KOKKOS_EXPECTS(bool(m_kernel_ptr) || m_has_executed)
  // supported semantics, but my instinct is that it should be...
m_has_executed = true;
for (auto const& predecessor : m_predecessors) {
- predecessor->execute_node();
+ predecessor->execute_node(exec);
}
+
+ // Before executing the kernel, be sure to fence the execution space
+ // instance of predecessors.
+ for (const auto& predecessor : m_predecessors) {
+ if (predecessor->awaitable() &&
+ predecessor->get_execution_space() != this->get_execution_space())
+ predecessor->get_execution_space().fence(
+ "Kokkos::DefaultGraphNode::execute_node: sync with predecessors");
+ }
+
m_kernel_ptr->execute_kernel();
}
KOKKOS_ENSURES(m_has_executed)
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HOST_GRAPH_IMPL_HPP
#define KOKKOS_HOST_GRAPH_IMPL_HPP
#include <impl/Kokkos_GraphImpl_fwd.hpp>
#include <impl/Kokkos_Default_Graph_fwd.hpp>
-#include <Kokkos_Serial.hpp>
-#include <Kokkos_OpenMP.hpp>
+#include <Serial/Kokkos_Serial.hpp>
+#include <OpenMP/Kokkos_OpenMP.hpp>
// FIXME @graph other backends?
#include <impl/Kokkos_OptionalRef.hpp>
//----------------------------------------------------------------------------
// <editor-fold desc="Constructors, destructor, and assignment"> {{{2
- // Not moveable or copyable; it spends its whole live as a shared_ptr in the
+  // Not movable or copyable; it spends its whole life as a shared_ptr in the
// Graph object
- GraphImpl() = default;
- GraphImpl(GraphImpl const&) = delete;
- GraphImpl(GraphImpl&&) = delete;
+ GraphImpl() = default;
+ GraphImpl(GraphImpl const&) = delete;
+ GraphImpl(GraphImpl&&) = delete;
GraphImpl& operator=(GraphImpl const&) = delete;
- GraphImpl& operator=(GraphImpl&&) = delete;
- ~GraphImpl() = default;
+ GraphImpl& operator=(GraphImpl&&) = delete;
+ ~GraphImpl() = default;
explicit GraphImpl(ExecutionSpace arg_space)
: execution_space_instance_storage_base_t(std::move(arg_space)) {}
template <class NodeImpl>
// requires NodeImplPtr is a shared_ptr to specialization of GraphNodeImpl
void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr) {
- static_assert(
- NodeImpl::kernel_type::Policy::is_graph_kernel::value,
- "Something has gone horribly wrong, but it's too complicated to "
- "explain here. Buy Daisy a coffee and she'll explain it to you.");
+ static_assert(NodeImpl::kernel_type::Policy::is_graph_kernel::value);
// Since this is always called before any calls to add_predecessor involving
// it, we can treat this node as a sink until we discover otherwise.
arg_node_ptr->node_details_t::set_kernel(arg_node_ptr->get_kernel());
return rv;
}
- void submit() {
+ void instantiate() {
+ KOKKOS_EXPECTS(!m_has_been_instantiated);
+ m_has_been_instantiated = true;
+ }
+
+ void submit(const ExecutionSpace& exec) {
+ if (!m_has_been_instantiated) instantiate();
// This reset is gross, but for the purposes of our simple host
// implementation...
for (auto& sink : m_sinks) {
sink->reset_has_executed();
}
+
+ // We don't know where the nodes will execute, so we need to fence the given
+ // execution space instance before proceeding. This is the simplest way
+ // of guaranteeing that the kernels in the graph are correctly "enqueued".
+ exec.fence(
+ "Kokkos::DefaultGraph::submit: fencing before launching graph nodes");
+
for (auto& sink : m_sinks) {
- sink->execute_node();
+ sink->execute_node(exec);
+ }
+
+  // Once all sinks have executed, fence any of their execution space
+  // instances that differ from the submitting one.
+ for (const auto& sink : m_sinks) {
+ if (sink->awaitable() && sink->get_execution_space() != exec)
+ sink->get_execution_space().fence(
+ "Kokkos::DefaultGraph::submit: fencing before ending graph submit");
}
}
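+
+  // Illustrative usage (a sketch, not part of this file; names and functors
+  // are placeholders): with the public Kokkos::Experimental graph API, a
+  // submit on this default host implementation roughly behaves like
+  //
+  //   auto graph = Kokkos::Experimental::create_graph(exec, [&](auto root) {
+  //     auto a = root.then_parallel_for("A", n, functor_a);
+  //     a.then_parallel_for("B", n, functor_b);
+  //   });
+  //   graph.submit(exec);  // fence exec, execute sinks, fence stragglers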
+ private:
+ bool m_has_been_instantiated = false;
+
// </editor-fold> end required customizations }}}2
//----------------------------------------------------------------------------
};
} // end namespace Kokkos
-#include <OpenMP/Kokkos_OpenMP_Parallel.hpp>
-
#include <impl/Kokkos_Default_GraphNodeKernel.hpp>
#include <impl/Kokkos_Default_GraphNode_Impl.hpp>
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
+#define KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace>
+struct GraphNodeKernelDefaultImpl;
+
+template <class ExecutionSpace>
+struct GraphNodeAggregateKernelDefaultImpl;
+
+} // end namespace Impl
+} // end namespace Kokkos
+
+#endif // KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DESUL_ATOMICS_CONFIG_HPP
+#define KOKKOS_DESUL_ATOMICS_CONFIG_HPP
+
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL)
+#define DESUL_CUDA_ARCH_IS_PRE_PASCAL
+#endif
+
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
+ defined(KOKKOS_ARCH_PASCAL)
+#define DESUL_CUDA_ARCH_IS_PRE_VOLTA
+#endif
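+
+// Illustrative use (a sketch; desul's actual guards may differ): downstream
+// atomics code can branch on these macros, e.g.
+//
+//   #ifdef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+//   // fall back to CAS loops where native double atomicAdd is unavailable
+//   #endif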
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_DEVICE_MANAGEMENT_HPP
+#define KOKKOS_DEVICE_MANAGEMENT_HPP
+
+#include <optional>
+#include <vector>
+
+namespace Kokkos {
+class InitializationSettings;
+namespace Impl {
+std::optional<int> get_gpu(const Kokkos::InitializationSettings& settings);
+// This declaration is provided for testing purposes only.
+int get_ctest_gpu(int local_rank);
+std::vector<int> get_visible_devices(int device_count); // test-only
+std::vector<int> const& get_visible_devices(); // use this instead
+} // namespace Impl
+} // namespace Kokkos
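+
+// Illustrative use (a sketch; `settings` is an assumed
+// InitializationSettings instance): initialization code can consult
+// get_gpu() to decide whether a specific device was requested:
+//
+//   if (std::optional<int> id = Kokkos::Impl::get_gpu(settings)) {
+//     // bind the GPU backend to device *id
+//   }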
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_EBO_HPP
#define KOKKOS_EBO_HPP
template <class T, template <class...> class CtorNotOnDevice>
struct EBOBaseImpl<T, true, CtorNotOnDevice> {
template <class... Args, class _ignored = void,
- std::enable_if_t<std::is_void<_ignored>::value &&
- std::is_constructible<T, Args...>::value &&
+ std::enable_if_t<std::is_void_v<_ignored> &&
+ std::is_constructible_v<T, Args...> &&
!CtorNotOnDevice<Args...>::value,
int> = 0>
KOKKOS_FORCEINLINE_FUNCTION constexpr explicit EBOBaseImpl(
Args&&...) noexcept {}
template <class... Args, class _ignored = void,
- std::enable_if_t<std::is_void<_ignored>::value &&
- std::is_constructible<T, Args...>::value &&
+ std::enable_if_t<std::is_void_v<_ignored> &&
+ std::is_constructible_v<T, Args...> &&
CtorNotOnDevice<Args...>::value,
long> = 0>
inline constexpr explicit EBOBaseImpl(Args&&...) noexcept {}
T m_ebo_object;
template <class... Args, class _ignored = void,
- std::enable_if_t<std::is_void<_ignored>::value &&
+ std::enable_if_t<std::is_void_v<_ignored> &&
!CTorsNotOnDevice<Args...>::value &&
- std::is_constructible<T, Args...>::value,
+ std::is_constructible_v<T, Args...>,
int> = 0>
KOKKOS_FORCEINLINE_FUNCTION constexpr explicit EBOBaseImpl(
Args&&... args) noexcept(noexcept(T(std::forward<Args>(args)...)))
: m_ebo_object(std::forward<Args>(args)...) {}
template <class... Args, class _ignored = void,
- std::enable_if_t<std::is_void<_ignored>::value &&
+ std::enable_if_t<std::is_void_v<_ignored> &&
CTorsNotOnDevice<Args...>::value &&
- std::is_constructible<T, Args...>::value,
+ std::is_constructible_v<T, Args...>,
long> = 0>
inline constexpr explicit EBOBaseImpl(Args&&... args) noexcept(
noexcept(T(std::forward<Args>(args)...)))
template <class T,
template <class...> class CtorsNotOnDevice = NoCtorsNotOnDevice>
struct StandardLayoutNoUniqueAddressMemberEmulation
- : EBOBaseImpl<T, std::is_empty<T>::value, CtorsNotOnDevice> {
+ : EBOBaseImpl<T, std::is_empty_v<T>, CtorsNotOnDevice> {
private:
- using ebo_base_t = EBOBaseImpl<T, std::is_empty<T>::value, CtorsNotOnDevice>;
+ using ebo_base_t = EBOBaseImpl<T, std::is_empty_v<T>, CtorsNotOnDevice>;
public:
using ebo_base_t::ebo_base_t;
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <stdexcept>
+#include <Kokkos_Core.hpp> // show_warnings
+#include <impl/Kokkos_Error.hpp>
+
+void Kokkos::Impl::throw_runtime_exception(const std::string &msg) {
+ throw std::runtime_error(msg);
+}
+
+void Kokkos::Impl::throw_bad_alloc(std::string_view memory_space_name,
+ std::size_t size, std::string_view label) {
+ std::stringstream ss;
+ ss << "Kokkos ERROR: " << memory_space_name
+ << " memory space failed to allocate " << human_memory_size(size)
+ << " (label=\"" << label << "\").";
+ throw std::runtime_error(ss.str());
+}
+
+void Kokkos::Impl::log_warning(const std::string &msg) {
+ if (show_warnings()) {
+ std::cerr << msg << std::flush;
+ }
+}
+
+std::string Kokkos::Impl::human_memory_size(size_t arg_bytes) {
+ double bytes = arg_bytes;
+ const double K = 1024;
+ const double M = K * 1024;
+ const double G = M * 1024;
+ const double T = G * 1024;
+
+ std::ostringstream out;
+ if (bytes < K) {
+ out << std::setprecision(4) << bytes << " B";
+ } else if (bytes < M) {
+ bytes /= K;
+ out << std::setprecision(4) << bytes << " KiB";
+ } else if (bytes < G) {
+ bytes /= M;
+ out << std::setprecision(4) << bytes << " MiB";
+ } else if (bytes < T) {
+ bytes /= G;
+ out << std::setprecision(4) << bytes << " GiB";
+ } else {
+ bytes /= T;
+ out << std::setprecision(4) << bytes << " TiB";
+ }
+ return out.str();
+}
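+
+// Illustrative results (a sketch): human_memory_size(512) yields "512 B",
+// human_memory_size(1536) yields "1.5 KiB", and human_memory_size(3ULL << 30)
+// yields "3 GiB". std::setprecision(4) caps output at four significant
+// digits.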
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_ERROR_HPP
+#define KOKKOS_IMPL_ERROR_HPP
+
+#include <string>
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Abort.hpp>
+#include <Kokkos_Assert.hpp>
+
+namespace Kokkos::Impl {
+
+[[noreturn]] void throw_runtime_exception(const std::string &msg);
+[[noreturn]] void throw_bad_alloc(std::string_view memory_space_name,
+ std::size_t size, std::string_view label);
+void log_warning(const std::string &msg);
+
+std::string human_memory_size(size_t bytes);
+
+} // namespace Kokkos::Impl
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <sstream>
+
+namespace Kokkos {
+namespace Impl {
+PerTeamValue::PerTeamValue(size_t arg) : value(arg) {}
+
+PerThreadValue::PerThreadValue(size_t arg) : value(arg) {}
+} // namespace Impl
+
+Impl::PerTeamValue PerTeam(const size_t& arg) {
+ return Impl::PerTeamValue(arg);
+}
+
+Impl::PerThreadValue PerThread(const size_t& arg) {
+ return Impl::PerThreadValue(arg);
+}
+
+void team_policy_check_valid_storage_level_argument(int level) {
+ if (!(level == 0 || level == 1)) {
+ std::stringstream ss;
+ ss << "TeamPolicy::set_scratch_size(/*level*/ " << level
+ << ", ...) storage level argument must be 0 or 1 to be valid\n";
+ abort(ss.str().c_str());
+ }
+}
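+
+// Illustrative call site (a sketch, not from this file; league_size is an
+// assumed variable): PerTeam/PerThread values feed TeamPolicy scratch
+// requests, where the storage level must be 0 or 1:
+//
+//   auto policy = Kokkos::TeamPolicy<>(league_size, Kokkos::AUTO)
+//                     .set_scratch_size(0, Kokkos::PerTeam(1024),
+//                                       Kokkos::PerThread(64));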
+
+} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_EXEC_SPACE_MANAGER_HPP
+#define KOKKOS_EXEC_SPACE_MANAGER_HPP
+
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <Kokkos_DetectionIdiom.hpp>
+#include <Kokkos_Concepts.hpp>
+
+#include <iosfwd>
+#include <map>
+#include <string>
+#include <utility>
+
+namespace {
+
+template <class T>
+using public_member_types_t = std::enable_if_t<
+ Kokkos::is_execution_space_v<typename T::execution_space> &&
+ Kokkos::is_memory_space_v<typename T::memory_space> &&
+ Kokkos::is_device_v<typename T::device_type> &&
+ Kokkos::is_array_layout_v<typename T::array_layout> &&
+ std::is_integral_v<typename T::size_type> &&
+ Kokkos::is_memory_space_v<typename T::scratch_memory_space>>;
+
+template <class T>
+using print_configuration_t = std::enable_if_t<
+ std::is_void_v<decltype(std::declval<T const&>().print_configuration(
+ std::declval<std::ostream&>()))> &&
+ std::is_void_v<decltype(std::declval<T const&>().print_configuration(
+ std::declval<std::ostream&>(), false))>>;
+
+template <class T>
+using initialize_finalize_t = std::enable_if_t<
+ std::is_void_v<decltype(T::impl_initialize(
+ std::declval<Kokkos::InitializationSettings const&>()))> &&
+ std::is_void_v<decltype(T::impl_finalize())>>;
+
+template <class T>
+using fence_t = std::enable_if_t<
+ std::is_void_v<decltype(std::declval<T const&>().fence())> &&
+ std::is_void_v<decltype(std::declval<T const&>().fence("name"))> &&
+ std::is_void_v<decltype(T::impl_static_fence("name"))>>;
+
+template <class T>
+using concurrency_t = std::enable_if_t<
+ std::is_same_v<int, decltype(std::declval<T const&>().concurrency())>>;
+
+template <class T>
+constexpr bool check_is_semiregular() {
+ static_assert(std::is_default_constructible_v<T>);
+ static_assert(std::is_copy_constructible_v<T>);
+ static_assert(std::is_move_constructible_v<T>);
+ static_assert(std::is_copy_assignable_v<T>);
+ static_assert(std::is_move_assignable_v<T>);
+ static_assert(std::is_destructible_v<T>);
+ return true;
+}
+
+template <class T>
+using equal_to_t =
+ decltype(std::declval<T const&>() == std::declval<T const&>());
+
+template <class T>
+using not_equal_to_t =
+ decltype(std::declval<T const&>() != std::declval<T const&>());
+
+template <class T>
+constexpr bool check_is_equality_comparable() {
+ using Kokkos::is_detected_exact_v;
+ static_assert(is_detected_exact_v<bool, equal_to_t, T>);
+ static_assert(is_detected_exact_v<bool, not_equal_to_t, T>);
+ return true;
+}
+
+template <class T>
+constexpr bool check_is_regular() {
+ static_assert(check_is_semiregular<T>() && check_is_equality_comparable<T>());
+ return true;
+}
+
+template <class ExecutionSpace>
+constexpr bool check_valid_execution_space() {
+ using Kokkos::is_detected_v;
+ static_assert(std::is_default_constructible_v<ExecutionSpace>);
+ static_assert(is_detected_v<public_member_types_t, ExecutionSpace>);
+ static_assert(is_detected_v<print_configuration_t, ExecutionSpace>);
+ static_assert(is_detected_v<initialize_finalize_t, ExecutionSpace>);
+ static_assert(is_detected_v<fence_t, ExecutionSpace>);
+ static_assert(is_detected_v<concurrency_t, ExecutionSpace>);
+ static_assert(sizeof(ExecutionSpace) <= 2 * sizeof(void*));
+ return true;
+}
+
+} // namespace
+
+namespace Kokkos {
+namespace Impl {
+
+struct ExecSpaceBase {
+ virtual void initialize(InitializationSettings const&) = 0;
+ virtual void finalize() = 0;
+ virtual void static_fence(std::string const&) = 0;
+ virtual void print_configuration(std::ostream& os, bool verbose) = 0;
+ virtual ~ExecSpaceBase() = default;
+};
+
+template <class ExecutionSpace>
+struct ExecSpaceDerived : ExecSpaceBase {
+ static_assert(check_valid_execution_space<ExecutionSpace>());
+ static_assert(check_is_regular<ExecutionSpace>());
+ void initialize(InitializationSettings const& settings) override final {
+ ExecutionSpace::impl_initialize(settings);
+ }
+ void finalize() override final { ExecutionSpace::impl_finalize(); }
+ void static_fence(std::string const& label) override final {
+ ExecutionSpace::impl_static_fence(label);
+ }
+ void print_configuration(std::ostream& os, bool verbose) override final {
+ ExecutionSpace().print_configuration(os, verbose);
+ }
+};
+
+/* ExecSpaceManager - Responsible for initializing all the registered
+ * backends. Backends are registered using the register_space_factory()
+ * function, which should be called from a global context so that it runs
+ * prior to initialize_spaces(), which is called from Kokkos::initialize().
+ */
+class ExecSpaceManager {
+ std::map<std::string, std::unique_ptr<ExecSpaceBase>> exec_space_factory_list;
+ ExecSpaceManager() = default;
+
+ public:
+ void register_space_factory(std::string name,
+ std::unique_ptr<ExecSpaceBase> ptr);
+ void initialize_spaces(const Kokkos::InitializationSettings& settings);
+ void finalize_spaces();
+ void static_fence(const std::string&);
+ void print_configuration(std::ostream& os, bool verbose);
+ static ExecSpaceManager& get_instance();
+};
+
+template <class ExecutionSpace>
+int initialize_space_factory(std::string name) {
+ auto space_ptr = std::make_unique<ExecSpaceDerived<ExecutionSpace>>();
+ ExecSpaceManager::get_instance().register_space_factory(name,
+ std::move(space_ptr));
+ return 1;
+}
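+
+// Illustrative registration (a sketch): a backend translation unit registers
+// itself at namespace scope so the factory is recorded before
+// Kokkos::initialize() runs initialize_spaces(). The "100_" prefix is
+// assumed here to order backends within the factory map:
+//
+//   namespace {
+//   int dummy = Kokkos::Impl::initialize_space_factory<Kokkos::Serial>(
+//       "100_Serial");
+//   }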
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_FUNCTORANALYSIS_HPP
#define KOKKOS_FUNCTORANALYSIS_HPP
using type = FunctorPatternInterface::FOR;
};
-template <class FunctorType, class ExecPolicy, class ReducerType,
+template <class CombinedFunctorReducerType, class ExecPolicy,
class ExecutionSpace>
struct DeduceFunctorPatternInterface<
- ParallelReduce<FunctorType, ExecPolicy, ReducerType, ExecutionSpace>> {
+ ParallelReduce<CombinedFunctorReducerType, ExecPolicy, ExecutionSpace>> {
using type = FunctorPatternInterface::REDUCE;
};
/** \brief Query Functor and execution policy argument tag for value type.
*
- * If 'value_type' is not explicitly declared in the functor
- * then attempt to deduce the type from FunctorType::operator()
- * interface used by the pattern and policy.
+ * If 'value_type' is not explicitly declared in the functor and
+ * OverrideValueType is void, then attempt to deduce the type from
+ * FunctorType::operator() interface used by the pattern and policy.
*
* For the REDUCE pattern generate a Reducer and finalization function
* derived from what is available within the functor.
*/
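// Illustrative functor (a sketch, not from this header; x is an assumed View
// member): with an explicit value_type, has_value_type picks the type up
// directly and no operator() deduction is needed:
//
//   struct MaxAbs {
//     using value_type = double;
//     KOKKOS_FUNCTION void operator()(int i, double& upd) const {
//       upd = Kokkos::max(upd, Kokkos::abs(x(i)));
//     }
//   };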
-template <typename PatternInterface, class Policy, class Functor>
+template <typename PatternInterface, class Policy, class Functor,
+ typename OverrideValueType>
struct FunctorAnalysis {
private:
using FOR = FunctorPatternInterface::FOR;
using functor_has_space = has_execution_space<Functor>;
static_assert(!policy_has_space::value || !functor_has_space::value ||
- std::is_same<typename policy_has_space::type,
- typename functor_has_space::type>::value,
+ std::is_same_v<typename policy_has_space::type,
+ typename functor_has_space::type>,
"Execution Policy and Functor execution space must match");
//----------------------------------------
// Check for Functor::value_type, which is either a simple type T or T[]
+ // If the functor doesn't have a value_type alias, use OverrideValueType.
template <typename F, typename = std::false_type>
struct has_value_type {
- using type = void;
+ using type = OverrideValueType;
};
template <typename F>
typename std::is_void<typename F::value_type>::type> {
using type = typename F::value_type;
- static_assert(!std::is_reference<type>::value &&
- std::rank<type>::value <= 1 &&
- std::extent<type>::value == 0,
+ static_assert(!std::is_reference_v<type> && std::rank_v<type> <= 1 &&
+ std::extent_v<type> == 0,
"Kokkos Functor::value_type is T or T[]");
};
//----------------------------------------
- // If Functor::value_type does not exist then evaluate operator(),
- // depending upon the pattern and whether the policy has a work tag,
- // to determine the reduction or scan value_type.
+ // If Functor::value_type does not exist and OverrideValueType is void, then
+ // evaluate operator(), depending upon the pattern and whether the policy has
+ // a work tag, to determine the reduction or scan value_type.
template <typename F, typename P = PatternInterface,
typename V = typename has_value_type<F>::type,
- bool T = std::is_void<Tag>::value>
+ bool T = std::is_void_v<Tag>>
struct deduce_value_type {
using type = V;
};
using candidate_type = typename deduce_value_type<Functor>::type;
enum {
- candidate_is_void = std::is_void<candidate_type>::value,
- candidate_is_array = std::rank<candidate_type>::value == 1
+ candidate_is_void = std::is_void_v<candidate_type>,
+ candidate_is_array = std::rank_v<candidate_type> == 1
};
//----------------------------------------
using value_type = std::remove_extent_t<candidate_type>;
- static_assert(!std::is_const<value_type>::value,
+ static_assert(!std::is_const_v<value_type>,
"Kokkos functor operator reduce argument cannot be const");
private:
private:
template <bool IsArray, class FF>
- KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<IsArray, unsigned>
+ KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<IsArray,
+ unsigned int>
get_length(FF const& f) {
return f.value_count;
}
template <bool IsArray, class FF>
- KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<!IsArray, unsigned>
+ KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<!IsArray,
+ unsigned int>
get_length(FF const&) {
return candidate_is_void ? 0 : 1;
}
!candidate_is_void && !candidate_is_array ? sizeof(ValueType) : 0
};
- KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_count(
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned int value_count(
const Functor& f) {
return FunctorAnalysis::template get_length<candidate_is_array>(f);
}
- KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_size(
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned int value_size(
const Functor& f) {
return FunctorAnalysis::template get_length<candidate_is_array>(f) *
sizeof(ValueType);
//----------------------------------------
template <class Unknown>
- KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_count(
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned int value_count(
const Unknown&) {
return candidate_is_void ? 0 : 1;
}
template <class Unknown>
- KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_size(
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned int value_size(
const Unknown&) {
return candidate_is_void ? 0 : sizeof(ValueType);
}
};
template <class F>
- struct DeduceJoinNoTag<F, std::enable_if_t<(is_reducer<F>::value ||
- (!is_reducer<F>::value &&
- std::is_void<Tag>::value)) &&
- detected_join_no_tag<F>::value>>
+ struct DeduceJoinNoTag<
+ F, std::enable_if_t<(is_reducer<F>::value ||
+ (!is_reducer<F>::value && std::is_void_v<Tag>)) &&
+ detected_join_no_tag<F>::value>>
: public has_join_no_tag_function<F> {
enum : bool { value = true };
};
template <class F>
struct DeduceJoinNoTag<
- F,
- std::enable_if_t<(is_reducer<F>::value ||
- (!is_reducer<F>::value && std::is_void<Tag>::value)) &&
- (!detected_join_no_tag<F>::value &&
- detected_volatile_join_no_tag<F>::value)>>
+ F, std::enable_if_t<(is_reducer<F>::value ||
+ (!is_reducer<F>::value && std::is_void_v<Tag>)) &&
+ (!detected_join_no_tag<F>::value &&
+ detected_volatile_join_no_tag<F>::value)>>
: public has_volatile_join_no_tag_function<F> {
enum : bool { value = true };
+ static_assert(Impl::dependent_false_v<F>,
+ "Reducer with a join() operator taking "
+ "volatile-qualified parameters is no longer supported");
};
template <class F = Functor, typename = void>
detected_volatile_join_tag<F>::value)>>
: public has_volatile_join_tag_function<F> {
enum : bool { value = true };
+ static_assert(Impl::dependent_false_v<F>,
+ "Reducer with a join() operator taking "
+ "volatile-qualified parameters is no longer supported");
};
//----------------------------------------
template <class F>
struct DeduceInitNoTag<
- F, std::enable_if_t<is_reducer<F>::value || (!is_reducer<F>::value &&
- std::is_void<Tag>::value),
+ F, std::enable_if_t<is_reducer<F>::value ||
+ (!is_reducer<F>::value && std::is_void_v<Tag>),
decltype(has_init_no_tag_function<F>::enable_if(
&F::init))>>
: public has_init_no_tag_function<F> {
template <class F>
struct DeduceFinalNoTag<
- F, std::enable_if_t<is_reducer<F>::value || (!is_reducer<F>::value &&
- std::is_void<Tag>::value),
+ F, std::enable_if_t<is_reducer<F>::value ||
+ (!is_reducer<F>::value && std::is_void_v<Tag>),
decltype(has_final_no_tag_function<F>::enable_if(
&F::final))>>
: public has_final_no_tag_function<F> {
struct Reducer {
private:
- Functor const* const m_functor;
+ Functor m_functor;
template <bool IsArray>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<IsArray, int> len() const
- noexcept {
- return m_functor->value_count;
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<IsArray, int> len()
+ const noexcept {
+ return m_functor.value_count;
}
template <bool IsArray>
- KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<!IsArray, int> len() const
- noexcept {
+ KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<!IsArray, int> len()
+ const noexcept {
return candidate_is_void ? 0 : 1;
}
using reference_type = FunctorAnalysis::reference_type;
using functor_type = Functor; // Adapts a functor
+ static constexpr bool has_join_member_function() {
+ return DeduceJoin<>::value;
+ }
+ static constexpr bool has_init_member_function() {
+ return DeduceInit<>::value;
+ }
+ static constexpr bool has_final_member_function() {
+ return DeduceFinal<>::value;
+ }
+
+ KOKKOS_FUNCTION unsigned int value_size() const {
+ return FunctorAnalysis::value_size(m_functor);
+ }
+
+ KOKKOS_FUNCTION unsigned int value_count() const {
+ return FunctorAnalysis::value_count(m_functor);
+ }
+
+ KOKKOS_FUNCTION static constexpr unsigned int static_value_size() {
+ return StaticValueSize;
+ }
+
template <bool is_array = candidate_is_array>
KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_array, reference_type>
reference(ValueType* dst) noexcept {
KOKKOS_INLINE_FUNCTION
void join(ValueType* dst, ValueType const* src) const noexcept {
- DeduceJoin<>::join(m_functor, dst, src);
+ DeduceJoin<>::join(&m_functor, dst, src);
}
- KOKKOS_INLINE_FUNCTION reference_type init(ValueType* const dst) const
- noexcept {
- DeduceInit<>::init(m_functor, dst);
+ KOKKOS_INLINE_FUNCTION reference_type
+ init(ValueType* const dst) const noexcept {
+ DeduceInit<>::init(&m_functor, dst);
return reference(dst);
}
KOKKOS_INLINE_FUNCTION
void final(ValueType* dst) const noexcept {
- DeduceFinal<>::final(m_functor, dst);
+ DeduceFinal<>::final(&m_functor, dst);
}
- Reducer(Reducer const&) = default;
- Reducer(Reducer&&) = default;
+ KOKKOS_INLINE_FUNCTION
+ const Functor& get_functor() const { return m_functor; }
+
+ Reducer(Reducer const&) = default;
+ Reducer(Reducer&&) = default;
Reducer& operator=(Reducer const&) = delete;
- Reducer& operator=(Reducer&&) = delete;
- ~Reducer() = default;
+ Reducer& operator=(Reducer&&) = delete;
+ ~Reducer() = default;
KOKKOS_INLINE_FUNCTION explicit constexpr Reducer(
- Functor const* arg_functor) noexcept
+ Functor const& arg_functor) noexcept
: m_functor(arg_functor) {}
};
};
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_KOKKOS_GRAPHIMPL_HPP
#define KOKKOS_IMPL_KOKKOS_GRAPHIMPL_HPP
static_assert(
Kokkos::Impl::is_specialization_of<NodeType, GraphNodeImpl>::value,
"Kokkos Internal Error in graph interface");
- return std::make_shared<NodeType>((Args &&) args...);
+ return std::make_shared<NodeType>((Args&&)args...);
}
template <class GraphImplWeakPtr, class ExecutionSpace, class Kernel,
Kokkos::Experimental::GraphNodeRef>::value,
"Kokkos Internal Implementation error (bad argument to "
"`GraphAccess::get_node_ptr()`)");
- return ((NodeRef &&) node_ref).get_node_ptr();
+ return ((NodeRef&&)node_ref).get_node_ptr();
}
template <class NodeRef>
Kokkos::Experimental::GraphNodeRef>::value,
"Kokkos Internal Implementation error (bad argument to "
"`GraphAccess::get_graph_weak_ptr()`)");
- return ((NodeRef &&) node_ref).get_graph_weak_ptr();
+ return ((NodeRef&&)node_ref).get_graph_weak_ptr();
}
// </editor-fold> end accessors for private members of public interface }}}2
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_GRAPHIMPL_UTILITIES_HPP
#define KOKKOS_KOKKOS_GRAPHIMPL_UTILITIES_HPP
struct is_compatible_type_erasure<
Template<TSrc, USrc, VSrc>, Template<TDst, UDst, VDst>,
// Because gcc thinks this is ambiguous, we need to add this:
- std::enable_if_t<!std::is_same<TSrc, TDst>::value ||
- !std::is_same<USrc, UDst>::value ||
- !std::is_same<VSrc, VDst>::value>>
- : std::integral_constant<
- bool, is_compatible_type_erasure<TSrc, TDst>::value &&
- is_compatible_type_erasure<USrc, UDst>::value &&
- is_compatible_type_erasure<VSrc, VDst>::value> {};
+ std::enable_if_t<!std::is_same_v<TSrc, TDst> ||
+ !std::is_same_v<USrc, UDst> ||
+ !std::is_same_v<VSrc, VDst>>>
+ : std::bool_constant<is_compatible_type_erasure<TSrc, TDst>::value &&
+ is_compatible_type_erasure<USrc, UDst>::value &&
+ is_compatible_type_erasure<VSrc, VDst>::value> {};
// </editor-fold> end is_compatible_type_erasure }}}1
//==============================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
+#define KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace, class Kernel, class Predecessor>
+struct GraphNodeImpl;
+
+template <class ExecutionSpace>
+struct GraphImpl;
+
+template <class ExecutionSpace, class Policy, class Functor,
+ class KernelTypeTag, class... Args>
+class GraphNodeKernelImpl;
+
+struct _graph_node_kernel_ctor_tag {};
+struct _graph_node_predecessor_ctor_tag {};
+struct _graph_node_is_root_ctor_tag {};
+
+struct GraphAccess;
+
+// Customizable for backends
+template <class ExecutionSpace>
+struct GraphNodeBackendSpecificDetails;
+
+// Customizable for backends
+template <class ExecutionSpace, class Kernel, class PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure;
+
+// TODO move this to a more appropriate place
+struct DoNotExplicitlySpecifyThisTemplateParameter;
+
+struct KernelInGraphProperty {};
+
+struct IsGraphKernelTag {};
+
+} // end namespace Impl
+} // end namespace Kokkos
+
+#endif // KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_KOKKOS_GRAPHNODECUSTOMIZATION_HPP
#define KOKKOS_IMPL_KOKKOS_GRAPHNODECUSTOMIZATION_HPP
GraphNodeBackendDetailsBeforeTypeErasure(
GraphNodeBackendDetailsBeforeTypeErasure&&) = delete;
- GraphNodeBackendDetailsBeforeTypeErasure& operator =(
+ GraphNodeBackendDetailsBeforeTypeErasure& operator=(
GraphNodeBackendDetailsBeforeTypeErasure const&) = delete;
GraphNodeBackendDetailsBeforeTypeErasure& operator=(
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_GRAPHNODEIMPL_HPP
#define KOKKOS_IMPL_GRAPHNODEIMPL_HPP
#include <Kokkos_Core_fwd.hpp>
#include <Kokkos_Graph_fwd.hpp>
-#include <impl/Kokkos_SimpleTaskScheduler.hpp> // ExecutionSpaceInstanceStorage
#include <impl/Kokkos_GraphImpl.hpp>
#include <impl/Kokkos_GraphNodeCustomization.hpp>
template <class... Args>
GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
Args&&... args) noexcept
- : implementation_base_t(_graph_node_is_root_ctor_tag{},
- (Args &&) args...),
+ : implementation_base_t(_graph_node_is_root_ctor_tag{}, (Args&&)args...),
execution_space_storage_base_t(ex) {}
// </editor-fold> end public(-ish) constructors }}}2
//----------------------------------------------------------------------------
// <editor-fold desc="no other constructors"> {{{2
- GraphNodeImpl() = delete;
- GraphNodeImpl(GraphNodeImpl const&) = delete;
- GraphNodeImpl(GraphNodeImpl&&) = delete;
+ GraphNodeImpl() = delete;
+ GraphNodeImpl(GraphNodeImpl const&) = delete;
+ GraphNodeImpl(GraphNodeImpl&&) = delete;
GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
- GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+ GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
// </editor-fold> end no other constructors }}}2
//----------------------------------------------------------------------------
template <class KernelDeduced>
GraphNodeImpl(ExecutionSpace const& ex, _graph_node_kernel_ctor_tag,
KernelDeduced&& arg_kernel)
- : base_t(ex), m_kernel((KernelDeduced &&) arg_kernel) {}
+ : base_t(ex), m_kernel((KernelDeduced&&)arg_kernel) {}
template <class... Args>
GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
Args&&... args)
- : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args &&) args...) {}
+ : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args&&)args...) {}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// <editor-fold desc="Rule of 6 for not copyable or movable"> {{{3
// Not copyable or movable
- GraphNodeImpl() = delete;
- GraphNodeImpl(GraphNodeImpl const&) = delete;
- GraphNodeImpl(GraphNodeImpl&&) = delete;
+ GraphNodeImpl() = delete;
+ GraphNodeImpl(GraphNodeImpl const&) = delete;
+ GraphNodeImpl(GraphNodeImpl&&) = delete;
GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
- GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
- ~GraphNodeImpl() override = default;
+ GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+ ~GraphNodeImpl() override = default;
// </editor-fold> end Rule of 6 for not copyable or movable }}}3
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// <editor-fold desc="Ctors, destructors, and assignment"> {{{2
// Not copyable or movable
- GraphNodeImpl() = delete;
- GraphNodeImpl(GraphNodeImpl const&) = delete;
- GraphNodeImpl(GraphNodeImpl&&) = delete;
+ GraphNodeImpl() = delete;
+ GraphNodeImpl(GraphNodeImpl const&) = delete;
+ GraphNodeImpl(GraphNodeImpl&&) = delete;
GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
- GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
- ~GraphNodeImpl() override = default;
+ GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+ ~GraphNodeImpl() override = default;
// Normal kernel-and-predecessor constructor
template <class KernelDeduced, class PredecessorPtrDeduced>
GraphNodeImpl(ExecutionSpace const& ex, _graph_node_kernel_ctor_tag,
KernelDeduced&& arg_kernel, _graph_node_predecessor_ctor_tag,
PredecessorPtrDeduced&& arg_predecessor)
- : base_t(ex, _graph_node_kernel_ctor_tag{},
- (KernelDeduced &&) arg_kernel),
+ : base_t(ex, _graph_node_kernel_ctor_tag{}, (KernelDeduced&&)arg_kernel),
// The backend gets the ability to store (weak, non-owning) references
        // to the kernel in its final resting place here if it wants. The
        // predecessor is already a pointer, so it doesn't matter that it isn't
        // already at its final address.
backend_details_base_t(ex, this->base_t::get_kernel(), arg_predecessor,
*this),
- m_predecessor_ref((PredecessorPtrDeduced &&) arg_predecessor) {}
+ m_predecessor_ref((PredecessorPtrDeduced&&)arg_predecessor) {}
// Root-tagged constructor
template <class... Args>
GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
Args&&... args)
- : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args &&) args...),
+ : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args&&)args...),
backend_details_base_t(ex, _graph_node_is_root_ctor_tag{}, *this),
m_predecessor_ref() {}
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
-#ifndef KOKKOS_HALF_HPP_
-#define KOKKOS_HALF_HPP_
-#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE
-#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
-#endif
+#ifndef KOKKOS_HALF_FLOATING_POINT_WRAPPER_HPP_
+#define KOKKOS_HALF_FLOATING_POINT_WRAPPER_HPP_
-#include <type_traits>
#include <Kokkos_Macros.hpp>
+#include <Kokkos_BitManipulation.hpp> // bit_cast
+
+#include <type_traits>
#include <iosfwd> // istream & ostream for extraction and insertion ops
#include <string>
+namespace Kokkos::Experimental::Impl {
+/// @brief Templated struct for detecting whether a type is the half_t (binary16) wrapper.
+/// @tparam T The type to specialize on.
+template <class T>
+struct is_float16 : std::false_type {};
+
+/// @brief Templated struct for detecting whether a type is the bhalf_t (bfloat16) wrapper.
+/// @tparam T The type to specialize on.
+template <class T>
+struct is_bfloat16 : std::false_type {};
+} // namespace Kokkos::Experimental::Impl
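+
+// Illustrative note (not part of the upstream header): these traits let
+// generic code detect, at compile time, whether a type is a genuine 16-bit
+// floating point wrapper. On platforms where a native binary16 type exists,
+// the specialization further below makes the following hold (sketch):
+//
+//   static_assert(Kokkos::Experimental::Impl::is_float16<
+//                 Kokkos::Experimental::half_t>::value);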
+
#ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
// KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH: A macro to select which
// Declare half_t (binary16)
using half_t = Kokkos::Experimental::Impl::floating_point_wrapper<
    Kokkos::Impl::half_impl_t::type>;
+namespace Impl {
+template <>
+struct is_float16<half_t> : std::true_type {};
+} // namespace Impl
KOKKOS_INLINE_FUNCTION
half_t cast_to_half(float val);
KOKKOS_INLINE_FUNCTION
#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
using bhalf_t = Kokkos::Experimental::Impl::floating_point_wrapper<
    Kokkos::Impl::bhalf_impl_t::type>;
-
+namespace Impl {
+template <>
+struct is_bfloat16<bhalf_t> : std::true_type {};
+} // namespace Impl
KOKKOS_INLINE_FUNCTION
bhalf_t cast_to_bhalf(float val);
KOKKOS_INLINE_FUNCTION
template <class T>
static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::half_t cast_to_wrapper(
- T x, const volatile Kokkos::Impl::half_impl_t::type&);
+ T x, const Kokkos::Impl::half_impl_t::type&);
#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
template <class T>
static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::bhalf_t cast_to_wrapper(
- T x, const volatile Kokkos::Impl::bhalf_impl_t::type&);
+ T x, const Kokkos::Impl::bhalf_impl_t::type&);
#endif // KOKKOS_IMPL_BHALF_TYPE_DEFINED
template <class T>
/************************** END forward declarations **************************/
namespace Impl {
+
+template <typename FloatType>
+struct BitComparisonWrapper {
+ std::uint16_t value;
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator==(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) == b;
+ }
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator!=(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) != b;
+ }
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator<(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) < b;
+ }
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator<=(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) <= b;
+ }
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator>(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) > b;
+ }
+
+ template <typename Number>
+ KOKKOS_FUNCTION friend bool operator>=(BitComparisonWrapper a, Number b) {
+ return static_cast<FloatType>(a) >= b;
+ }
+};
+
+template <typename FloatType>
+inline constexpr BitComparisonWrapper<FloatType> exponent_mask;
+template <typename FloatType>
+inline constexpr BitComparisonWrapper<FloatType> fraction_mask;
+
+#ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
+template <>
+inline constexpr BitComparisonWrapper<Kokkos::Experimental::half_t>
+ exponent_mask<Kokkos::Experimental::half_t>{0b0'11111'0000000000};
+template <>
+inline constexpr BitComparisonWrapper<Kokkos::Experimental::half_t>
+ fraction_mask<Kokkos::Experimental::half_t>{0b0'00000'1111111111};
+#endif
+
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+template <>
+inline constexpr BitComparisonWrapper<Kokkos::Experimental::bhalf_t>
+ exponent_mask<Kokkos::Experimental::bhalf_t>{0b0'11111111'0000000};
+template <>
+inline constexpr BitComparisonWrapper<Kokkos::Experimental::bhalf_t>
+ fraction_mask<Kokkos::Experimental::bhalf_t>{0b0'00000000'1111111};
+#endif
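+
+// Worked example (illustrative, not in the upstream source): for binary16 the
+// exponent mask above is 0x7C00 and the fraction mask is 0x03FF, so a raw
+// std::uint16_t bit pattern `u` of a half_t can be classified without
+// converting to float:
+//
+//   const bool all_exp_set = (u & 0x7C00u) == 0x7C00u;  // Inf or NaN
+//   const bool frac_zero   = (u & 0x03FFu) == 0u;       // Inf iff also above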
+
template <class FloatType>
class alignas(FloatType) floating_point_wrapper {
public:
- using impl_type = FloatType;
+ using impl_type = FloatType;
+ using bit_comparison_type = BitComparisonWrapper<floating_point_wrapper>;
private:
impl_type val;
- using fixed_width_integer_type = std::conditional_t<
- sizeof(impl_type) == 2, uint16_t,
- std::conditional_t<
- sizeof(impl_type) == 4, uint32_t,
- std::conditional_t<sizeof(impl_type) == 8, uint64_t, void>>>;
- static_assert(!std::is_void<fixed_width_integer_type>::value,
- "Invalid impl_type");
public:
// In-class initialization and defaulted default constructors not used
#if defined(_WIN32) && defined(KOKKOS_ENABLE_CUDA)
KOKKOS_FUNCTION
floating_point_wrapper(const floating_point_wrapper& rhs) : val(rhs.val) {}
+
+ KOKKOS_FUNCTION
+ floating_point_wrapper& operator=(const floating_point_wrapper& rhs) {
+ val = rhs.val;
+ return *this;
+ }
#else
KOKKOS_DEFAULTED_FUNCTION
floating_point_wrapper(const floating_point_wrapper&) noexcept = default;
+
+ KOKKOS_DEFAULTED_FUNCTION
+ floating_point_wrapper& operator=(const floating_point_wrapper&) noexcept =
+ default;
#endif
- KOKKOS_INLINE_FUNCTION
- floating_point_wrapper(const volatile floating_point_wrapper& rhs) {
-#if defined(KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH) && !defined(KOKKOS_ENABLE_SYCL)
- val = rhs.val;
-#else
- const volatile fixed_width_integer_type* rv_ptr =
- reinterpret_cast<const volatile fixed_width_integer_type*>(&rhs.val);
- const fixed_width_integer_type rv_val = *rv_ptr;
- val = reinterpret_cast<const impl_type&>(rv_val);
-#endif // KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+ KOKKOS_FUNCTION
+ floating_point_wrapper(bit_comparison_type rhs) {
+ val = Kokkos::bit_cast<impl_type>(rhs);
}
// Don't support implicit conversion back to impl_type.
return *this;
}
- template <class T>
- KOKKOS_FUNCTION void operator=(T rhs) volatile {
- impl_type new_val = cast_to_wrapper(rhs, val).val;
- volatile fixed_width_integer_type* val_ptr =
- reinterpret_cast<volatile fixed_width_integer_type*>(
- const_cast<impl_type*>(&val));
- *val_ptr = reinterpret_cast<fixed_width_integer_type&>(new_val);
- }
-
// Compound operators
KOKKOS_FUNCTION
floating_point_wrapper& operator+=(floating_point_wrapper rhs) {
return *this;
}
- KOKKOS_FUNCTION
- void operator+=(const volatile floating_point_wrapper& rhs) volatile {
- floating_point_wrapper tmp_rhs = rhs;
- floating_point_wrapper tmp_lhs = *this;
-
- tmp_lhs += tmp_rhs;
- *this = tmp_lhs;
- }
-
// Compound operators: upcast overloads for +=
template <class T>
KOKKOS_FUNCTION friend std::enable_if_t<
return *this;
}
- KOKKOS_FUNCTION
- void operator-=(const volatile floating_point_wrapper& rhs) volatile {
- floating_point_wrapper tmp_rhs = rhs;
- floating_point_wrapper tmp_lhs = *this;
-
- tmp_lhs -= tmp_rhs;
- *this = tmp_lhs;
- }
-
  // Compound operators: upcast overloads for -=
template <class T>
KOKKOS_FUNCTION friend std::enable_if_t<
return *this;
}
- KOKKOS_FUNCTION
- void operator*=(const volatile floating_point_wrapper& rhs) volatile {
- floating_point_wrapper tmp_rhs = rhs;
- floating_point_wrapper tmp_lhs = *this;
-
- tmp_lhs *= tmp_rhs;
- *this = tmp_lhs;
- }
-
  // Compound operators: upcast overloads for *=
template <class T>
KOKKOS_FUNCTION friend std::enable_if_t<
return *this;
}
- KOKKOS_FUNCTION
- void operator/=(const volatile floating_point_wrapper& rhs) volatile {
- floating_point_wrapper tmp_rhs = rhs;
- floating_point_wrapper tmp_lhs = *this;
-
- tmp_lhs /= tmp_rhs;
- *this = tmp_lhs;
- }
-
  // Compound operators: upcast overloads for /=
template <class T>
KOKKOS_FUNCTION friend std::enable_if_t<
#endif
}
- KOKKOS_FUNCTION
- friend bool operator==(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs == tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator<(floating_point_wrapper lhs, T rhs) {
+ return static_cast<float>(lhs) < rhs;
}
- KOKKOS_FUNCTION
- friend bool operator!=(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs != tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator<(T lhs, floating_point_wrapper rhs) {
+ return lhs < static_cast<float>(rhs);
}
- KOKKOS_FUNCTION
- friend bool operator<(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs < tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator>(floating_point_wrapper lhs, T rhs) {
+ return static_cast<float>(lhs) > rhs;
}
- KOKKOS_FUNCTION
- friend bool operator>(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs > tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator>(T lhs, floating_point_wrapper rhs) {
+ return lhs > static_cast<float>(rhs);
}
- KOKKOS_FUNCTION
- friend bool operator<=(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs <= tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator<=(floating_point_wrapper lhs, T rhs) {
+ return static_cast<float>(lhs) <= rhs;
}
- KOKKOS_FUNCTION
- friend bool operator>=(const volatile floating_point_wrapper& lhs,
- const volatile floating_point_wrapper& rhs) {
- floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
- return tmp_lhs >= tmp_rhs;
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator<=(T lhs, floating_point_wrapper rhs) {
+ return lhs <= static_cast<float>(rhs);
+ }
+
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator>=(floating_point_wrapper lhs, T rhs) {
+ return static_cast<float>(lhs) >= rhs;
+ }
+
+ template <class T>
+ KOKKOS_FUNCTION friend std::enable_if_t<std::is_convertible_v<T, float> &&
+ (std::is_same_v<T, float> ||
+ std::is_same_v<T, double>),
+ bool>
+ operator>=(T lhs, floating_point_wrapper rhs) {
+ return lhs >= static_cast<float>(rhs);
}
// Insertion and extraction operators
// Declare wrapper overloads now that floating_point_wrapper is declared
template <class T>
static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::half_t cast_to_wrapper(
- T x, const volatile Kokkos::Impl::half_impl_t::type&) {
+ T x, const Kokkos::Impl::half_impl_t::type&) {
return Kokkos::Experimental::cast_to_half(x);
}
#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
template <class T>
static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::bhalf_t cast_to_wrapper(
- T x, const volatile Kokkos::Impl::bhalf_impl_t::type&) {
+ T x, const Kokkos::Impl::bhalf_impl_t::type&) {
return Kokkos::Experimental::cast_to_bhalf(x);
}
#endif // KOKKOS_IMPL_BHALF_TYPE_DEFINED
// example don't include char
template <class T>
KOKKOS_INLINE_FUNCTION std::enable_if_t<
- std::is_same<T, float>::value || std::is_same<T, bool>::value ||
- std::is_same<T, double>::value || std::is_same<T, short>::value ||
- std::is_same<T, unsigned short>::value || std::is_same<T, int>::value ||
- std::is_same<T, unsigned int>::value || std::is_same<T, long>::value ||
- std::is_same<T, unsigned long>::value ||
- std::is_same<T, long long>::value ||
- std::is_same<T, unsigned long long>::value,
+ std::is_same_v<T, float> || std::is_same_v<T, bool> ||
+ std::is_same_v<T, double> || std::is_same_v<T, short> ||
+ std::is_same_v<T, unsigned short> || std::is_same_v<T, int> ||
+ std::is_same_v<T, unsigned int> || std::is_same_v<T, long> ||
+ std::is_same_v<T, unsigned long> || std::is_same_v<T, long long> ||
+ std::is_same_v<T, unsigned long long>,
T>
cast_from_half(half_t val) {
return T(val);
// cast_from_bhalf
template <class T>
KOKKOS_INLINE_FUNCTION std::enable_if_t<
- std::is_same<T, float>::value || std::is_same<T, bool>::value ||
- std::is_same<T, double>::value || std::is_same<T, short>::value ||
- std::is_same<T, unsigned short>::value || std::is_same<T, int>::value ||
- std::is_same<T, unsigned int>::value || std::is_same<T, long>::value ||
- std::is_same<T, unsigned long>::value ||
- std::is_same<T, long long>::value ||
- std::is_same<T, unsigned long long>::value,
+ std::is_same_v<T, float> || std::is_same_v<T, bool> ||
+ std::is_same_v<T, double> || std::is_same_v<T, short> ||
+ std::is_same_v<T, unsigned short> || std::is_same_v<T, int> ||
+ std::is_same_v<T, unsigned int> || std::is_same_v<T, long> ||
+ std::is_same_v<T, unsigned long> || std::is_same_v<T, long long> ||
+ std::is_same_v<T, unsigned long long>,
T>
cast_from_bhalf(bhalf_t val) {
return T(val);
#else
#define KOKKOS_BHALF_T_IS_FLOAT false
#endif // KOKKOS_IMPL_BHALF_TYPE_DEFINED
-#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE
-#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
-#endif
-#endif // KOKKOS_HALF_HPP_
+
+#endif // KOKKOS_HALF_FLOATING_POINT_WRAPPER_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HALF_MATHEMATICAL_FUNCTIONS_HPP_
+#define KOKKOS_HALF_MATHEMATICAL_FUNCTIONS_HPP_
+
+#include <Kokkos_MathematicalFunctions.hpp> // For the float overloads
+#include <Kokkos_BitManipulation.hpp> // bit_cast
+
+// clang-format off
+namespace Kokkos {
+// BEGIN macro definitions
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+ #define KOKKOS_IMPL_MATH_H_FUNC_WRAPPER(MACRO, FUNC) \
+ MACRO(FUNC, Kokkos::Experimental::half_t)
+#else
+ #define KOKKOS_IMPL_MATH_H_FUNC_WRAPPER(MACRO, FUNC)
+#endif
+
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+ #define KOKKOS_IMPL_MATH_B_FUNC_WRAPPER(MACRO, FUNC) \
+ MACRO(FUNC, Kokkos::Experimental::bhalf_t)
+#else
+ #define KOKKOS_IMPL_MATH_B_FUNC_WRAPPER(MACRO, FUNC)
+#endif
+
+#define KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(MACRO, FUNC) \
+ KOKKOS_IMPL_MATH_H_FUNC_WRAPPER(MACRO, FUNC) \
+ KOKKOS_IMPL_MATH_B_FUNC_WRAPPER(MACRO, FUNC)
+
+
+#define KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE(FUNC, HALF_TYPE) \
+ KOKKOS_INLINE_FUNCTION HALF_TYPE FUNC(HALF_TYPE x) { \
+ return static_cast<HALF_TYPE>(Kokkos::FUNC(static_cast<float>(x))); \
+ }
+
+#define KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, MIXED_TYPE) \
+ KOKKOS_INLINE_FUNCTION double FUNC(HALF_TYPE x, MIXED_TYPE y) { \
+ return Kokkos::FUNC(static_cast<double>(x), static_cast<double>(y)); \
+ } \
+ KOKKOS_INLINE_FUNCTION double FUNC(MIXED_TYPE x, HALF_TYPE y) { \
+ return Kokkos::FUNC(static_cast<double>(x), static_cast<double>(y)); \
+ }
+
+#define KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF(FUNC, HALF_TYPE) \
+ KOKKOS_INLINE_FUNCTION HALF_TYPE FUNC(HALF_TYPE x, HALF_TYPE y) { \
+ return static_cast<HALF_TYPE>( \
+ Kokkos::FUNC(static_cast<float>(x), static_cast<float>(y))); \
+ } \
+ KOKKOS_INLINE_FUNCTION float FUNC(float x, HALF_TYPE y) { \
+ return Kokkos::FUNC(static_cast<float>(x), static_cast<float>(y)); \
+ } \
+ KOKKOS_INLINE_FUNCTION float FUNC(HALF_TYPE x, float y) { \
+ return Kokkos::FUNC(static_cast<float>(x), static_cast<float>(y)); \
+ } \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, double) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, short) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, unsigned short) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, int) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, unsigned int) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, long) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, unsigned long) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, long long) \
+ KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF_MIXED(FUNC, HALF_TYPE, unsigned long long)
+
+
+#define KOKKOS_IMPL_MATH_UNARY_PREDICATE_HALF(FUNC, HALF_TYPE) \
+ KOKKOS_INLINE_FUNCTION bool FUNC(HALF_TYPE x) { \
+ return Kokkos::FUNC(static_cast<float>(x)); \
+ }
+
+// END macro definitions
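+
+// For reference (hand-written expansion, not emitted in this header): applied
+// to sqrt via the half_t wrapper macro above,
+// KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE(sqrt, Kokkos::Experimental::half_t)
+// generates an overload that round-trips through the float implementation:
+//
+//   KOKKOS_INLINE_FUNCTION Kokkos::Experimental::half_t sqrt(
+//       Kokkos::Experimental::half_t x) {
+//     return static_cast<Kokkos::Experimental::half_t>(
+//         Kokkos::sqrt(static_cast<float>(x)));
+//   }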
+
+
+// Basic operations
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, abs)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, fabs)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, fmod)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, remainder)
+// remquo
+// fma
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, fmax)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, fmin)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, fdim)
+// nan
+// Exponential functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, exp)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, exp2)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, expm1)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, log)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, log10)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, log2)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, log1p)
+// Power functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, pow)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, sqrt)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, cbrt)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, hypot)
+// Trigonometric functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, sin)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, cos)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, tan)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, asin)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, acos)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, atan)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, atan2)
+// Hyperbolic functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, sinh)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, cosh)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, tanh)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, asinh)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, acosh)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, atanh)
+// Error and gamma functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, erf)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, erfc)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, tgamma)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, lgamma)
+// Nearest integer floating point functions
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, ceil)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, floor)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, trunc)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, round)
+// lround
+// llround
+// FIXME_SYCL not available as of current SYCL 2020 specification (revision 4)
+#ifndef KOKKOS_ENABLE_SYCL // FIXME_SYCL
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, nearbyint)
+#endif
+// rint
+// lrint
+// llrint
+// Floating point manipulation functions
+// frexp
+// ldexp
+// modf
+// scalbn
+// scalbln
+// ilogb
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE, logb)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, nextafter)
+// nexttoward
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF, copysign)
+// Classification and comparison functions
+// fpclassify
+
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isfinite(Kokkos::Experimental::half_t x) {
+ using bit_type = Kokkos::Experimental::half_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::half_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::half_t::impl_type>(x));
+ return (bit_pattern_x.value & exponent_mask.value) != exponent_mask.value;
+}
+#endif
+
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isfinite(Kokkos::Experimental::bhalf_t x) {
+ using bit_type = Kokkos::Experimental::bhalf_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::bhalf_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::bhalf_t::impl_type>(x));
+ return (bit_pattern_x.value & exponent_mask.value) != exponent_mask.value;
+}
+#endif
+
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isinf(Kokkos::Experimental::half_t x) {
+ using bit_type = Kokkos::Experimental::half_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::half_t>;
+ constexpr bit_type fraction_mask = Kokkos::Experimental::Impl::fraction_mask<Kokkos::Experimental::half_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::half_t::impl_type>(x));
+ return (
+ ((bit_pattern_x.value & exponent_mask.value) == exponent_mask.value) &&
+ ((bit_pattern_x.value & fraction_mask.value) == 0));
+}
+#endif
+
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isinf(Kokkos::Experimental::bhalf_t x) {
+ using bit_type = Kokkos::Experimental::bhalf_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::bhalf_t>;
+ constexpr bit_type fraction_mask = Kokkos::Experimental::Impl::fraction_mask<Kokkos::Experimental::bhalf_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::bhalf_t::impl_type>(x));
+ return (
+ ((bit_pattern_x.value & exponent_mask.value) == exponent_mask.value) &&
+ ((bit_pattern_x.value & fraction_mask.value) == 0));
+}
+#endif
+
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isnan(Kokkos::Experimental::half_t x) {
+ using bit_type = Kokkos::Experimental::half_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::half_t>;
+ constexpr bit_type fraction_mask = Kokkos::Experimental::Impl::fraction_mask<Kokkos::Experimental::half_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::half_t::impl_type>(x));
+ return (
+ ((bit_pattern_x.value & exponent_mask.value) == exponent_mask.value) &&
+ ((bit_pattern_x.value & fraction_mask.value) != 0));
+}
+#endif
+
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+KOKKOS_INLINE_FUNCTION bool isnan(Kokkos::Experimental::bhalf_t x) {
+ using bit_type = Kokkos::Experimental::bhalf_t::bit_comparison_type;
+ constexpr bit_type exponent_mask = Kokkos::Experimental::Impl::exponent_mask<Kokkos::Experimental::bhalf_t>;
+ constexpr bit_type fraction_mask = Kokkos::Experimental::Impl::fraction_mask<Kokkos::Experimental::bhalf_t>;
+ const bit_type bit_pattern_x = bit_cast<bit_type>(
+ static_cast<Kokkos::Experimental::bhalf_t::impl_type>(x));
+ return (
+ ((bit_pattern_x.value & exponent_mask.value) == exponent_mask.value) &&
+ ((bit_pattern_x.value & fraction_mask.value) != 0));
+}
+#endif
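+
+// Worked check of the classification logic above (illustrative): take the
+// binary16 quiet NaN pattern 0b0'11111'1000000000 from the numeric-traits
+// header; with the masks defined earlier,
+//
+//   (0b0'11111'1000000000 & exponent_mask.value) == exponent_mask.value  // true
+//   (0b0'11111'1000000000 & fraction_mask.value) != 0                    // true
+//
+// so isnan() returns true, while the infinity pattern 0b0'11111'0000000000
+// has a zero fraction and therefore satisfies isinf() instead.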
+// isnormal
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_UNARY_PREDICATE_HALF, signbit)
+// isgreater
+// isgreaterequal
+// isless
+// islessequal
+// islessgreater
+// isunordered
+// Complex number functions
+#define KOKKOS_IMPL_MATH_COMPLEX_REAL_HALF(FUNC, HALF_TYPE) \
+ KOKKOS_INLINE_FUNCTION HALF_TYPE FUNC(HALF_TYPE x) { return x; }
+
+#define KOKKOS_IMPL_MATH_COMPLEX_IMAG_HALF(FUNC, HALF_TYPE) \
+ KOKKOS_INLINE_FUNCTION HALF_TYPE FUNC(HALF_TYPE) { return 0; }
+
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_COMPLEX_REAL_HALF, real)
+KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER(KOKKOS_IMPL_MATH_COMPLEX_IMAG_HALF, imag)
+
+#undef KOKKOS_IMPL_MATH_COMPLEX_REAL_HALF
+#undef KOKKOS_IMPL_MATH_COMPLEX_IMAG_HALF
+#undef KOKKOS_IMPL_MATH_UNARY_PREDICATE_HALF
+#undef KOKKOS_IMPL_MATH_BINARY_FUNCTION_HALF
+#undef KOKKOS_IMPL_MATH_UNARY_FUNCTION_HALF_TYPE
+#undef KOKKOS_IMPL_MATH_HALF_FUNC_WRAPPER
+#undef KOKKOS_IMPL_MATH_B_FUNC_WRAPPER
+#undef KOKKOS_IMPL_MATH_H_FUNC_WRAPPER
+} // namespace Kokkos
+// clang-format on
+#endif // KOKKOS_HALF_MATHEMATICAL_FUNCTIONS_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HALF_NUMERIC_TRAITS_HPP_
+#define KOKKOS_HALF_NUMERIC_TRAITS_HPP_
+
+#include <Kokkos_NumericTraits.hpp>
+
+////////////// BEGIN HALF_T (binary16) limits //////////////
+// clang-format off
+// The '\brief:' descriptions below are taken from the libc definitions for float and double:
+// https://www.gnu.org/software/libc/manual/html_node/Floating-Point-Parameters.html
+//
+// The arithmetic encoding and equations below are derived from:
+// Ref1: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
+// Ref2: https://en.wikipedia.org/wiki/Exponent_bias
+// Ref3: https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
+//
+// Some background on the magic numbers 2**10=1024 and 2**15=32768 used below:
+//
+// IMPORTANT: For IEEE754 encodings, see Ref1.
+//
+// For binary16, we have B = 2 and 16 bits in total (precision p = 11), giving 2**16 possible bit patterns.
+// The binary16 format is: [s e e e e e f f f f f f f f f f]
+// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+// s: signed bit (1 bit)
+// e: exponent bits (5 bits)
+// f: fractional bits (10 bits)
+//
+// E_bias = 2**(n_exponent_bits - 1) - 1 = 2**(5 - 1) - 1 = 15
+// E_subnormal = 00000 (base2)
+// E_infinity = 11111 (base2)
+// E_min = 1 - E_bias = 1 - 15 = -14
+// E_max = 2**5 - 1 - E_bias = 2**5 - 1 - 15 = 16
+//
+// 2**10=1024 is the denominator of the smallest fraction increment
+// representable in binary16:
+// [s e e e e e f f f f f f f f f f]
+// [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]
+// which contributes 1 / 2**10 = 2**-10 to the significand
+//
+//
+// 2**15 is the largest exponent factor representable in binary16; for example,
+// the largest finite value representable in binary16 is:
+// [s e e e e e f f f f f f f f f f]
+// [0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1]
+// which is: 2**(2**4 + 2**3 + 2**2 + 2**1 - 15) * (1 + 2**-10 + 2**-9 + 2**-8 + 2**-7 + 2**-6 + 2**-5 + 2**-4 + 2**-3 + 2**-2 + 2**-1) =
+// 2**15 * (1 + 0.9990234375) =
+// 65504.0
+//
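+// A quick arithmetic check of the derivation above (illustrative only):
+//
+//   static_assert(32768.0 * (1.0 + 1023.0 / 1024.0) == 65504.0);
+//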
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+/// \brief: Infinity
+///
+/// Binary16 encoding:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+template <>
+struct Kokkos::Experimental::Impl::infinity_helper<Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'11111'0000000000};
+};
+
+/// \brief: Minimum normalized number
+///
+/// Stdc defines this as the smallest number (representable in binary16).
+///
+/// Binary16 encoding:
+/// [s e e e e e f f f f f f f f f f]
+/// [1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: -1 * 2**(2**4 + 2**3 + 2**2 + 2**1 - 15) * (1 + 2**-10 + 2**-9 + 2**-8 + 2**-7 + 2**-6 + 2**-5 + 2**-4 + 2**-3 + 2**-2 + 2**-1)
+/// = -2**15 * (1 + (2**10 - 1) / 2**10)
+template <>
+struct Kokkos::Experimental::Impl::finite_min_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b1'11110'1111111111}; // -65504
+};
+
+/// \brief: Maximum normalized number
+///
+/// Stdc defines this as the maximum number (representable in binary16).
+///
+/// Binary16 encoding:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: 1 * 2**(2**4 + 2**3 + 2**2 + 2**1 - 15) * (1 + 2**-10 + 2**-9 + 2**-8 + 2**-7 + 2**-6 + 2**-5 + 2**-4 + 2**-3 + 2**-2 + 2**-1)
+/// = 2**15 * (1 + (2**10 - 1) / 2**10)
+template <>
+struct Kokkos::Experimental::Impl::finite_max_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'11110'1111111111}; // +65504
+};
+
+/// \brief: This is the difference between 1 and the smallest floating point
+/// number of type binary16 that is greater than 1
+///
+/// Smallest number in binary16 that is greater than 1 encoding:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: 1 * 2**(2**3 + 2**2 + 2**1 + 2**0 - 15) * (1 + 2**-10)
+/// = 2**0 * (1 + 2**-10)
+/// = 1.0009765625
+///
+/// Lastly, 1.0009765625 - 1 = 0.0009765625.
+template <>
+struct Kokkos::Experimental::Impl::epsilon_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'00101'0000000000}; // 0.0009765625
+};
+
+/// \brief: The largest possible rounding error in ULPs
+///
+/// This simply uses the maximum rounding error.
+///
+/// Reference: https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html#689
+template <>
+struct Kokkos::Experimental::Impl::round_error_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'01110'0000000000}; // 0.5
+};
+
+/// \brief: Minimum normalized positive half precision number
+///
+/// Stdc defines this as the minimum normalized positive floating
+/// point number that is representable in type binary16
+///
+/// Minimum normalized positive number in binary16 encoding:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: 1 * 2**(2**0 - 15) * (1)
+/// = 2**-14
+template <>
+struct Kokkos::Experimental::Impl::norm_min_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'00001'0000000000}; // 0.00006103515625
+};
+
+/// \brief: Quiet NaN (not a number) for half precision
+///
+/// IEEE 754 defines this as all exponent bits and the first fraction bit high.
+///
+/// Quiet NaN in binary16:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+template <>
+struct Kokkos::Experimental::Impl::quiet_NaN_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'11111'1000000000};
+};
+
+/// \brief: Signaling NaN (not a number) for half precision
+///
+/// IEEE 754 defines this as all exponent bits and the second fraction bit high.
+///
+/// Signaling NaN in binary16:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 1 1 1 1 1 0 1 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+template <>
+struct Kokkos::Experimental::Impl::signaling_NaN_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr Kokkos::Experimental::half_t::bit_comparison_type value{0b0'11111'0100000000};
+};
+
+/// \brief: Number of digits in the mantissa that can be represented
+/// without losing precision.
+///
+/// Stdc defines this as the number of base-RADIX digits in the floating point mantissa for the binary16 data type.
+///
+/// In binary16, we have 10 fractional bits plus the implicit leading 1.
+template <>
+struct Kokkos::Experimental::Impl::digits_helper<Kokkos::Experimental::half_t> {
+ static constexpr int value = 11;
+};
+
+/// \brief: "The number of base-10 digits that can be represented by the type T without change"
+/// Reference: https://en.cppreference.com/w/cpp/types/numeric_limits/digits10.
+///
+/// "For base-radix types, it is the value of digits() (digits - 1 for floating-point types) multiplied by log10(radix) and rounded down."
+/// Reference: https://en.cppreference.com/w/cpp/types/numeric_limits/digits10.
+///
+/// This is: floor((11 - 1) * log10(2)) = 3
+template <>
+struct Kokkos::Experimental::Impl::digits10_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr int value = 3;
+};
+
+/// \brief: Value of the base of the exponent representation.
+///
+/// Stdc defines this as the value of the base, or radix, of the exponent representation.
+template <>
+struct Kokkos::Experimental::Impl::radix_helper<Kokkos::Experimental::half_t> {
+ static constexpr int value = 2;
+};
+
+/// \brief: This is the smallest possible exponent value
+///
+/// Stdc defines this as the smallest possible exponent value for type binary16.
+/// More precisely, it is the minimum negative integer such that the radix
+/// raised to one less than this power is a normalized floating point number of type binary16.
+///
+/// In binary16:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: 1 * 2**(2**0 - 15) * (1 + 0)
+/// = 2**-14
+///
+/// with a bias of one from (C11 5.2.4.2.2), gives -13.
+template <>
+struct Kokkos::Experimental::Impl::min_exponent_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr int value = -13;
+};
+
+/// \brief: This is the largest possible exponent value
+///
+/// In binary16:
+/// [s e e e e e f f f f f f f f f f]
+/// [0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+/// and in base10: 1 * 2**(2**4 + 2**3 + 2**2 + 2**1 - 15) * (1 + 0)
+/// = 2**(30 - 15)
+/// = 2**15
+///
+/// with a bias of one from (C11 5.2.4.2.2), gives 16.
+template <>
+struct Kokkos::Experimental::Impl::max_exponent_helper<
+ Kokkos::Experimental::half_t> {
+ static constexpr int value = 16;
+};
+#endif
+////////////// END HALF_T (binary16) limits //////////////
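+
+// How these helper specializations surface to users (sketch; assumes the
+// public trait spelling from <Kokkos_NumericTraits.hpp>):
+//
+//   using Kokkos::Experimental::half_t;
+//   const auto max_h = Kokkos::Experimental::finite_max<half_t>::value;
+//   static_assert(Kokkos::Experimental::digits<half_t>::value == 11);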
+
+////////////// BEGIN BHALF_T (bfloat16) limits //////////////
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+/// \brief: Infinity
+///
+/// Bfloat16 encoding:
+/// [s e e e e e e e e f f f f f f f]
+/// [0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0]
+/// bit index: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+///
+template <>
+struct Kokkos::Experimental::Impl::infinity_helper<Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'11111111'0000000};
+};
+
+// Minimum normalized number
+template <>
+struct Kokkos::Experimental::Impl::finite_min_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b1'11111110'1111111}; // -3.38953139e38
+};
+// Maximum normalized number
+template <>
+struct Kokkos::Experimental::Impl::finite_max_helper<
+ Kokkos::Experimental::bhalf_t> {
+  static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'11111110'1111111}; // +3.38953139e38
+};
+// 1/2^7
+template <>
+struct Kokkos::Experimental::Impl::epsilon_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'01111000'0000000}; // 0.0078125
+};
+template <>
+struct Kokkos::Experimental::Impl::round_error_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'01111110'0000000}; // 0.5
+};
+// Minimum normalized positive bhalf number
+template <>
+struct Kokkos::Experimental::Impl::norm_min_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'00000001'0000000}; // 1.175494351e-38
+};
+// Quiet NaN (not a number) for bhalf
+template <>
+struct Kokkos::Experimental::Impl::quiet_NaN_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'11111111'1000000};
+};
+// Signaling NaN (not a number) for bhalf
+template <>
+struct Kokkos::Experimental::Impl::signaling_NaN_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr Kokkos::Experimental::bhalf_t::bit_comparison_type value{0b0'11111111'0100000};
+};
+// Number of digits in the mantissa that can be represented
+// without losing precision.
+template <>
+struct Kokkos::Experimental::Impl::digits_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr int value = 2;
+};
+// floor((digits - 1) * log10(2))
+template <>
+struct Kokkos::Experimental::Impl::digits10_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr int value = 1;
+};
+// Value of the base of the exponent representation.
+template <>
+struct Kokkos::Experimental::Impl::radix_helper<Kokkos::Experimental::bhalf_t> {
+ static constexpr int value = 2;
+};
+// This is the smallest possible exponent value
+// with a bias of one (C11 5.2.4.2.2).
+template <>
+struct Kokkos::Experimental::Impl::min_exponent_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr int value = -125;
+};
+// This is the largest possible exponent value
+// with a bias of one (C11 5.2.4.2.2).
+template <>
+struct Kokkos::Experimental::Impl::max_exponent_helper<
+ Kokkos::Experimental::bhalf_t> {
+ static constexpr int value = 128;
+};
+#endif
+////////////// END BHALF_T (bfloat16) limits //////////
+
+#endif // KOKKOS_HALF_NUMERIC_TRAITS_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_HostBarrier.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+
+#include <thread>
+#if defined(_WIN32)
+#include <process.h>
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+void HostBarrier::impl_backoff_wait_until_equal(
+ int* ptr, const int v, const bool active_wait) noexcept {
+ unsigned count = 0u;
+
+ while (!test_equal(ptr, v)) {
+ const int c = int_log2(++count);
+ if (!active_wait || c > log2_iterations_till_sleep) {
+ std::this_thread::sleep_for(
+ std::chrono::nanoseconds(c < 16 ? 256 * c : 4096));
+ } else if (c > log2_iterations_till_yield) {
+ std::this_thread::yield();
+ }
+#if defined(KOKKOS_ENABLE_ASM)
+#if defined(__PPC64__)
+ for (int j = 0; j < num_nops; ++j) {
+ asm volatile("nop\n");
+ }
+ asm volatile("or 27, 27, 27" ::: "memory");
+#elif defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+ defined(__x86_64__)
+ for (int j = 0; j < num_nops; ++j) {
+ asm volatile("nop\n");
+ }
+ asm volatile("pause\n" ::: "memory");
+#endif
+#endif
+ }
+}
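+
+// Backoff schedule above, worked through (illustrative): every failed poll
+// increments `count` and recomputes c = int_log2(count). Each iteration of
+// the loop ends with the nop/pause sequence; additionally, once c exceeds
+// log2_iterations_till_yield the thread yields, and once it exceeds
+// log2_iterations_till_sleep (or whenever active_wait is false) it sleeps for
+// 256*c ns, capped at 4096 ns from c = 16 onward. For example, after 1024
+// failed polls c = 10, so a sleep at that point would last 2560 ns.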
+} // namespace Impl
+} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_HOST_BARRIER_HPP
#define KOKKOS_HOST_BARRIER_HPP
KOKKOS_INLINE_FUNCTION
void wait() const noexcept { wait(m_buffer, m_size, m_step); }
- HostBarrier() = default;
- HostBarrier(HostBarrier&&) = default;
+ HostBarrier() = default;
+ HostBarrier(HostBarrier&&) = default;
HostBarrier& operator=(HostBarrier&&) = default;
KOKKOS_INLINE_FUNCTION
HostBarrier(int size, int* buffer)
: m_size{size}, m_step{0u}, m_buffer{buffer} {}
- HostBarrier(const HostBarrier&) = delete;
+ HostBarrier(const HostBarrier&) = delete;
HostBarrier& operator=(const HostBarrier&) = delete;
private:
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_HOST_SHARED_PTR_HPP
#define KOKKOS_IMPL_HOST_SHARED_PTR_HPP
template <class Deleter>
HostSharedPtr(T* element_ptr, const Deleter& deleter)
: m_element_ptr(element_ptr) {
-#ifdef KOKKOS_ENABLE_CXX17
static_assert(std::is_invocable_v<Deleter, T*> &&
std::is_copy_constructible_v<Deleter>);
-#endif
if (element_ptr) {
try {
m_control = new Control{deleter, 1};
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdint>
+#include <cstring>
+
+#include <iostream>
+#include <sstream>
+
+#ifdef KOKKOS_COMPILER_INTEL
+#include <aligned_new>
+#endif
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+KOKKOS_DEPRECATED HostSpace::HostSpace(const HostSpace::AllocationMechanism &)
+ : HostSpace() {}
+#endif
+
+void *HostSpace::allocate(const size_t arg_alloc_size) const {
+ return allocate("[unlabeled]", arg_alloc_size);
+}
+void *HostSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+ return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *HostSpace::impl_allocate(
+ const char *arg_label, const size_t arg_alloc_size,
+ const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ const size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ static_assert(sizeof(void *) == sizeof(uintptr_t),
+ "Error sizeof(void*) != sizeof(uintptr_t)");
+
+ static_assert(
+ Kokkos::Impl::is_integral_power_of_two(Kokkos::Impl::MEMORY_ALIGNMENT),
+ "Memory alignment must be power of two");
+
+ constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT;
+ constexpr uintptr_t alignment_mask = alignment - 1;
+
+ void *ptr = nullptr;
+
+ if (arg_alloc_size)
+ ptr = operator new(arg_alloc_size, std::align_val_t(alignment),
+ std::nothrow_t{});
+
+ if (!ptr || (reinterpret_cast<uintptr_t>(ptr) == ~uintptr_t(0)) ||
+ (reinterpret_cast<uintptr_t>(ptr) & alignment_mask)) {
+ Impl::throw_bad_alloc(name(), arg_alloc_size, arg_label);
+ }
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+ }
+ return ptr;
+}
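+
+// The alignment arithmetic above, concretely (illustrative): with
+// MEMORY_ALIGNMENT a power of two, say 64, alignment_mask is 0x3F, and
+//
+//   reinterpret_cast<uintptr_t>(ptr) & alignment_mask  // nonzero iff misaligned
+//
+// so a null, sentinel (~0), or misaligned pointer all funnel into
+// Impl::throw_bad_alloc.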
+
+void HostSpace::deallocate(void *const arg_alloc_ptr,
+ const size_t arg_alloc_size) const {
+ deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HostSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size,
+ const size_t arg_logical_size) const {
+ if (arg_alloc_ptr) Kokkos::fence("HostSpace::impl_deallocate before free");
+ impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HostSpace::impl_deallocate(
+ const char *arg_label, void *const arg_alloc_ptr,
+ const size_t arg_alloc_size, const size_t arg_logical_size,
+ const Kokkos::Tools::SpaceHandle arg_handle) const {
+ if (arg_alloc_ptr) {
+ size_t reported_size =
+ (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+ if (Kokkos::Profiling::profileLibraryLoaded()) {
+ Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+ reported_size);
+ }
+ constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT;
+ operator delete(arg_alloc_ptr, std::align_val_t(alignment),
+ std::nothrow_t{});
+ }
+}
+
+} // namespace Kokkos
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION(Kokkos::HostSpace);
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_HOSTSPACE_ZEROMEMSET_HPP
+#define KOKKOS_HOSTSPACE_ZEROMEMSET_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <impl/Kokkos_ZeroMemset_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct ZeroMemset<HostSpace::execution_space> {
+ ZeroMemset(const HostSpace::execution_space& exec, void* dst, size_t cnt) {
+    // Host execution spaces, except for HPX, are synchronous, and for HPX we
+    // need to fence since we cannot properly enqueue a std::memset otherwise.
+ // We can't use exec.fence() directly since we don't have a full definition
+ // of HostSpace here.
+ hostspace_fence(exec);
+ std::memset(dst, 0, cnt);
+ }
+};
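+
+// Illustrative call site (hypothetical): constructing the functor performs
+// the fill eagerly on the host:
+//
+//   Kokkos::Impl::ZeroMemset<Kokkos::HostSpace::execution_space>(
+//       exec, dst, num_bytes);  // fences exec (HPX), then memset(dst, 0, ...)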
+
+} // end namespace Impl
+} // end namespace Kokkos
+
+#endif // KOKKOS_HOSTSPACE_ZEROMEMSET_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
"Kokkos::Impl::hostspace_parallel_deepcopy_async: fence after copy");
}
-void hostspace_parallel_deepcopy_async(const DefaultHostExecutionSpace& exec,
- void* dst, const void* src,
- ptrdiff_t n) {
- using policy_t = Kokkos::RangePolicy<Kokkos::DefaultHostExecutionSpace>;
+template <typename ExecutionSpace>
+void hostspace_parallel_deepcopy_async(const ExecutionSpace& exec, void* dst,
+ const void* src, ptrdiff_t n) {
+ using policy_t = Kokkos::RangePolicy<ExecutionSpace>;
// If the asynchronous HPX backend is enabled, do *not* copy anything
// synchronously. The deep copy must be correctly sequenced with respect to
// other kernels submitted to the same instance, so we only use the fallback
// parallel_for version in this case.
-#if !(defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH))
+#if !(defined(KOKKOS_ENABLE_HPX) && \
+ defined(KOKKOS_ENABLE_IMPL_HPX_ASYNC_DISPATCH))
constexpr int host_deep_copy_serial_limit = 10 * 8192;
- if ((n < host_deep_copy_serial_limit) ||
- (DefaultHostExecutionSpace().concurrency() == 1)) {
- std::memcpy(dst, src, n);
+ if ((n < host_deep_copy_serial_limit) || (exec.concurrency() == 1)) {
+ if (0 < n) std::memcpy(dst, src, n);
return;
}
}
}
+// Explicit instantiation
+template void hostspace_parallel_deepcopy_async<DefaultHostExecutionSpace>(
+ const DefaultHostExecutionSpace&, void*, const void*, ptrdiff_t);
+
+#if defined(KOKKOS_ENABLE_SERIAL) && \
+ (defined(KOKKOS_ENABLE_OPENMP) || defined(KOKKOS_ENABLE_THREADS) || \
+ defined(KOKKOS_ENABLE_HPX))
+// Instantiate only if both the Serial backend and some other host parallel
+// backend are enabled
+template void hostspace_parallel_deepcopy_async<Kokkos::Serial>(
+ const Kokkos::Serial&, void*, const void*, ptrdiff_t);
+#endif
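+
+// Note on the dispatch above: with host_deep_copy_serial_limit = 10 * 8192,
+// a copy of n = 1000 bytes (or any copy on a single-threaded execution
+// space) takes the std::memcpy fast path, while larger copies on a
+// concurrent backend fall through to the parallel_for-based fallback.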
} // namespace Impl
} // namespace Kokkos
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
+#define KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
+
+#include <cstdint>
+
+namespace Kokkos {
+
+namespace Impl {
+
+void hostspace_fence(const DefaultHostExecutionSpace& exec);
+
+void hostspace_parallel_deepcopy(void* dst, const void* src, ptrdiff_t n);
+// DeepCopy called with an execution space that can't access HostSpace
+void hostspace_parallel_deepcopy_async(void* dst, const void* src, ptrdiff_t n);
+template <typename ExecutionSpace>
+void hostspace_parallel_deepcopy_async(const ExecutionSpace& exec, void* dst,
+ const void* src, ptrdiff_t n);
+} // namespace Impl
+
+} // namespace Kokkos
+
+#endif // KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <Kokkos_Macros.hpp>
#include <impl/Kokkos_HostThreadTeam.hpp>
#include <impl/Kokkos_Error.hpp>
-#include <impl/Kokkos_Spinwait.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// zombie team around (for example m_pool_size = 5 and team_size = 2
// (ii) if team_alloc > team_size then the last team might have less
// threads than the others
- m_team_rank = (team_base_rank + team_size <= m_pool_size) &&
+ m_team_rank = (team_base_rank + team_size <= m_pool_size) &&
(team_alloc_rank < team_size)
- ? team_alloc_rank
- : -1;
+ ? team_alloc_rank
+ : -1;
m_team_size = team_size;
m_team_alloc = team_alloc_size;
m_league_rank = league_rank;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_HOSTTHREADTEAM_HPP
#define KOKKOS_IMPL_HOSTTHREADTEAM_HPP
public:
inline bool team_rendezvous() const noexcept {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // FIXME_OPENMP The tasking framework creates an instance with
+ // m_team_scratch == nullptr and m_team_rendezvous != 0:
+ int* ptr = m_team_scratch == nullptr
+ ? nullptr
+ : reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous);
+#else
int* ptr = reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous);
+#endif
HostBarrier::split_arrive(ptr, m_team_size, m_team_rendezvous_step);
if (m_team_rank != 0) {
HostBarrier::wait(ptr, m_team_size, m_team_rendezvous_step);
inline void team_rendezvous_release() const noexcept {
HostBarrier::split_release(
- reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous), m_team_size,
- m_team_rendezvous_step);
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // FIXME_OPENMP The tasking framework creates an instance with
+ // m_team_scratch == nullptr and m_team_rendezvous != 0:
+ (m_team_scratch == nullptr)
+ ? nullptr
+ : reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous),
+#else
+ reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous),
+#endif
+ m_team_size, m_team_rendezvous_step);
}
inline int pool_rendezvous() const noexcept {
//----------------------------------------
-#ifndef KOKKOS_COMPILER_NVHPC // FIXME_NVHPC bug in NVHPC regarding constexpr
- // constructors used in device code
+#if !defined(KOKKOS_COMPILER_NVHPC) || (KOKKOS_COMPILER_NVHPC >= 230700)
constexpr
#endif
HostThreadTeamData() noexcept
}
int64_t* team_shared() const noexcept {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // FIXME_OPENMP The tasking framework creates an instance with
+ // m_team_scratch == nullptr and m_team_shared != 0
+ if (m_team_scratch == nullptr) return nullptr;
+#endif
return m_team_scratch + m_team_shared;
}
int const num = (m_work_end + m_work_chunk - 1) / m_work_chunk;
int const part = (num + m_league_size - 1) / m_league_size;
- m_work_range.first = part * m_league_rank;
+ m_work_range.first = static_cast<int64_t>(part) * m_league_rank;
m_work_range.second = m_work_range.first + part;
// Steal from next team, round robin
const int i = get_work_stealing();
if (0 <= i) {
- x.first = m_work_chunk * i;
+ x.first = static_cast<int64_t>(m_work_chunk) * i;
x.second = x.first + m_work_chunk < m_work_end ? x.first + m_work_chunk
: m_work_end;
}
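
// Worked example for the 64-bit casts above: m_work_end = 100 elements with
// m_work_chunk = 8 gives num = 13 chunks; with m_league_size = 4, part = 4,
// so team 3 owns chunks [12, 16) and a stolen chunk i = 12 maps to elements
// [96, 100). Casting to int64_t keeps part * rank and chunk * i from
// overflowing int for very large iteration ranges.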
using execution_space = HostExecSpace;
using thread_team_member = HostThreadTeamMember;
using host_thread_team_member = HostThreadTeamMember;
+ using team_handle = HostThreadTeamMember;
private:
scratch_memory_space m_scratch;
public:
constexpr HostThreadTeamMember(HostThreadTeamData& arg_data) noexcept
- : m_scratch(arg_data.team_shared(), arg_data.team_shared_bytes()),
+ : m_scratch(
+ arg_data.team_shared(),
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+ // FIXME_OPENMP The tasking framework creates an instance with
+ // m_team_scratch == nullptr and m_team_shared != 0:
+ (arg_data.team_shared() == nullptr) ? 0
+ : arg_data.team_shared_bytes()
+#else
+ arg_data.team_shared_bytes()
+#endif
+ ),
m_data(arg_data),
m_league_rank(arg_data.m_league_rank),
- m_league_size(arg_data.m_league_size) {}
+ m_league_size(arg_data.m_league_size) {
+ }
constexpr HostThreadTeamMember(HostThreadTeamData& arg_data,
int const arg_league_rank,
m_league_rank(arg_league_rank),
m_league_size(arg_league_size) {}
- ~HostThreadTeamMember() = default;
- HostThreadTeamMember() = delete;
- HostThreadTeamMember(HostThreadTeamMember&&) = default;
- HostThreadTeamMember(HostThreadTeamMember const&) = default;
- HostThreadTeamMember& operator=(HostThreadTeamMember&&) = default;
+ ~HostThreadTeamMember() = default;
+ HostThreadTeamMember() = delete;
+ HostThreadTeamMember(HostThreadTeamMember&&) = default;
+ HostThreadTeamMember(HostThreadTeamMember const&) = default;
+ HostThreadTeamMember& operator=(HostThreadTeamMember&&) = default;
HostThreadTeamMember& operator=(HostThreadTeamMember const&) = default;
//----------------------------------------
//--------------------------------------------------------------------------
template <typename T>
- KOKKOS_INLINE_FUNCTION void team_broadcast(T& value,
- const int source_team_rank) const
- noexcept {
+ KOKKOS_INLINE_FUNCTION void team_broadcast(
+ T& value, const int source_team_rank) const noexcept {
KOKKOS_IF_ON_HOST((if (1 < m_data.m_team_size) {
- T volatile* const shared_value = (T*)m_data.team_reduce();
+ T* const shared_value = (T*)m_data.team_reduce();
// Don't overwrite shared memory until all threads arrive
// only this thread returned from 'team_rendezvous'
// with a return value of 'true'
- *shared_value = value;
+ Kokkos::Impl::atomic_store(shared_value, value,
+ desul::MemoryOrderRelease());
m_data.team_rendezvous_release();
// This thread released all other threads from 'team_rendezvous'
// with a return value of 'false'
} else {
- value = *shared_value;
+ value = Kokkos::Impl::atomic_load(shared_value,
+ desul::MemoryOrderAcquire());
}
}))
//--------------------------------------------------------------------------
template <class Closure, typename T>
- KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, T& value,
- const int source_team_rank) const
- noexcept {
+ KOKKOS_INLINE_FUNCTION void team_broadcast(
+ Closure const& f, T& value, const int source_team_rank) const noexcept {
KOKKOS_IF_ON_HOST((
- T volatile* const shared_value = (T*)m_data.team_reduce();
+ T* const shared_value = (T*)m_data.team_reduce();
// Don't overwrite shared memory until all threads arrive
f(value);
if (1 < m_data.m_team_size) {
- *shared_value = value;
+ Kokkos::Impl::atomic_store(shared_value, value,
+ desul::MemoryOrderRelease());
}
m_data.team_rendezvous_release();
// This thread released all other threads from 'team_rendezvous'
// with a return value of 'false'
- } else { value = *shared_value; }))
+ } else {
+ value = Kokkos::Impl::atomic_load(shared_value,
+ desul::MemoryOrderAcquire());
+ }))
KOKKOS_IF_ON_DEVICE(
((void)f; (void)value; (void)source_team_rank;
// team_reduce( Max(result) );
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<ReducerType>>
team_reduce(ReducerType const& reducer) const noexcept {
team_reduce(reducer, reducer.reference());
}
template <typename ReducerType>
- KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<ReducerType>>
team_reduce(ReducerType const& reducer,
typename ReducerType::value_type contribution) const noexcept {
+ using value_type = typename ReducerType::value_type;
+ using wrapped_reducer_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<Kokkos::DefaultHostExecutionSpace>, ReducerType,
+ value_type>::Reducer;
+
+ impl_team_reduce(wrapped_reducer_type(reducer), contribution);
+ reducer.reference() = contribution;
+ }
+
+ template <typename WrappedReducerType>
+ KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer_v<WrappedReducerType>>
+ impl_team_reduce(
+ WrappedReducerType const& reducer,
+ typename WrappedReducerType::value_type& contribution) const {
KOKKOS_IF_ON_HOST((
+
if (1 < m_data.m_team_size) {
- using value_type = typename ReducerType::value_type;
+ using value_type = typename WrappedReducerType::value_type;
if (0 != m_data.m_team_rank) {
// Non-root copies to their local buffer:
value_type* const src =
(value_type*)m_data.team_member(i)->team_reduce_local();
- reducer.join(contribution, *src);
+ reducer.join(&contribution, src);
}
// Copy result to root member's buffer:
// reducer.copy( (value_type*) m_data.team_reduce() , reducer.data()
// );
*((value_type*)m_data.team_reduce()) = contribution;
- reducer.reference() = contribution;
+
m_data.team_rendezvous_release();
// This thread released all other threads from 'team_rendezvous'
// with a return value of 'false'
} else {
// Copy from root member's buffer:
- reducer.reference() = *((value_type*)m_data.team_reduce());
+ contribution = *((value_type*)m_data.team_reduce());
}
- } else { reducer.reference() = contribution; }))
+ }))
KOKKOS_IF_ON_DEVICE(((void)reducer; (void)contribution;
Kokkos::abort("HostThreadTeamMember team_reduce\n");))
parallel_reduce(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
loop_boundaries,
Closure const& closure, Reducer const& reducer) {
- typename Reducer::value_type value;
- reducer.init(value);
+ using value_type = typename Reducer::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<Kokkos::DefaultHostExecutionSpace>, Reducer, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
closure(i, value);
}
- loop_boundaries.thread.team_reduce(reducer, value);
+ loop_boundaries.thread.impl_team_reduce(wrapped_reducer, value);
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
template <typename iType, typename Closure, typename ValueType, typename Member>
parallel_reduce(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
loop_boundaries,
Closure const& closure, ValueType& result) {
- ValueType val;
- Sum<ValueType> reducer(val);
- reducer.init(val);
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<Kokkos::DefaultHostExecutionSpace>, Closure, ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(closure);
+ value_type value;
+ wrapped_reducer.init(&value);
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- closure(i, reducer.reference());
+ closure(i, value);
}
- loop_boundaries.thread.team_reduce(reducer);
- result = reducer.reference();
+ loop_boundaries.thread.impl_team_reduce(wrapped_reducer, value);
+ wrapped_reducer.final(&value);
+ result = value;
}
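
// Usage sketch of the reduction path above (standard Kokkos team-level API;
// 'team' is the member_type handle inside a TeamPolicy kernel):
//
//   double sum = 0;
//   Kokkos::parallel_reduce(
//       Kokkos::TeamThreadRange(team, 100),
//       [&](int i, double& lsum) { lsum += i; }, sum);
//
// The lambda contributions are combined through the wrapped reducer (the
// default sum join) and finalized into 'sum' on every member of the team.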
/*template< typename iType, class Space
parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
iType, Member>& loop_boundaries,
const Lambda& lambda, ValueType& result) {
- result = ValueType();
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<Kokkos::DefaultHostExecutionSpace>, Lambda, ValueType>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+ using value_type = typename wrapped_reducer_type::value_type;
+
+ wrapped_reducer_type wrapped_reducer(lambda);
+ value_type value;
+ wrapped_reducer.init(&value);
+
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- lambda(i, result);
+ lambda(i, value);
}
+
+ wrapped_reducer.final(&value);
+ result = value;
}
template <typename iType, class Lambda, typename ReducerType, typename Member>
parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
iType, Member>& loop_boundaries,
const Lambda& lambda, const ReducerType& reducer) {
- reducer.init(reducer.reference());
+ using value_type = typename ReducerType::value_type;
+ using functor_analysis_type = typename Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::REDUCE,
+ TeamPolicy<Kokkos::DefaultHostExecutionSpace>, ReducerType, value_type>;
+ using wrapped_reducer_type = typename functor_analysis_type::Reducer;
+
+ wrapped_reducer_type wrapped_reducer(reducer);
+ value_type value;
+ wrapped_reducer.init(&value);
+
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
- lambda(i, reducer.reference());
+ lambda(i, value);
}
+
+ wrapped_reducer.final(&value);
+ reducer.reference() = value;
}
//----------------------------------------------------------------------------
-template <typename iType, class Closure, class Member>
+template <typename iType, class Closure, class Member, typename ValueType>
KOKKOS_INLINE_FUNCTION
- std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+ std::enable_if_t<!Kokkos::is_reducer<ValueType>::value &&
+ Impl::is_host_thread_team_member<Member>::value>
parallel_scan(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
loop_boundaries,
- Closure const& closure) {
- // Extract ValueType from the closure
-
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+ Closure const& closure, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using ClosureValueType = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
+ static_assert(std::is_same_v<ClosureValueType, ValueType>,
+ "Non-matching value types of closure and return type");
- value_type accum = 0;
+ ValueType accum = ValueType();
// Intra-member scan
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
closure(i, accum, false);
}
+ auto& team_member = loop_boundaries.thread;
+
// 'accum' output is the exclusive prefix sum
- accum = loop_boundaries.thread.team_scan(accum);
+ accum = team_member.team_scan(accum);
for (iType i = loop_boundaries.start; i < loop_boundaries.end;
i += loop_boundaries.increment) {
closure(i, accum, true);
}
+
+ team_member.team_broadcast(accum, team_member.team_size() - 1);
+
+ return_val = accum;
}
-template <typename iType, class ClosureType, class Member>
+template <typename iType, class Closure, class Member>
KOKKOS_INLINE_FUNCTION
std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+ parallel_scan(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
+ loop_boundaries,
+ Closure const& closure) {
+ // Extract ValueType from the closure
+ using ValueType = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure,
+ void>::value_type;
+
+ ValueType scan_val;
+ parallel_scan(loop_boundaries, closure, scan_val);
+}
+
+template <typename iType, class ClosureType, class Member, typename ValueType>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<!Kokkos::is_reducer<ValueType>::value &&
+ Impl::is_host_thread_team_member<Member>::value>
parallel_scan(Impl::ThreadVectorRangeBoundariesStruct<iType, Member> const&
loop_boundaries,
- ClosureType const& closure) {
- using value_type = typename Kokkos::Impl::FunctorAnalysis<
- Impl::FunctorPatternInterface::SCAN, void, ClosureType>::value_type;
+ ClosureType const& closure, ValueType& return_val) {
+ // Extract ValueType from the Closure
+ using ClosureValueType = typename Kokkos::Impl::FunctorAnalysis<
+ Kokkos::Impl::FunctorPatternInterface::SCAN, void, ClosureType,
+ void>::value_type;
+ static_assert(std::is_same_v<ClosureValueType, ValueType>,
+ "Non-matching value types of closure and return type");
- value_type scan_val = value_type();
+ ValueType scan_val = ValueType();
#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
#pragma ivdep
i += loop_boundaries.increment) {
closure(i, scan_val, true);
}
+
+ return_val = scan_val;
+}
+
+template <typename iType, class ClosureType, class Member>
+KOKKOS_INLINE_FUNCTION
+ std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+ parallel_scan(Impl::ThreadVectorRangeBoundariesStruct<iType, Member> const&
+ loop_boundaries,
+ ClosureType const& closure) {
+ // Extract ValueType from the closure
+ using ValueType = typename Kokkos::Impl::FunctorAnalysis<
+ Impl::FunctorPatternInterface::SCAN, void, ClosureType, void>::value_type;
+
+ ValueType scan_val;
+ parallel_scan(loop_boundaries, closure, scan_val);
}
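
// Usage sketch of the new return-value overloads (hypothetical views 'in'
// and 'out' of length n; 'team' is a TeamPolicy member handle):
//
//   int total = 0;
//   Kokkos::parallel_scan(Kokkos::TeamThreadRange(team, n),
//                         [&](int i, int& partial, bool final) {
//                           if (final) out(i) = partial;
//                           partial += in(i);
//                         },
//                         total);
//
// 'total' receives the sum over the full range; the team_broadcast from the
// last rank above makes it valid on every team member.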
template <typename iType, class Lambda, typename ReducerType, typename Member>
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_INITIALIZATION_SETTINGS_HPP
+#define KOKKOS_INITIALIZATION_SETTINGS_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <optional>
+#include <string>
+
+namespace Kokkos {
+
+class InitializationSettings {
+#define KOKKOS_IMPL_DECLARE(TYPE, NAME) \
+ private: \
+ std::optional<TYPE> m_##NAME; \
+ \
+ public: \
+ InitializationSettings& set_##NAME(TYPE NAME) { \
+ m_##NAME = NAME; \
+ return *this; \
+ } \
+ bool has_##NAME() const noexcept { return static_cast<bool>(m_##NAME); } \
+ TYPE get_##NAME() const noexcept { return *m_##NAME; } \
+ static_assert(true, "no-op to require trailing semicolon")
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#define KOKKOS_IMPL_DECLARE_DEPRECATED(TYPE, NAME) \
+ private: \
+ std::optional<TYPE> m_##NAME; \
+ \
+ public: \
+ KOKKOS_DEPRECATED InitializationSettings& set_##NAME(TYPE NAME) { \
+ m_##NAME = NAME; \
+ return *this; \
+ } \
+ KOKKOS_DEPRECATED bool has_##NAME() const noexcept { \
+ return static_cast<bool>(m_##NAME); \
+ } \
+ KOKKOS_DEPRECATED TYPE get_##NAME() const noexcept { return *m_##NAME; } \
+ static_assert(true, "no-op to require trailing semicolon")
+#else
+#define KOKKOS_IMPL_DECLARE_DEPRECATED(TYPE, NAME) \
+ static_assert(true, "no-op to require trailing semicolon")
+#endif
+
+ public:
+ KOKKOS_IMPL_DECLARE(int, num_threads);
+ KOKKOS_IMPL_DECLARE(int, device_id);
+ KOKKOS_IMPL_DECLARE(std::string, map_device_id_by);
+ KOKKOS_IMPL_DECLARE_DEPRECATED(int, num_devices);
+ KOKKOS_IMPL_DECLARE_DEPRECATED(int, skip_device);
+ KOKKOS_IMPL_DECLARE(bool, disable_warnings);
+ KOKKOS_IMPL_DECLARE(bool, print_configuration);
+ KOKKOS_IMPL_DECLARE(bool, tune_internals);
+ KOKKOS_IMPL_DECLARE(bool, tools_help);
+ KOKKOS_IMPL_DECLARE(std::string, tools_libs);
+ KOKKOS_IMPL_DECLARE(std::string, tools_args);
+
+#undef KOKKOS_IMPL_DECLARE
+#undef KOKKOS_IMPL_DECLARE_DEPRECATED
+};
+
+} // namespace Kokkos
+
+#endif
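+
+// Usage sketch: the macro-generated members above chain naturally, e.g.
+//
+//   Kokkos::InitializationSettings settings;
+//   settings.set_num_threads(8).set_disable_warnings(true);
+//   if (settings.has_num_threads()) {
+//     int num_threads = settings.get_num_threads();
+//   }
+//   Kokkos::initialize(settings);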
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
using intrusive_node_base_type = SimpleSinglyLinkedListNode<>;
public:
- LockBasedLIFO() = default;
- LockBasedLIFO(LockBasedLIFO const&) = delete;
- LockBasedLIFO(LockBasedLIFO&&) = delete;
+ LockBasedLIFO() = default;
+ LockBasedLIFO(LockBasedLIFO const&) = delete;
+ LockBasedLIFO(LockBasedLIFO&&) = delete;
LockBasedLIFO& operator=(LockBasedLIFO const&) = delete;
- LockBasedLIFO& operator=(LockBasedLIFO&&) = delete;
+ LockBasedLIFO& operator=(LockBasedLIFO&&) = delete;
~LockBasedLIFO() = default;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
struct SimpleSinglyLinkedListNode {
private:
using pointer_type =
- typename PointerTemplate<SimpleSinglyLinkedListNode>::type;
+ typename PointerTemplate<SimpleSinglyLinkedListNode>::type; // NOLINT
pointer_type m_next = reinterpret_cast<pointer_type>(NotEnqueuedValue);
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#include <impl/Kokkos_Error.hpp>
-#include <cstdint>
#include <ostream>
#include <sstream>
+#include <cstdint>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_MULTIPLETASKQUEUE_HPP
#define KOKKOS_IMPL_MULTIPLETASKQUEUE_HPP
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION OptionalRef<task_base_type> _pop_failed_insertion(
int priority, TaskType type,
- std::enable_if_t<task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value,
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ task_queue_traits::ready_queue_insertion_may_fail,
void*> = nullptr) {
auto* rv_ptr = m_failed_heads[priority][(int)type];
if (rv_ptr) {
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION OptionalRef<task_base_type> _pop_failed_insertion(
int /*priority*/, TaskType /*type*/,
- std::enable_if_t<!task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value,
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ !task_queue_traits::ready_queue_insertion_may_fail,
void*> = nullptr) {
return OptionalRef<task_base_type>{nullptr};
}
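
// Note: the std::is_void_v<_always_void> term exists only to make the
// enable_if condition dependent on the member template parameter, so both
// mutually exclusive overloads stay well-formed and exactly one survives
// substitution for a given task_queue_traits.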
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION void do_handle_failed_insertion(
runnable_task_base_type&& task,
- std::enable_if_t<task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value,
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ task_queue_traits::ready_queue_insertion_may_fail,
void*> = nullptr) {
// failed insertions, if they happen, must be from the only thread that
// is allowed to push to m_ready_queues, so this linked-list insertion is
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION void do_handle_failed_insertion(
runnable_task_base_type&& /*task*/,
- std::enable_if_t<!task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value,
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ !task_queue_traits::ready_queue_insertion_may_fail,
void*> = nullptr) {
Kokkos::abort("should be unreachable!");
}
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION void flush_failed_insertions(
int priority, int task_type,
- std::enable_if_t<
- task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value, // just to make this dependent
- // on template parameter
- int> = 0) {
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ task_queue_traits::ready_queue_insertion_may_fail,
+ int> = 0) {
// TODO @tasking @minor DSH this sometimes gets some things out of LIFO
// order, which may be undesirable (but not a bug)
template <class _always_void = void>
KOKKOS_INLINE_FUNCTION void flush_failed_insertions(
int, int,
- std::enable_if_t<
- !task_queue_traits::ready_queue_insertion_may_fail &&
- std::is_void<_always_void>::value, // just to make this dependent
- // on template parameter
- int> = 0) {}
+ std::enable_if_t<std::is_void_v<_always_void> &&
+ !task_queue_traits::ready_queue_insertion_may_fail,
+ int> = 0) {}
KOKKOS_INLINE_FUNCTION
void flush_all_failed_insertions() {
static constexpr int NumPriorities = 3;
KOKKOS_INLINE_FUNCTION
- constexpr typename vla_emulation_base_t::vla_entry_count_type n_queues() const
- noexcept {
+ constexpr typename vla_emulation_base_t::vla_entry_count_type n_queues()
+ const noexcept {
return this->n_vla_entries();
}
//----------------------------------------------------------------------------
// <editor-fold desc="Constructors, destructors, and assignment"> {{{2
- MultipleTaskQueue() = delete;
- MultipleTaskQueue(MultipleTaskQueue const&) = delete;
- MultipleTaskQueue(MultipleTaskQueue&&) = delete;
+ MultipleTaskQueue() = delete;
+ MultipleTaskQueue(MultipleTaskQueue const&) = delete;
+ MultipleTaskQueue(MultipleTaskQueue&&) = delete;
MultipleTaskQueue& operator=(MultipleTaskQueue const&) = delete;
- MultipleTaskQueue& operator=(MultipleTaskQueue&&) = delete;
+ MultipleTaskQueue& operator=(MultipleTaskQueue&&) = delete;
MultipleTaskQueue(typename base_t::execution_space const& arg_execution_space,
typename base_t::memory_space const&,
// TODO @tasking @generalization DSH make this a property-based customization
// point
KOKKOS_INLINE_FUNCTION
- team_scheduler_info_type initial_team_scheduler_info(int rank_in_league) const
- noexcept {
+ team_scheduler_info_type initial_team_scheduler_info(
+ int rank_in_league) const noexcept {
return team_scheduler_info_type{
typename team_scheduler_info_type::team_queue_id_t(rank_in_league %
n_queues())};
} /* namespace Impl */
} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_NVIDIA_GPU_ARCHITECTURES_HPP
+#define KOKKOS_CUDA_NVIDIA_GPU_ARCHITECTURES_HPP
+
+#if defined(KOKKOS_ARCH_KEPLER30)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 30
+#elif defined(KOKKOS_ARCH_KEPLER32)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 32
+#elif defined(KOKKOS_ARCH_KEPLER35)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 35
+#elif defined(KOKKOS_ARCH_KEPLER37)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 37
+#elif defined(KOKKOS_ARCH_MAXWELL50)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 50
+#elif defined(KOKKOS_ARCH_MAXWELL52)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 52
+#elif defined(KOKKOS_ARCH_MAXWELL53)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 53
+#elif defined(KOKKOS_ARCH_PASCAL60)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 60
+#elif defined(KOKKOS_ARCH_PASCAL61)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 61
+#elif defined(KOKKOS_ARCH_VOLTA70)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 70
+#elif defined(KOKKOS_ARCH_VOLTA72)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 72
+#elif defined(KOKKOS_ARCH_TURING75)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 75
+#elif defined(KOKKOS_ARCH_AMPERE80)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 80
+#elif defined(KOKKOS_ARCH_AMPERE86)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 86
+#elif defined(KOKKOS_ARCH_ADA89)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 89
+#elif defined(KOKKOS_ARCH_HOPPER90)
+#define KOKKOS_IMPL_ARCH_NVIDIA_GPU 90
+#elif defined(KOKKOS_ENABLE_CUDA)
+// do not raise an error on other backends that may run on NVIDIA GPUs such as
+// OpenACC, OpenMPTarget, or SYCL
+#error NVIDIA GPU arch not recognized
+#endif
+
+#endif
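+
+// Usage sketch: downstream code can gate features on the numeric value, e.g.
+//
+//   #if defined(KOKKOS_IMPL_ARCH_NVIDIA_GPU) && KOKKOS_IMPL_ARCH_NVIDIA_GPU >= 70
+//   // Volta or newer
+//   #endif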
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
+#define KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
+
+// These declarations are provided only for testing purposes
+namespace Kokkos {
+class InitializationSettings;
+namespace Impl {
+void parse_command_line_arguments(int& argc, char* argv[],
+ InitializationSettings& settings);
+void parse_environment_variables(InitializationSettings& settings);
+} // namespace Impl
+} // namespace Kokkos
+
+#endif
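+
+// Test-only usage sketch (hypothetical arguments; --kokkos-num-threads is a
+// standard Kokkos flag):
+//
+//   char arg0[] = "test";
+//   char arg1[] = "--kokkos-num-threads=4";
+//   char* argv[] = {arg0, arg1, nullptr};
+//   int argc = 2;
+//   Kokkos::InitializationSettings settings;
+//   Kokkos::Impl::parse_command_line_arguments(argc, argv, settings);
+//   // recognized --kokkos-* flags are consumed from argc/argv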
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
const std::string InitArguments::unset_string_option = {
"kokkos_tools_impl_unset_option"};
-InitArguments tool_arguments;
-
namespace Impl {
void parse_command_line_arguments(int& argc, char* argv[],
InitArguments& arguments) {
args = env_tools_args;
}
return {
- Kokkos::Tools::Impl::InitializationStatus::InitializationResult::success};
+ Kokkos::Tools::Impl::InitializationStatus::InitializationResult::success,
+ ""};
}
InitializationStatus initialize_tools_subsystem(
const Kokkos::Tools::InitArguments& args) {
if (!Kokkos::Tools::printHelp(final_args)) {
std::cerr << "Tool has not provided a help message" << std::endl;
}
- return {InitializationStatus::InitializationResult::help_request};
+ return {InitializationStatus::InitializationResult::help_request, ""};
}
Kokkos::Tools::parseArgs(final_args);
#else
(void)args;
#endif
- return {InitializationStatus::InitializationResult::success};
+ return {InitializationStatus::InitializationResult::success, ""};
}
} // namespace Impl
return;
}
- char* envProfileLibrary = const_cast<char*>(profileLibrary.c_str());
-
- const auto envProfileCopy =
- std::make_unique<char[]>(strlen(envProfileLibrary) + 1);
- sprintf(envProfileCopy.get(), "%s", envProfileLibrary);
-
- char* profileLibraryName = strtok(envProfileCopy.get(), ";");
-
- if ((profileLibraryName != nullptr) &&
- (strcmp(profileLibraryName, "") != 0)) {
- firstProfileLibrary = dlopen(profileLibraryName, RTLD_NOW | RTLD_GLOBAL);
+ if (auto end_first_library = profileLibrary.find(';');
+ end_first_library != 0) {
+ auto profileLibraryName = profileLibrary.substr(0, end_first_library);
+ firstProfileLibrary =
+ dlopen(profileLibraryName.c_str(), RTLD_NOW | RTLD_GLOBAL);
if (firstProfileLibrary == nullptr) {
std::cerr << "Error: Unable to load KokkosP library: "
<< ", RTLD_NOW | RTLD_GLOBAL) failed with " << dlerror()
<< '\n';
} else {
-#ifdef KOKKOS_ENABLE_PROFILING_LOAD_PRINT
- std::cout << "KokkosP: Library Loaded: " << profileLibraryName
- << std::endl;
-#endif
lookup_function(firstProfileLibrary, "kokkosp_begin_parallel_scan",
Experimental::current_callbacks.begin_parallel_scan);
lookup_function(firstProfileLibrary, "kokkosp_begin_parallel_for",
} // namespace Experimental
} // namespace Tools
-namespace Profiling {
-bool profileLibraryLoaded() { return Kokkos::Tools::profileLibraryLoaded(); }
-
-void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID) {
- Kokkos::Tools::beginParallelFor(kernelPrefix, devID, kernelID);
-}
-void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID) {
- Kokkos::Tools::beginParallelReduce(kernelPrefix, devID, kernelID);
-}
-void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID) {
- Kokkos::Tools::beginParallelScan(kernelPrefix, devID, kernelID);
-}
-void endParallelFor(const uint64_t kernelID) {
- Kokkos::Tools::endParallelFor(kernelID);
-}
-void endParallelReduce(const uint64_t kernelID) {
- Kokkos::Tools::endParallelReduce(kernelID);
-}
-void endParallelScan(const uint64_t kernelID) {
- Kokkos::Tools::endParallelScan(kernelID);
-}
-
-void pushRegion(const std::string& kName) { Kokkos::Tools::pushRegion(kName); }
-void popRegion() { Kokkos::Tools::popRegion(); }
-
-void createProfileSection(const std::string& sectionName, uint32_t* secID) {
- Kokkos::Tools::createProfileSection(sectionName, secID);
-}
-void destroyProfileSection(const uint32_t secID) {
- Kokkos::Tools::destroyProfileSection(secID);
-}
-
-void startSection(const uint32_t secID) { Kokkos::Tools::startSection(secID); }
-
-void stopSection(const uint32_t secID) { Kokkos::Tools::stopSection(secID); }
-
-void markEvent(const std::string& eventName) {
- Kokkos::Tools::markEvent(eventName);
-}
-void allocateData(const SpaceHandle handle, const std::string name,
- const void* data, const uint64_t size) {
- Kokkos::Tools::allocateData(handle, name, data, size);
-}
-void deallocateData(const SpaceHandle space, const std::string label,
- const void* ptr, const uint64_t size) {
- Kokkos::Tools::deallocateData(space, label, ptr, size);
-}
-
-void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
- const void* dst_ptr, const SpaceHandle src_space,
- const std::string src_label, const void* src_ptr,
- const uint64_t size) {
- Kokkos::Tools::beginDeepCopy(dst_space, dst_label, dst_ptr, src_space,
- src_label, src_ptr, size);
-}
-void endDeepCopy() { Kokkos::Tools::endDeepCopy(); }
-
-void finalize() { Kokkos::Tools::finalize(); }
-void initialize(const std::string& profileLibrary) {
- Kokkos::Tools::initialize(profileLibrary);
-}
-
-bool printHelp(const std::string& args) {
- return Kokkos::Tools::printHelp(args);
-}
-void parseArgs(const std::string& args) { Kokkos::Tools::parseArgs(args); }
-void parseArgs(int _argc, char** _argv) {
- Kokkos::Tools::parseArgs(_argc, _argv);
-}
-
-SpaceHandle make_space_handle(const char* space_name) {
- return Kokkos::Tools::make_space_handle(space_name);
-}
-} // namespace Profiling
-
// Tuning
namespace Tools {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_KOKKOS_PROFILING_HPP
#define KOKKOS_IMPL_KOKKOS_PROFILING_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING
+#endif
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Tuners.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
#include <memory>
#include <iosfwd>
Kokkos::Tools::Impl::InitializationStatus parse_environment_variables(
InitArguments& arguments);
+template <typename PolicyType, typename Functor>
+struct ToolResponse {
+ PolicyType policy;
+};
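+// ToolResponse appears to carry the (possibly tool-adjusted) execution policy
+// back to the dispatching code; a tuning tool may substitute a different
+// policy here.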
+
} // namespace Impl
bool profileLibraryLoaded();
Kokkos::Tools::endFence(handle);
}
-inline uint32_t int_for_synchronization_reason(
- Kokkos::Tools::Experimental::SpecialSynchronizationCases reason) {
- switch (reason) {
- case GlobalDeviceSynchronization: return 0;
- case DeepCopyResourceSynchronization: return 0x00ffffff;
- }
- return 0;
-}
-
template <typename Space, typename FencingFunctor>
void profile_fence_event(
const std::string& name,
size_t get_current_context_id();
} // namespace Experimental
+namespace Impl {} // namespace Impl
+
} // namespace Tools
namespace Profiling {
-bool profileLibraryLoaded();
+// don't let ClangFormat reorder the using-declarations below
+// clang-format off
+using Kokkos::Tools::profileLibraryLoaded;
-void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID);
-void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID);
-void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
- uint64_t* kernelID);
-void endParallelFor(const uint64_t kernelID);
-void endParallelReduce(const uint64_t kernelID);
-void endParallelScan(const uint64_t kernelID);
-void pushRegion(const std::string& kName);
-void popRegion();
+using Kokkos::Tools::printHelp;
+using Kokkos::Tools::parseArgs;
-void createProfileSection(const std::string& sectionName, uint32_t* secID);
-void destroyProfileSection(const uint32_t secID);
-void startSection(const uint32_t secID);
+using Kokkos::Tools::initialize;
+using Kokkos::Tools::finalize;
-void stopSection(const uint32_t secID);
+using Kokkos::Tools::beginParallelFor;
+using Kokkos::Tools::beginParallelReduce;
+using Kokkos::Tools::beginParallelScan;
+using Kokkos::Tools::endParallelFor;
+using Kokkos::Tools::endParallelReduce;
+using Kokkos::Tools::endParallelScan;
-void markEvent(const std::string& eventName);
-void allocateData(const SpaceHandle handle, const std::string name,
- const void* data, const uint64_t size);
-void deallocateData(const SpaceHandle space, const std::string label,
- const void* ptr, const uint64_t size);
-void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
- const void* dst_ptr, const SpaceHandle src_space,
- const std::string src_label, const void* src_ptr,
- const uint64_t size);
-void endDeepCopy();
-void finalize();
-void initialize(const std::string& = {});
+using Kokkos::Tools::allocateData;
+using Kokkos::Tools::deallocateData;
+
+using Kokkos::Tools::beginDeepCopy;
+using Kokkos::Tools::endDeepCopy;
-SpaceHandle make_space_handle(const char* space_name);
+using Kokkos::Tools::pushRegion;
+using Kokkos::Tools::popRegion;
+
+using Kokkos::Tools::createProfileSection;
+using Kokkos::Tools::destroyProfileSection;
+using Kokkos::Tools::startSection;
+using Kokkos::Tools::stopSection;
+
+using Kokkos::Tools::markEvent;
+
+using Kokkos::Tools::make_space_handle;
+// clang-format on
namespace Experimental {
using Kokkos::Tools::Experimental::set_allocate_data_callback;
} // namespace Kokkos
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING
+#endif
+
#endif
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
*/
#include <stdbool.h>
#endif
-#define KOKKOSP_INTERFACE_VERSION 20211015
+#define KOKKOSP_INTERFACE_VERSION 20240906
// Profiling
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct Kokkos_Profiling_KokkosPDeviceInfo {
size_t deviceID;
};
Kokkos_Tools_Maximize
};
-struct Kokkos_Tools_OptimzationGoal {
+struct Kokkos_Tools_OptimizationGoal {
size_t type_id;
enum Kokkos_Tools_OptimizationType goal;
};
typedef void (*Kokkos_Tools_contextEndFunction)(
const size_t, struct Kokkos_Tools_VariableValue);
typedef void (*Kokkos_Tools_optimizationGoalDeclarationFunction)(
- const size_t, const struct Kokkos_Tools_OptimzationGoal goal);
+ const size_t, const struct Kokkos_Tools_OptimizationGoal goal);
struct Kokkos_Profiling_EventSet {
Kokkos_Profiling_initFunction init;
// changing struct layout
};
+#ifdef __cplusplus
+}
+#endif
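+// The extern "C" guard above gives every declaration in this header C
+// linkage when compiled as C++, keeping the profiling interface consumable
+// from tools written in plain C.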
+
#endif // KOKKOS_PROFILING_C_INTERFACE_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOSP_DEVICE_INFO_HPP
+#define KOKKOSP_DEVICE_INFO_HPP
+
+#include <cstdint>
+#include <impl/Kokkos_Profiling_C_Interface.h>
+namespace Kokkos {
+namespace Profiling {
+using KokkosPDeviceInfo = Kokkos_Profiling_KokkosPDeviceInfo;
+} // namespace Profiling
+} // namespace Kokkos
+
+#endif
-/*
- //@HEADER
- // ************************************************************************
- //
- // Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
- //
- // Under the terms of Contract DE-NA0003525 with NTESS,
- // the U.S. Government retains certain rights in this software.
- //
- // Redistribution and use in source and binary forms, with or without
- // modification, are permitted provided that the following conditions are
- // met:
- //
- // 1. Redistributions of source code must retain the above copyright
- // notice, this list of conditions and the following disclaimer.
- //
- // 2. Redistributions in binary form must reproduce the above copyright
- // notice, this list of conditions and the following disclaimer in the
- // documentation and/or other materials provided with the distribution.
- //
- // 3. Neither the name of the Corporation nor the names of the
- // contributors may be used to endorse or promote products derived from
- // this software without specific prior written permission.
- //
- // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
- // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
- // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- //
- // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
- //
- // ************************************************************************
- //@HEADER
- */
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
#ifndef KOKKOSP_INTERFACE_HPP
#define KOKKOSP_INTERFACE_HPP
<< num_instance_bits) +
space.impl_instance_id();
}
+
+inline uint32_t int_for_synchronization_reason(
+ Kokkos::Tools::Experimental::SpecialSynchronizationCases reason) {
+ switch (reason) {
+ case GlobalDeviceSynchronization: return 0;
+ case DeepCopyResourceSynchronization: return 0x00ffffff;
+ }
+ return 0;
+}
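+// The two special cases map to reserved instance-id values: 0 stands for a
+// fence over the whole device and 0x00ffffff marks a fence issued on behalf
+// of a deep_copy, so tools can tell them apart from ordinary instances.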
} // namespace Experimental
} // namespace Tools
} // end namespace Kokkos
using CandidateValueType = Kokkos_Tools_VariableInfo_CandidateValueType;
using SetOrRange = Kokkos_Tools_VariableInfo_SetOrRange;
using VariableInfo = Kokkos_Tools_VariableInfo;
-using OptimizationGoal = Kokkos_Tools_OptimzationGoal;
+using OptimizationGoal = Kokkos_Tools_OptimizationGoal;
using TuningString = Kokkos_Tools_Tuning_String;
using VariableValue = Kokkos_Tools_VariableValue;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_QUAD_PRECISION_MATH_HPP
#define KOKKOS_QUAD_PRECISION_MATH_HPP
#if defined(KOKKOS_ENABLE_LIBQUADMATH)
#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_ReductionIdentity.hpp>
#include <Kokkos_MathematicalConstants.hpp>
#include <Kokkos_MathematicalFunctions.hpp>
//<editor-fold desc="numeric traits __float128 specializations">
namespace Kokkos {
namespace Experimental {
-#if defined(KOKKOS_ENABLE_CXX17)
#define KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(TRAIT, TYPE, VALUE_TYPE, VALUE) \
template <> \
struct TRAIT<TYPE> { \
}; \
template <> \
inline constexpr auto TRAIT##_v<TYPE> = TRAIT<TYPE>::value;
-#else
-#define KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(TRAIT, TYPE, VALUE_TYPE, VALUE) \
- template <> \
- struct TRAIT<TYPE> { \
- static constexpr VALUE_TYPE value = VALUE; \
- };
-#endif
// clang-format off
// Numeric distinguished value traits
-// Workaround GCC bug https://godbolt.org/z/qWb5oe4dx
-// error: '__builtin_huge_valq()' is not a constant expression
-#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(infinity, __float128, __float128, HUGE_VALQ)
-#endif
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(finite_min, __float128, __float128, -FLT128_MAX)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(finite_max, __float128, __float128, FLT128_MAX)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(epsilon, __float128, __float128, FLT128_EPSILON)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(round_error, __float128, __float128, static_cast<__float128>(0.5))
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(norm_min, __float128, __float128, FLT128_MIN)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(denorm_min, __float128, __float128, FLT128_DENORM_MIN)
-KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(reciprocal_overflow_threshold, __float128, __float128, FLT128_MIN)
-#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(quiet_NaN, __float128, __float128, __builtin_nanq(""))
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(signaling_NaN, __float128, __float128, __builtin_nansq(""))
-#endif
// Numeric characteristics traits
KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(digits, __float128, int, FLT128_MANT_DIG)
inline __float128 fmod(__float128 x, __float128 y) { return ::fmodq(x, y); }
inline __float128 remainder(__float128 x, __float128 y) { return ::remainderq(x, y); }
// remquo
-// fma
+inline __float128 fma(__float128 x, __float128 y, __float128 z) { return ::fmaq(x, y, z); }
inline __float128 fmax(__float128 x, __float128 y) { return ::fmaxq(x, y); }
inline __float128 fmin(__float128 x, __float128 y) { return ::fminq(x, y); }
inline __float128 fdim(__float128 x, __float128 y) { return ::fdimq(x, y); }
// scalbn
// scalbln
// ilog
-#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 610)
inline __float128 logb(__float128 x) { return ::logbq(x); }
-#endif
inline __float128 nextafter(__float128 x, __float128 y) { return ::nextafterq(x, y); }
// nexttoward
inline __float128 copysign(__float128 x, __float128 y) { return ::copysignq(x, y); }
//</editor-fold>
//<editor-fold desc="Mathematical constants __float128 specializations">
-namespace Kokkos {
-namespace Experimental {
+namespace Kokkos::numbers {
// clang-format off
template <> constexpr __float128 e_v <__float128> = 2.718281828459045235360287471352662498Q;
template <> constexpr __float128 log2e_v <__float128> = 1.442695040888963407359924681001892137Q;
template <> constexpr __float128 egamma_v <__float128> = 0.577215664901532860606512090082402431Q;
template <> constexpr __float128 phi_v <__float128> = 1.618033988749894848204586834365638118Q;
// clang-format on
-} // namespace Experimental
-} // namespace Kokkos
+} // namespace Kokkos::numbers
//</editor-fold>
#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#endif
#include <Kokkos_Core.hpp>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
namespace Kokkos {
namespace Impl {
-thread_local int SharedAllocationRecord<void, void>::t_tracking_enabled = 1;
-
#ifdef KOKKOS_ENABLE_DEBUG
bool SharedAllocationRecord<void, void>::is_sane(
SharedAllocationRecord<void, void>* arg_record) {
}
if (nullptr != Kokkos::atomic_exchange(&root->m_next, root_next)) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord failed is_sane unlocking");
}
}
bool SharedAllocationRecord<void, void>::is_sane(
SharedAllocationRecord<void, void>*) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord::is_sane only works with "
"KOKKOS_ENABLE_DEBUG enabled");
return false;
}
-#endif //#ifdef KOKKOS_ENABLE_DEBUG
+#endif // #ifdef KOKKOS_ENABLE_DEBUG
#ifdef KOKKOS_ENABLE_DEBUG
SharedAllocationRecord<void, void>* SharedAllocationRecord<void, void>::find(
}
if (nullptr != Kokkos::atomic_exchange(&arg_root->m_next, root_next)) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord failed locking/unlocking");
}
return r;
#else
SharedAllocationRecord<void, void>* SharedAllocationRecord<void, void>::find(
SharedAllocationRecord<void, void>* const, void* const) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord::find only works with "
- "KOKKOS_ENABLE_DEBUG "
- "enabled");
+ "KOKKOS_ENABLE_DEBUG enabled");
return nullptr;
}
#endif
Kokkos::memory_fence();
if (nullptr != Kokkos::atomic_exchange(&m_root->m_next, this)) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord failed locking/unlocking");
}
#endif
} else {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord given nullptr allocation");
}
}
const int old_count = Kokkos::atomic_fetch_add(&arg_record->m_count, 1);
if (old_count < 0) { // Error
- Kokkos::Impl::throw_runtime_exception(
- "Kokkos::Impl::SharedAllocationRecord failed increment");
+ Kokkos::abort("Kokkos::Impl::SharedAllocationRecord failed increment");
}
}
ss << "Kokkos allocation \"";
ss << arg_record->get_label();
ss << "\" is being deallocated after Kokkos::finalize was called\n";
- auto s = ss.str();
- Kokkos::Impl::throw_runtime_exception(s);
+ Kokkos::abort(ss.str().c_str());
}
#ifdef KOKKOS_ENABLE_DEBUG
// Unlock the list:
if (nullptr !=
Kokkos::atomic_exchange(&arg_record->m_root->m_next, root_next)) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord failed decrement unlocking");
}
"= %d\n",
arg_record->m_alloc_ptr->m_label, old_count);
fflush(stderr);
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord failed decrement count");
}
void SharedAllocationRecord<void, void>::print_host_accessible_records(
std::ostream& s, const char* const space_name,
const SharedAllocationRecord* const root, const bool detail) {
- const SharedAllocationRecord<void, void>* r = root;
-
- char buffer[256];
-
+ // Print every node except the root, which does not represent an actual
+ // allocation.
+ const SharedAllocationRecord<void, void>* r = root->m_next;
+
+ std::ios_base::fmtflags saved_flags = s.flags();
+#define KOKKOS_PAD_HEX(ptr) \
+ "0x" << std::hex << std::setw(12) << std::setfill('0') \
+ << reinterpret_cast<uintptr_t>(ptr)
if (detail) {
- do {
- // Formatting dependent on sizeof(uintptr_t)
- const char* format_string;
-
- if (sizeof(uintptr_t) == sizeof(unsigned long)) {
- format_string =
- "%s addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + "
- "%.8ld ] count(%d) dealloc(0x%.12lx) %s\n";
- } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
- format_string =
- "%s addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ "
- "0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n";
- }
+ while (r != root) {
+ s << space_name << " addr( " << KOKKOS_PAD_HEX(r) << " ) list ( "
+ << KOKKOS_PAD_HEX(r->m_prev) << ' ' << KOKKOS_PAD_HEX(r->m_next)
+ << " ) extent[ " << KOKKOS_PAD_HEX(r->m_alloc_ptr) << " + " << std::dec
+ << std::setw(8) << r->m_alloc_size << " ] count(" << r->use_count()
+ << ") dealloc(" << KOKKOS_PAD_HEX(r->m_dealloc) << ") "
+ << r->m_alloc_ptr->m_label << '\n';
- snprintf(buffer, 256, format_string, space_name,
- reinterpret_cast<uintptr_t>(r),
- reinterpret_cast<uintptr_t>(r->m_prev),
- reinterpret_cast<uintptr_t>(r->m_next),
- reinterpret_cast<uintptr_t>(r->m_alloc_ptr), r->m_alloc_size,
- r->use_count(), reinterpret_cast<uintptr_t>(r->m_dealloc),
- r->m_alloc_ptr->m_label);
- s << buffer;
r = r->m_next;
- } while (r != root);
+ }
} else {
- do {
- if (r->m_alloc_ptr) {
- // Formatting dependent on sizeof(uintptr_t)
- const char* format_string;
-
- if (sizeof(uintptr_t) == sizeof(unsigned long)) {
- format_string = "%s [ 0x%.12lx + %ld ] %s\n";
- } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
- format_string = "%s [ 0x%.12llx + %ld ] %s\n";
- }
-
- snprintf(buffer, 256, format_string, space_name,
- reinterpret_cast<uintptr_t>(r->data()), r->size(),
- r->m_alloc_ptr->m_label);
- } else {
- snprintf(buffer, 256, "%s [ 0 + 0 ]\n", space_name);
- }
- s << buffer;
+ while (r != root) {
+ s << space_name << " [ " << KOKKOS_PAD_HEX(r->data()) << " + " << std::dec
+ << r->size() << " ] " << r->m_alloc_ptr->m_label << '\n';
r = r->m_next;
- } while (r != root);
+ }
}
+#undef KOKKOS_PAD_HEX
+ s.flags(saved_flags);
}
#else
void SharedAllocationRecord<void, void>::print_host_accessible_records(
std::ostream&, const char* const, const SharedAllocationRecord* const,
const bool) {
- Kokkos::Impl::throw_runtime_exception(
+ Kokkos::abort(
"Kokkos::Impl::SharedAllocationRecord::print_host_accessible_records"
" only works with KOKKOS_ENABLE_DEBUG enabled");
}
#endif
+void fill_host_accessible_header_info(
+ SharedAllocationRecord<void, void>* arg_record,
+ SharedAllocationHeader& arg_header, std::string const& arg_label) {
+ // Fill in the Header information, directly accessible on the host
+
+ arg_header.m_record = arg_record;
+
+ strncpy(arg_header.m_label, arg_label.c_str(),
+ SharedAllocationHeader::maximum_label_length);
+ // Set last element zero, in case c_str is too long
+ arg_header.m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
+}
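+// strncpy does not write a terminating '\0' when the source fills the whole
+// buffer, hence the explicit write above. Illustrative failure mode:
+//
+//   char buf[4];
+//   strncpy(buf, "kokkos", sizeof buf);  // buf = {'k','o','k','k'}, no '\0'
+//   buf[sizeof buf - 1] = '\0';          // required before reading buf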
+
} /* namespace Impl */
} /* namespace Kokkos */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SHARED_ALLOC_HPP
#define KOKKOS_SHARED_ALLOC_HPP
private:
using Record = SharedAllocationRecord<void, void>;
-#ifdef KOKKOS_ARCH_VEGA
+#if defined(KOKKOS_ARCH_AMD_GPU)
static constexpr unsigned maximum_label_length =
(1u << 8 /* 256 */) - sizeof(Record*);
#else
friend class SharedAllocationRecordCommon;
template <class>
friend class HostInaccessibleSharedAllocationRecordCommon;
+ friend void fill_host_accessible_header_info(
+ SharedAllocationRecord<void, void>*, SharedAllocationHeader&,
+ std::string const&);
Record* m_record;
char m_label[maximum_label_length];
template <>
class SharedAllocationRecord<void, void> {
protected:
-#ifdef KOKKOS_ARCH_VEGA
+#if defined(KOKKOS_ARCH_AMD_GPU)
static_assert(sizeof(SharedAllocationHeader) == (1u << 8 /* 256 */),
"sizeof(SharedAllocationHeader) != 256");
#else
int m_count;
std::string m_label;
- SharedAllocationRecord(SharedAllocationRecord&&) = delete;
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
- SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
+ SharedAllocationRecord(SharedAllocationRecord&&) = delete;
+ SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+ SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
/**\brief Construct and insert into 'arg_root' tracking set.
SharedAllocationHeader* arg_alloc_ptr, size_t arg_alloc_size,
function_type arg_dealloc, const std::string& label);
private:
- static thread_local int t_tracking_enabled;
+ static inline thread_local int t_tracking_enabled = 1;
public:
virtual std::string get_label() const { return std::string("Unmanaged"); }
SharedAllocationRecord()
: m_alloc_ptr(nullptr),
m_alloc_size(0),
- m_dealloc(nullptr)
+ m_dealloc(nullptr),
#ifdef KOKKOS_ENABLE_DEBUG
- ,
m_root(this),
m_prev(this),
- m_next(this)
+ m_next(this),
#endif
- ,
m_count(0) {
}
static constexpr unsigned maximum_label_length =
SharedAllocationHeader::maximum_label_length;
- KOKKOS_INLINE_FUNCTION
+ KOKKOS_FUNCTION
const SharedAllocationHeader* head() const { return m_alloc_ptr; }
/* User's memory begins at the end of the header */
- KOKKOS_INLINE_FUNCTION
+ KOKKOS_FUNCTION
void* data() const { return static_cast<void*>(m_alloc_ptr + 1); }
/* User's memory begins at the end of the header */
const SharedAllocationRecord* const root, const bool detail);
};
+template <class MemorySpace>
+SharedAllocationHeader* checked_allocation_with_header(MemorySpace const& space,
+ std::string const& label,
+ size_t alloc_size) {
+ return reinterpret_cast<SharedAllocationHeader*>(space.allocate(
+ label.c_str(), alloc_size + sizeof(SharedAllocationHeader), alloc_size));
+}
+
+template <class ExecutionSpace, class MemorySpace>
+SharedAllocationHeader* checked_allocation_with_header(
+ ExecutionSpace const& exec_space, MemorySpace const& space,
+ std::string const& label, size_t alloc_size) {
+ return reinterpret_cast<SharedAllocationHeader*>(
+ space.allocate(exec_space, label.c_str(),
+ alloc_size + sizeof(SharedAllocationHeader), alloc_size));
+}
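+// Both overloads over-allocate by sizeof(SharedAllocationHeader) so the
+// header sits immediately in front of the user's data; data() above returns
+// m_alloc_ptr + 1 accordingly.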
+
+void fill_host_accessible_header_info(SharedAllocationHeader& arg_header,
+ std::string const& arg_label);
+
template <class MemorySpace>
class SharedAllocationRecordCommon : public SharedAllocationRecord<void, void> {
private:
using derived_t = SharedAllocationRecord<MemorySpace, void>;
using record_base_t = SharedAllocationRecord<void, void>;
- derived_t& self() { return *static_cast<derived_t*>(this); }
- derived_t const& self() const { return *static_cast<derived_t const*>(this); }
protected:
using record_base_t::record_base_t;
- void _fill_host_accessible_header_info(SharedAllocationHeader& arg_header,
- std::string const& arg_label);
+ MemorySpace m_space;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+ static record_base_t s_root_record;
+#endif
static void deallocate(record_base_t* arg_rec);
public:
+ ~SharedAllocationRecordCommon();
+ template <class ExecutionSpace>
+ SharedAllocationRecordCommon(
+ ExecutionSpace const& exec, MemorySpace const& space,
+ std::string const& label, std::size_t alloc_size,
+ record_base_t::function_type dealloc = &deallocate)
+ : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+ &s_root_record,
+#endif
+ checked_allocation_with_header(exec, space, label, alloc_size),
+ sizeof(SharedAllocationHeader) + alloc_size, dealloc, label),
+ m_space(space) {
+ auto& header = *SharedAllocationRecord<void, void>::m_alloc_ptr;
+ fill_host_accessible_header_info(this, header, label);
+ }
+ SharedAllocationRecordCommon(
+ MemorySpace const& space, std::string const& label, std::size_t size,
+ record_base_t::function_type dealloc = &deallocate);
+
static auto allocate(MemorySpace const& arg_space,
std::string const& arg_label, size_t arg_alloc_size)
-> derived_t*;
/**\brief Reallocate tracked memory in the space */
static void* reallocate_tracked(void* arg_alloc_ptr, size_t arg_alloc_size);
static auto get_record(void* alloc_ptr) -> derived_t*;
- std::string get_label() const;
+ std::string get_label() const override;
static void print_records(std::ostream& s, MemorySpace const&,
bool detail = false);
};
template <class MemorySpace>
class HostInaccessibleSharedAllocationRecordCommon
- : public SharedAllocationRecordCommon<MemorySpace> {
+ : public SharedAllocationRecord<void, void> {
private:
- using base_t = SharedAllocationRecordCommon<MemorySpace>;
using derived_t = SharedAllocationRecord<MemorySpace, void>;
using record_base_t = SharedAllocationRecord<void, void>;
protected:
- using base_t::base_t;
+ using record_base_t::record_base_t;
+
+ MemorySpace m_space;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+ static record_base_t s_root_record;
+#endif
+
+ static void deallocate(record_base_t* arg_rec);
public:
+ ~HostInaccessibleSharedAllocationRecordCommon();
+ template <class ExecutionSpace>
+ HostInaccessibleSharedAllocationRecordCommon(
+ ExecutionSpace const& exec, MemorySpace const& space,
+ std::string const& label, std::size_t alloc_size,
+ record_base_t::function_type dealloc = &deallocate)
+ : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+ &s_root_record,
+#endif
+ checked_allocation_with_header(exec, space, label, alloc_size),
+ sizeof(SharedAllocationHeader) + alloc_size, dealloc, label),
+ m_space(space) {
+ SharedAllocationHeader header;
+
+ fill_host_accessible_header_info(this, header, label);
+
+ Kokkos::Impl::DeepCopy<MemorySpace, HostSpace>(
+ exec, SharedAllocationRecord<void, void>::m_alloc_ptr, &header,
+ sizeof(SharedAllocationHeader));
+ }
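+  // The header is filled in a host-side staging copy and then deep-copied
+  // into the allocation, since this record's memory space is by definition
+  // not directly writable from the host.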
+ HostInaccessibleSharedAllocationRecordCommon(
+ MemorySpace const& space, std::string const& label, std::size_t size,
+ record_base_t::function_type dealloc = &deallocate);
+
+ static auto allocate(MemorySpace const& arg_space,
+ std::string const& arg_label, size_t arg_alloc_size)
+ -> derived_t*;
+ /**\brief Allocate tracked memory in the space */
+ static void* allocate_tracked(MemorySpace const& arg_space,
+ std::string const& arg_alloc_label,
+ size_t arg_alloc_size);
+ /**\brief Deallocate tracked memory in the space */
+ static void deallocate_tracked(void* arg_alloc_ptr);
+ /**\brief Reallocate tracked memory in the space */
+ static void* reallocate_tracked(void* arg_alloc_ptr, size_t arg_alloc_size);
+
static void print_records(std::ostream& s, MemorySpace const&,
bool detail = false);
static auto get_record(void* alloc_ptr) -> derived_t*;
- std::string get_label() const;
+ std::string get_label() const override;
};
-namespace {
+#ifdef KOKKOS_ENABLE_DEBUG
+template <class MemorySpace>
+SharedAllocationRecord<void, void>
+ SharedAllocationRecordCommon<MemorySpace>::s_root_record;
+
+template <class MemorySpace>
+SharedAllocationRecord<void, void>
+ HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::s_root_record;
+#endif
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(MEMORY_SPACE) \
+ template <> \
+ class Kokkos::Impl::SharedAllocationRecord<MEMORY_SPACE, void> \
+ : public Kokkos::Impl::SharedAllocationRecordCommon<MEMORY_SPACE> { \
+ using SharedAllocationRecordCommon< \
+ MEMORY_SPACE>::SharedAllocationRecordCommon; \
+ }
+
+#define KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_SPECIALIZATION( \
+ MEMORY_SPACE) \
+ template <> \
+ class Kokkos::Impl::SharedAllocationRecord<MEMORY_SPACE, void> \
+ : public Kokkos::Impl::HostInaccessibleSharedAllocationRecordCommon< \
+ MEMORY_SPACE> { \
+ using HostInaccessibleSharedAllocationRecordCommon< \
+ MEMORY_SPACE>::HostInaccessibleSharedAllocationRecordCommon; \
+ }
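+// Illustrative use (the space name is an example, not part of this change):
+// a host-visible space would be wired up with
+//   KOKKOS_IMPL_SHARED_ALLOCATION_SPECIALIZATION(Kokkos::HostSpace);
+// while a device-only space would use the HOST_INACCESSIBLE variant.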
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION( \
+ MEMORY_SPACE) \
+ template class Kokkos::Impl::SharedAllocationRecordCommon<MEMORY_SPACE>
+
+#define KOKKOS_IMPL_HOST_INACCESSIBLE_SHARED_ALLOCATION_RECORD_EXPLICIT_INSTANTIATION( \
+ MEMORY_SPACE) \
+ template class Kokkos::Impl::HostInaccessibleSharedAllocationRecordCommon< \
+ MEMORY_SPACE>
/* The address of this function is taken, so make sure it is unique */
template <class MemorySpace, class DestroyFunctor>
-void deallocate(SharedAllocationRecord<void, void>* record_ptr) {
+inline void deallocate(SharedAllocationRecord<void, void>* record_ptr) {
using base_type = SharedAllocationRecord<MemorySpace, void>;
using this_type = SharedAllocationRecord<MemorySpace, DestroyFunctor>;
delete ptr;
}
-} // namespace
-
/*
* Memory space specialization of SharedAllocationRecord< Space , void >
* requires :
&Kokkos::Impl::deallocate<MemorySpace, DestroyFunctor>),
m_destroy() {}
- SharedAllocationRecord() = delete;
- SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+ SharedAllocationRecord() = delete;
+ SharedAllocationRecord(const SharedAllocationRecord&) = delete;
SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
public:
// pressure on compiler optimization by reducing
// number of symbols and inline functions.
-#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT \
- KOKKOS_IF_ON_HOST((if (!(m_record_bits & DO_NOT_DEREF_FLAG)) { \
- Record::increment(m_record); \
- }))
+#ifdef KOKKOS_ENABLE_IMPL_REF_COUNT_BRANCH_UNLIKELY
+#define KOKKOS_IMPL_BRANCH_PROB KOKKOS_IMPL_ATTRIBUTE_UNLIKELY
+#else
+#define KOKKOS_IMPL_BRANCH_PROB
+#endif
-#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT \
- KOKKOS_IF_ON_HOST((if (!(m_record_bits & DO_NOT_DEREF_FLAG)) { \
- Record::decrement(m_record); \
- }))
+#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT \
+ KOKKOS_IF_ON_HOST( \
+ (if (!(m_record_bits & DO_NOT_DEREF_FLAG)) \
+ KOKKOS_IMPL_BRANCH_PROB { Record::increment(m_record); }))
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT \
+ KOKKOS_IF_ON_HOST( \
+ (if (!(m_record_bits & DO_NOT_DEREF_FLAG)) \
+ KOKKOS_IMPL_BRANCH_PROB { Record::decrement(m_record); }))
#define KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(rhs, \
override_tracking) \
}
template <class MemorySpace>
- constexpr SharedAllocationRecord<MemorySpace, void>* get_record() const
- noexcept {
+ constexpr SharedAllocationRecord<MemorySpace, void>* get_record()
+ const noexcept {
return (m_record_bits & DO_NOT_DEREF_FLAG)
? nullptr
: static_cast<SharedAllocationRecord<MemorySpace, void>*>(
#undef KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
#undef KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+#undef KOKKOS_IMPL_BRANCH_PROB
+};
+
+struct SharedAllocationDisableTrackingGuard {
+ SharedAllocationDisableTrackingGuard() {
+ KOKKOS_ASSERT(
+ (Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_enabled()));
+ Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_disable();
+ }
+
+ SharedAllocationDisableTrackingGuard(
+ const SharedAllocationDisableTrackingGuard&) = delete;
+ SharedAllocationDisableTrackingGuard(SharedAllocationDisableTrackingGuard&&) =
+ delete;
+
+ ~SharedAllocationDisableTrackingGuard() {
+ KOKKOS_ASSERT((
+ !Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_enabled()));
+ Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_enable();
+ }
+ // clang-format off
+ // The old version of clang format we use is particularly egregious here
+ SharedAllocationDisableTrackingGuard& operator=(
+ const SharedAllocationDisableTrackingGuard&) = delete;
+ SharedAllocationDisableTrackingGuard& operator=(
+ SharedAllocationDisableTrackingGuard&&) = delete;
+ // clang-format on
};
+template <class FunctorType, class... Args>
+inline FunctorType construct_with_shared_allocation_tracking_disabled(
+ Args&&... args) {
+ [[maybe_unused]] auto guard = SharedAllocationDisableTrackingGuard{};
+ return {std::forward<Args>(args)...};
+}
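+// Minimal usage sketch (Functor stands in for any copyable functor type):
+//
+//   auto local_copy =
+//       construct_with_shared_allocation_tracking_disabled<Functor>(functor);
+//
+// The guard suppresses reference counting for the Views captured while the
+// copy is being made.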
} /* namespace Impl */
} /* namespace Kokkos */
#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (12/8/20) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_SHAREDALLOC_TIMPL_HPP
#define KOKKOS_IMPL_SHAREDALLOC_TIMPL_HPP
#include <Kokkos_HostSpace.hpp> // used with HostInaccessible specializations
-#include <string> // std::string
-#include <cstring> // strncpy
-#include <iostream> // ostream
+#include <cstring>
+#include <ostream>
+#include <string>
namespace Kokkos {
namespace Impl {
+template <class MemorySpace>
+SharedAllocationRecordCommon<MemorySpace>::~SharedAllocationRecordCommon() {
+ auto alloc_ptr = SharedAllocationRecord<void, void>::m_alloc_ptr;
+ auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+ auto label = SharedAllocationRecord<void, void>::m_label;
+ m_space.deallocate(label.c_str(), alloc_ptr, alloc_size,
+ alloc_size - sizeof(SharedAllocationHeader));
+}
+template <class MemorySpace>
+HostInaccessibleSharedAllocationRecordCommon<
+ MemorySpace>::~HostInaccessibleSharedAllocationRecordCommon() {
+ auto alloc_ptr = SharedAllocationRecord<void, void>::m_alloc_ptr;
+ auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+ auto label = SharedAllocationRecord<void, void>::m_label;
+ m_space.deallocate(label.c_str(), alloc_ptr, alloc_size,
+ alloc_size - sizeof(SharedAllocationHeader));
+}
+
+template <class MemorySpace>
+SharedAllocationRecordCommon<MemorySpace>::SharedAllocationRecordCommon(
+ MemorySpace const& space, std::string const& label, std::size_t alloc_size,
+ SharedAllocationRecord<void, void>::function_type dealloc)
+ : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+ &s_root_record,
+#endif
+ checked_allocation_with_header(space, label, alloc_size),
+ sizeof(SharedAllocationHeader) + alloc_size, dealloc, label),
+ m_space(space) {
+ auto& header = *SharedAllocationRecord<void, void>::m_alloc_ptr;
+ fill_host_accessible_header_info(this, header, label);
+}
+
+template <class MemorySpace>
+HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::
+ HostInaccessibleSharedAllocationRecordCommon(
+ MemorySpace const& space, std::string const& label,
+ std::size_t alloc_size,
+ SharedAllocationRecord<void, void>::function_type dealloc)
+ : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+ &s_root_record,
+#endif
+ checked_allocation_with_header(space, label, alloc_size),
+ sizeof(SharedAllocationHeader) + alloc_size, dealloc, label),
+ m_space(space) {
+ SharedAllocationHeader header;
+
+ fill_host_accessible_header_info(this, header, label);
+
+ typename MemorySpace::execution_space exec;
+ Kokkos::Impl::DeepCopy<MemorySpace, HostSpace>(
+ exec, SharedAllocationRecord<void, void>::m_alloc_ptr, &header,
+ sizeof(SharedAllocationHeader));
+ exec.fence(std::string("SharedAllocationRecord<Kokkos::") +
+ MemorySpace::name() +
+ "Space, void>::SharedAllocationRecord(): "
+ "fence after copying header from HostSpace");
+}
+
template <class MemorySpace>
auto SharedAllocationRecordCommon<MemorySpace>::allocate(
MemorySpace const& arg_space, std::string const& arg_label,
Kokkos::Impl::DeepCopy<MemorySpace, MemorySpace>(
r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
- Kokkos::fence(
- "SharedAllocationRecord<Kokkos::Experimental::HBWSpace, "
- "void>::reallocate_tracked(): fence after copying data");
+ Kokkos::fence(std::string("SharedAllocationRecord<") + MemorySpace::name() +
+ ", void>::reallocate_tracked(): fence after copying data");
+
+ record_base_t::increment(r_new);
+ record_base_t::decrement(r_old);
+
+ return r_new->data();
+}
+
+template <class MemorySpace>
+auto HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::allocate(
+ MemorySpace const& arg_space, std::string const& arg_label,
+ size_t arg_alloc_size) -> derived_t* {
+ return new derived_t(arg_space, arg_label, arg_alloc_size);
+}
+
+template <class MemorySpace>
+void* HostInaccessibleSharedAllocationRecordCommon<
+ MemorySpace>::allocate_tracked(const MemorySpace& arg_space,
+ const std::string& arg_alloc_label,
+ size_t arg_alloc_size) {
+ if (!arg_alloc_size) return nullptr;
+
+ SharedAllocationRecord* const r =
+ allocate(arg_space, arg_alloc_label, arg_alloc_size);
+
+ record_base_t::increment(r);
+
+ return r->data();
+}
+
+template <class MemorySpace>
+void HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::deallocate(
+ HostInaccessibleSharedAllocationRecordCommon::record_base_t* arg_rec) {
+ delete static_cast<derived_t*>(arg_rec);
+}
+
+template <class MemorySpace>
+void HostInaccessibleSharedAllocationRecordCommon<
+ MemorySpace>::deallocate_tracked(void* arg_alloc_ptr) {
+ if (arg_alloc_ptr != nullptr) {
+ SharedAllocationRecord* const r = derived_t::get_record(arg_alloc_ptr);
+ record_base_t::decrement(r);
+ }
+}
+
+template <class MemorySpace>
+void* HostInaccessibleSharedAllocationRecordCommon<
+ MemorySpace>::reallocate_tracked(void* arg_alloc_ptr,
+ size_t arg_alloc_size) {
+ derived_t* const r_old = derived_t::get_record(arg_alloc_ptr);
+ derived_t* const r_new =
+ allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
+
+ Kokkos::Impl::DeepCopy<MemorySpace, MemorySpace>(
+ r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
+ Kokkos::fence(std::string("SharedAllocationRecord<") + MemorySpace::name() +
+ ", void>::reallocate_tracked(): fence after copying data");
record_base_t::increment(r_new);
record_base_t::decrement(r_old);
return record_base_t::m_label;
}
-template <class MemorySpace>
-void SharedAllocationRecordCommon<MemorySpace>::
- _fill_host_accessible_header_info(SharedAllocationHeader& arg_header,
- std::string const& arg_label) {
- // Fill in the Header information, directly accessible on the host
-
- arg_header.m_record = &self();
-
- strncpy(arg_header.m_label, arg_label.c_str(),
- SharedAllocationHeader::maximum_label_length);
- // Set last element zero, in case c_str is too long
- arg_header.m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
-}
-
template <class MemorySpace>
void SharedAllocationRecordCommon<MemorySpace>::print_records(
std::ostream& s, const MemorySpace&, bool detail) {
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SIMPLETASKSCHEDULER_HPP
#define KOKKOS_SIMPLETASKSCHEDULER_HPP
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_SINGLETASKQUEUE_HPP
#define KOKKOS_IMPL_SINGLETASKQUEUE_HPP
//----------------------------------------------------------------------------
// <editor-fold desc="Constructors, destructors, and assignment"> {{{2
- SingleTaskQueue() = delete;
- SingleTaskQueue(SingleTaskQueue const&) = delete;
- SingleTaskQueue(SingleTaskQueue&&) = delete;
+ SingleTaskQueue() = delete;
+ SingleTaskQueue(SingleTaskQueue const&) = delete;
+ SingleTaskQueue(SingleTaskQueue&&) = delete;
SingleTaskQueue& operator=(SingleTaskQueue const&) = delete;
- SingleTaskQueue& operator=(SingleTaskQueue&&) = delete;
+ SingleTaskQueue& operator=(SingleTaskQueue&&) = delete;
explicit SingleTaskQueue(typename base_t::execution_space const&,
typename base_t::memory_space const&,
}
KOKKOS_INLINE_FUNCTION
- constexpr team_scheduler_info_type initial_team_scheduler_info(int) const
- noexcept {
+ constexpr team_scheduler_info_type initial_team_scheduler_info(
+ int) const noexcept {
return {};
}
};
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
class Stacktrace {
public:
- Stacktrace() = delete;
- Stacktrace(const Stacktrace&) = delete;
+ Stacktrace() = delete;
+ Stacktrace(const Stacktrace&) = delete;
Stacktrace& operator=(const Stacktrace&) = delete;
Stacktrace(Stacktrace&&) = delete;
- Stacktrace& operator=(Stacktrace&&) = delete;
- ~Stacktrace() = delete;
+ Stacktrace& operator=(Stacktrace&&) = delete;
+ ~Stacktrace() = delete;
// These are public only to avoid wasting an extra stacktrace line.
// See save_stacktrace below.
const size_t end = find_first_whitespace(s, cur);
const bool last = (end == std::string::npos);
const size_t count = last ? end : size_t(end - cur);
- c(s.substr(cur, count), last);
+ c(s.substr(cur, count));
cur = find_first_non_whitespace(s, end);
}
}
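// Example: for_each_token("a  b c", cb) invokes cb("a"), cb("b"), cb("c");
// the callback's former `last` flag was dropped because the remaining
// callers below no longer need it.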
struct main_column_info {
bool found_main;
size_t main_col;
- std::vector<size_t> main_col_lens;
};
main_column_info find_main_column(const std::vector<std::string>& traceback) {
size_t main_col = 0;
for (auto&& entry : traceback) {
size_t col_count = 0;
- for_each_token(entry, [&](const std::string& s, bool) {
+ for_each_token(entry, [&](const std::string& s) {
const size_t pos = s.find("main");
if (pos != std::string::npos) {
found_main = true;
}
}
- // Make another pass to get the column lengths.
- // Only demangle the column of functions.
- std::vector<size_t> max_col_lengths;
- for (auto&& entry : traceback) {
- size_t col_count = 0;
- for_each_token(entry, [&](const std::string& s, bool) {
- const size_t cur_col_len =
- (found_main && col_count == main_col) ? demangle(s).size() : s.size();
- ++col_count;
- if (max_col_lengths.size() < col_count) {
- max_col_lengths.push_back(cur_col_len);
- } else {
- const size_t old_max_len = max_col_lengths[col_count - 1];
- if (old_max_len < cur_col_len) {
- max_col_lengths[col_count - 1] = cur_col_len;
- }
- }
- });
- }
- return main_column_info{found_main, main_col, max_col_lengths};
+ return main_column_info{found_main, main_col};
}
-void demangle_and_print_traceback_entry(
- std::ostream& out, const std::string& traceback_entry,
- const bool found_main, const size_t main_col,
- const std::vector<size_t>& max_col_lens) {
+void demangle_and_print_traceback_entry(std::ostream& out,
+ const std::string& traceback_entry,
+ const bool found_main,
+ const size_t main_col) {
std::vector<std::string> tokens;
size_t cur_col = 0;
- for_each_token(traceback_entry, [&](const std::string& s, bool last) {
- const size_t old_width(out.width());
- out.width(max_col_lens[cur_col]);
- try {
- if (found_main && cur_col == main_col) {
- out << demangle(s);
- } else {
- out << s;
- }
- if (!last) {
- out << " ";
- }
- ++cur_col;
- } catch (...) {
- out.width(old_width);
- throw;
+
+  // Print all columns other than the function name first
+ for_each_token(traceback_entry, [&](const std::string& s) {
+ if (!(found_main && cur_col == main_col)) {
+ out << s;
+ }
+ ++cur_col;
+ });
+
+ out << " ";
+
+  // Then print the demangled function name
+ cur_col = 0;
+ for_each_token(traceback_entry, [&](const std::string& s) {
+ if (found_main && cur_col == main_col) {
+ out << demangle(s);
}
- out.width(old_width);
+ ++cur_col;
});
}
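// Usage sketch (hypothetical values): for a two-token traceback entry such as
//   "[0x400d19] _ZN6Kokkos4Impl9host_initEv"
// with the mangled name identified as the main column, the two passes above
// emit the address token, a separating space, and then the demangled name:
//   "[0x400d19] Kokkos::Impl::host_init()".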
const auto result = find_main_column(traceback);
for (auto&& entry : traceback) {
demangle_and_print_traceback_entry(out, entry, result.found_main,
- result.main_col, result.main_col_lens);
+ result.main_col);
out << std::endl;
}
}
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
#ifndef KOKKOS_STACKTRACE_HPP
#define KOKKOS_STACKTRACE_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_STRING_MANIPULATION_HPP
#define KOKKOS_STRING_MANIPULATION_HPP
template <class Unsigned>
KOKKOS_FUNCTION constexpr unsigned int to_chars_len(Unsigned val) {
unsigned int const base = 10;
- static_assert(std::is_integral<Unsigned>::value, "implementation bug");
- static_assert(std::is_unsigned<Unsigned>::value, "implementation bug");
+ static_assert(std::is_integral_v<Unsigned>, "implementation bug");
+ static_assert(std::is_unsigned_v<Unsigned>, "implementation bug");
unsigned int n = 1;
while (val >= base) {
val /= base;
KOKKOS_FUNCTION constexpr void to_chars_impl(char *first, unsigned int len,
Unsigned val) {
unsigned int const base = 10;
- static_assert(std::is_integral<Unsigned>::value, "implementation bug");
- static_assert(std::is_unsigned<Unsigned>::value, "implementation bug");
+ static_assert(std::is_integral_v<Unsigned>, "implementation bug");
+ static_assert(std::is_unsigned_v<Unsigned>, "implementation bug");
unsigned int pos = len - 1;
while (val > 0) {
auto const num = val % base;
if (value == 0) {
*first = '0';
return {first + 1, {}};
- } else if
-#ifdef KOKKOS_ENABLE_CXX17
- constexpr
-#endif
- (std::is_signed<Integral>::value) {
+ } else if constexpr (std::is_signed_v<Integral>) {
if (value < 0) {
*first++ = '-';
unsigned_val = Unsigned(~value) + Unsigned(1);
}
}
- unsigned int const len = to_chars_len(unsigned_val);
+ std::ptrdiff_t const len = to_chars_len(unsigned_val);
if (last - first < len) {
return {last, errc::value_too_large};
}
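// Worked example (editorial): to_chars_len counts base-10 digits by repeated
// division, so to_chars_len(0u) == 1, to_chars_len(9u) == 1, and
// to_chars_len(12345u) == 5. The bounds check above then rejects any buffer
// with fewer remaining characters than digits (a leading '-' for negative
// signed input was already written before this point).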
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#define KOKKOS_IMPL_TASKBASE_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
int16_t m_task_type; ///< Type of task
int16_t m_priority; ///< Priority of runnable task
- TaskBase(TaskBase&&) = delete;
- TaskBase(const TaskBase&) = delete;
- TaskBase& operator=(TaskBase&&) = delete;
+ TaskBase(TaskBase&&) = delete;
+ TaskBase(const TaskBase&) = delete;
+ TaskBase& operator=(TaskBase&&) = delete;
TaskBase& operator=(const TaskBase&) = delete;
KOKKOS_DEFAULTED_FUNCTION ~TaskBase() = default;
// Assign dependence to m_next. It will be processed in the subsequent
// call to schedule. Error if the dependence is reset.
- if (lock != Kokkos::Impl::desul_atomic_exchange(
- &m_next, dep, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice())) {
+ if (lock != desul::atomic_exchange(&m_next, dep, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice())) {
Kokkos::abort("TaskScheduler ERROR: resetting task dependence");
}
if (nullptr != dep) {
// The future may be destroyed upon returning from this call
// so increment reference count to track this assignment.
- Kokkos::Impl::desul_atomic_inc(&(dep->m_ref_count),
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&(dep->m_ref_count), desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
}
}
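// Migration note (editorial): the desul calls above follow the common shape
//   desul::atomic_op(address, desul::MemoryOrderSeqCst(),
//                    desul::MemoryScopeDevice());
// replacing the former Kokkos::Impl::desul_atomic_* wrappers with the desul
// library's own entry points while keeping the same ordering and scope
// arguments.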
// the number of full task types that fit into a cache line. We'll leave it
// here for now, though, since we're probably going to be ripping all of the
// old TaskBase stuff out eventually anyway.
+#ifndef KOKKOS_IMPL_32BIT
constexpr size_t unpadded_task_base_size = 44 + 2 * sizeof(int16_t);
// don't forget padding:
constexpr size_t task_base_misalignment =
static_assert(sizeof(TaskBase) == expected_task_base_size,
"Verifying expected sizeof(TaskBase)");
-
+#endif
// </editor-fold> end Verify the size of TaskBase is as expected }}}2
//------------------------------------------------------------------------------
template <class Scheduler, typename ResultType, class FunctorType>
class Task : public TaskBase, public FunctorType {
public:
- Task() = delete;
- Task(Task&&) = delete;
- Task(const Task&) = delete;
- Task& operator=(Task&&) = delete;
+ Task() = delete;
+ Task(Task&&) = delete;
+ Task(const Task&) = delete;
+ Task& operator=(Task&&) = delete;
Task& operator=(const Task&) = delete;
using root_type = TaskBase;
} /* namespace Impl */
} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#define KOKKOS_IMPL_TASKNODE_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
-#ifdef KOKKOS_COMPILER_PGI
-// Bizarrely, an extra jump instruction forces the PGI compiler to avoid a
-// bug related to (probably?) empty base optimization and/or aggregate
-// construction. This must be defined out-of-line to force generation of a
-// jump instruction.
-void _kokkos_pgi_compiler_bug_workaround();
-#endif
-
enum TaskType : int16_t {
TaskTeam = 0,
TaskSingle = 1,
public:
KOKKOS_INLINE_FUNCTION
-#ifndef KOKKOS_COMPILER_PGI
- constexpr
-#endif
- explicit ReferenceCountedBase(
- reference_count_size_type initial_reference_count)
+ constexpr explicit ReferenceCountedBase(
+ reference_count_size_type initial_reference_count)
: m_ref_count(initial_reference_count) {
// This can't be here because it breaks constexpr
// KOKKOS_EXPECTS(initial_reference_count > 0);
-#ifdef KOKKOS_COMPILER_PGI
- Impl::_kokkos_pgi_compiler_bug_workaround();
-#endif
}
/** Decrement the reference count,
KOKKOS_INLINE_FUNCTION
void increment_reference_count() {
- Kokkos::Impl::desul_atomic_inc(&m_ref_count,
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&m_ref_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
}
};
m_priority(static_cast<priority_type>(priority)),
m_is_respawning(false) {}
- TaskNode() = delete;
- TaskNode(TaskNode const&) = delete;
- TaskNode(TaskNode&&) = delete;
+ TaskNode() = delete;
+ TaskNode(TaskNode const&) = delete;
+ TaskNode(TaskNode&&) = delete;
TaskNode& operator=(TaskNode const&) = delete;
- TaskNode& operator=(TaskNode&&) = delete;
+ TaskNode& operator=(TaskNode&&) = delete;
KOKKOS_INLINE_FUNCTION
bool is_aggregate() const noexcept {
} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_IMPL_TASKPOLICYDATA_HPP
#define KOKKOS_IMPL_TASKPOLICYDATA_HPP
//----------------------------------------------------------------------------
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_Core_fwd.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
KOKKOS_INLINE_FUNCTION
static constexpr bool has_predecessor() noexcept {
- return !std::is_same<PredecessorFuture, std::nullptr_t>::value;
+ return !std::is_same_v<PredecessorFuture, std::nullptr_t>;
}
KOKKOS_INLINE_FUNCTION
} // namespace Impl
} // namespace Kokkos
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#define KOKKOS_IMPL_TASKQUEUE_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
//----------------------------------------
~TaskQueue();
- TaskQueue() = delete;
- TaskQueue(TaskQueue&&) = delete;
- TaskQueue(TaskQueue const&) = delete;
- TaskQueue& operator=(TaskQueue&&) = delete;
+ TaskQueue() = delete;
+ TaskQueue(TaskQueue&&) = delete;
+ TaskQueue(TaskQueue const&) = delete;
+ TaskQueue& operator=(TaskQueue&&) = delete;
TaskQueue& operator=(TaskQueue const&) = delete;
TaskQueue(const memory_pool& arg_memory_pool);
task_root_type* const rhs) {
if (*lhs) decrement(*lhs);
if (rhs) {
- Kokkos::Impl::desul_atomic_inc(&rhs->m_ref_count,
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&rhs->m_ref_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
}
// Force write of *lhs
} /* namespace Impl */
} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_IMPL_TASKQUEUECOMMON_HPP
#define KOKKOS_IMPL_TASKQUEUECOMMON_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
KOKKOS_INLINE_FUNCTION
void _increment_ready_count() {
// TODO @tasking @memory_order DSH memory order
- Kokkos::Impl::desul_atomic_inc(&this->m_ready_count,
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_inc(&this->m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
}
KOKKOS_INLINE_FUNCTION
void _decrement_ready_count() {
// TODO @tasking @memory_order DSH memory order
- Kokkos::Impl::desul_atomic_dec(&this->m_ready_count,
- Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice());
+ desul::atomic_dec(&this->m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
}
public:
KOKKOS_INLINE_FUNCTION
bool is_done() const noexcept {
- // TODO @tasking @memory_order DSH Memory order, instead of volatile
- return (*(volatile int*)(&m_ready_count)) == 0;
+ return desul::atomic_load(&m_ready_count, desul::MemoryOrderAcquire(),
+ desul::MemoryScopeDevice()) == 0;
}
KOKKOS_INLINE_FUNCTION
int32_t ready_count() const noexcept {
- // TODO @tasking @memory_order DSH Memory order, instead of volatile
- return (*(volatile int*)(&m_ready_count));
+ return desul::atomic_load(&m_ready_count, desul::MemoryOrderAcquire(),
+ desul::MemoryScopeDevice());
}
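// Note (editorial): desul::atomic_load with MemoryOrderAcquire replaces the
// former volatile reads; acquire ordering ensures that the writes of the
// thread that brought m_ready_count to zero are visible to any thread that
// observes the zero through is_done() or ready_count().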
template <class TaskQueueTraits, class TeamSchedulerInfo>
// && Same<MemoryPool, typename Derived::memory_pool>
{
static_assert(
- std::is_same<ExecutionSpace,
- typename Derived::execution_space>::value &&
- std::is_same<MemorySpace, typename Derived::memory_space>::value &&
- std::is_same<MemoryPool, typename Derived::memory_pool>::value,
+ std::is_same_v<ExecutionSpace, typename Derived::execution_space> &&
+ std::is_same_v<MemorySpace, typename Derived::memory_space> &&
+ std::is_same_v<MemoryPool, typename Derived::memory_pool>,
"Type mismatch in task_queue_allocation_size customization point");
return sizeof(Derived);
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_IMPL_TASKQUEUEMEMORYMANAGER_HPP
#define KOKKOS_IMPL_TASKQUEUEMEMORYMANAGER_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
} else {
void* data = m_pool.allocate(static_cast<size_t>(requested_size));
- Kokkos::Impl::desul_atomic_inc(
- &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_inc(
+ &m_count_alloc, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
// TODO @tasking @minor DSH make this thread safe? (otherwise, it's just
// an approximation, which is probably fine...)
if (m_max_alloc < m_count_alloc) m_max_alloc = m_count_alloc;
KOKKOS_INLINE_FUNCTION void deallocate(
PoolAllocatedObjectBase<CountType>&& obj) {
m_pool.deallocate((void*)&obj, 1);
- Kokkos::Impl::desul_atomic_dec(
- &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_dec(
+ &m_count_alloc, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
}
KOKKOS_INLINE_FUNCTION
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#define KOKKOS_IMPL_TASKQUEUEMULTIPLE_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+// We allow using deprecated classes in this file
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_PUSH()
+#endif
+
namespace Kokkos {
namespace Impl {
for (int iteam = 0; iteam < m_other_queues->size(); ++iteam) {
if (iteam == m_league_rank) continue;
auto& steal_from = get_team_queue(iteam);
- if (*((volatile int*)&steal_from.m_ready_count) > 0) {
+ if (desul::atomic_load(&steal_from.m_ready_count,
+ desul::MemoryOrderAcquire(),
+ desul::MemoryScopeDevice()) > 0) {
// we've found at least one queue that's not done, so even if we
// can't pop something off of it we shouldn't return a nullptr
// indicating completion. rv will be end_tag when the pop fails
// task stolen.
// first increment our ready count, then decrement the ready count
// on the other queue:
- Kokkos::Impl::desul_atomic_inc(
- &this->m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO?
- // memory_order_relaxed
- Kokkos::Impl::desul_atomic_dec(
- &steal_from.m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO?
- // memory_order_relaxed
+ desul::atomic_inc(
+ &this->m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_dec(
+ &steal_from.m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
return rv;
}
}
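// Note (editorial): incrementing this queue's ready count before
// decrementing the victim's keeps the stolen task accounted for in at least
// one queue at every instant, so a concurrent completion check can never
// transiently see it missing from both.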
int m_size = static_cast<int>(KOKKOS_INVALID_INDEX);
public:
- LeagueQueueCollection() = delete;
- LeagueQueueCollection(LeagueQueueCollection const&) = delete;
- LeagueQueueCollection(LeagueQueueCollection&&) = delete;
+ LeagueQueueCollection() = delete;
+ LeagueQueueCollection(LeagueQueueCollection const&) = delete;
+ LeagueQueueCollection(LeagueQueueCollection&&) = delete;
LeagueQueueCollection& operator=(LeagueQueueCollection const&) = delete;
- LeagueQueueCollection& operator=(LeagueQueueCollection&&) = delete;
+ LeagueQueueCollection& operator=(LeagueQueueCollection&&) = delete;
~LeagueQueueCollection() {
// destroy only the initialized queues that we own
} /* namespace Impl */
} /* namespace Kokkos */
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+KOKKOS_IMPL_DISABLE_DEPRECATED_WARNINGS_POP()
+#endif
+
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
+#define KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <impl/Kokkos_TaskQueueMultiple.hpp>
+
+#define KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING_MULTIPLE 0
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecSpace, class MemorySpace>
+void TaskQueueMultiple<ExecSpace,
+ MemorySpace>::Destroy::destroy_shared_allocation() {
+ m_queue->get_team_queue(0).~TaskQueueMultiple();
+}
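+// Note (editorial assumption): the explicit destructor call on team queue 0
+// is needed because the queue collection is placement-constructed inside a
+// shared allocation; releasing the memory itself is left to the allocation
+// record that owns it, not to this destroy hook.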
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP */
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_IMPL_TASKQUEUE_IMPL_HPP
#define KOKKOS_IMPL_TASKQUEUE_IMPL_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#define KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING 0
void *const p = m_memory.allocate(n);
if (p) {
- Kokkos::Impl::desul_atomic_inc(
- &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_inc(
+ &m_count_alloc, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
// if ( m_max_alloc < m_count_alloc ) m_max_alloc = m_count_alloc ;
}
KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::deallocate(void *p,
size_t n) {
m_memory.deallocate(p, n);
- Kokkos::Impl::desul_atomic_dec(
- &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_dec(&m_count_alloc, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
}
//----------------------------------------------------------------------------
// *queue = task;
// }
// old_head = *queue;
- old_head = Kokkos::Impl::desul_atomic_compare_exchange(
+ old_head = desul::atomic_compare_exchange(
const_cast<task_root_type **>(queue), old_head, task,
- Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+ desul::MemoryOrderSeqCst(), desul::MemoryScopeDevice());
if (old_head_tmp == old_head) return true;
}
task_root_type *const x = task;
// task = Kokkos::atomic_compare_exchange(queue, x, lock);
- task = Kokkos::Impl::desul_atomic_compare_exchange(
- const_cast<task_root_type **>(queue), x, lock,
- Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+ task = desul::atomic_compare_exchange(const_cast<task_root_type **>(queue),
+ x, lock, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice());
if (x == task) {
// CAS succeeded and queue is locked
// to track number of ready + executing tasks.
// The ready count will be decremented when the task is complete.
- Kokkos::Impl::desul_atomic_inc(
- &m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_inc(
+ &m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
task_root_type *volatile *const ready_queue =
&m_ready[t.m_priority][t.m_task_type];
task_root_type *const zero = nullptr;
task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
- if (lock != Kokkos::Impl::desul_atomic_exchange(
- &task->m_next, zero, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice())) {
+ if (lock != desul::atomic_exchange(&task->m_next, zero,
+ desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice())) {
Kokkos::abort("TaskScheduler::respawn ERROR: already respawned");
}
}
// Stop other tasks from adding themselves to this task's wait queue
// by locking the head of this task's wait queue.
- task_root_type *x = Kokkos::Impl::desul_atomic_exchange(
+ task_root_type *x = desul::atomic_exchange(
const_cast<task_root_type **>(&t.m_wait), lock,
- Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+ desul::MemoryOrderSeqCst(), desul::MemoryScopeDevice());
if (x != (task_root_type *)lock) {
// This thread has transitioned this 'task' to complete.
// A runnable task was popped from a ready queue and executed.
// If respawned into a ready queue then the ready count was incremented
// so decrement whether respawned or not.
- Kokkos::Impl::desul_atomic_dec(
- &m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
- Kokkos::Impl::MemoryScopeDevice()); // TODO? memory_order_relaxed
+ desul::atomic_dec(
+ &m_ready_count, desul::MemoryOrderSeqCst(),
+ desul::MemoryScopeDevice()); // TODO? memory_order_relaxed
}
}
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
// Experimental unified task-data parallel manycore LDRD
#define KOKKOS_IMPL_TASKRESULT_HPP
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_TaskScheduler_fwd.hpp>
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER
-*/
#ifndef KOKKOS_TASKTEAMMEMBER_HPP
#define KOKKOS_TASKTEAMMEMBER_HPP
//----------------------------------------------------------------------------
#include <Kokkos_Macros.hpp>
+
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#error "The tasking framework is deprecated"
+#endif
+
#if defined(KOKKOS_ENABLE_TASKDAG)
#include <Kokkos_Core_fwd.hpp>
// type that we're adapting
template <typename... Args>
KOKKOS_INLINE_FUNCTION explicit TaskTeamMemberAdapter(
- std::enable_if_t<std::is_constructible<TeamMember, Args...>::value,
- Scheduler>
+ std::enable_if_t<std::is_constructible_v<TeamMember, Args...>, Scheduler>
arg_scheduler,
Args&&... args) // TODO @tasking @minor DSH noexcept specification
: TeamMember(std::forward<Args>(args)...),
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+static_assert(false,
+ "Including non-public Kokkos header files is not allowed.");
+#endif
+
+#ifndef KOKKOS_IMPL_TEAMMDPOLICY_HPP
+#define KOKKOS_IMPL_TEAMMDPOLICY_HPP
+
+namespace Kokkos {
+
+namespace Impl {
+
+// Tag class to choose the nested loop specialization
+// - LastNestLevel means call the actual closure
+// - ParThread means use TeamThreadRange
+// - ParVector means use ThreadVectorRange
+template <TeamMDRangeLastNestLevel LastNestLevel,
+ TeamMDRangeParThread ParThread, TeamMDRangeParVector ParVector>
+struct TeamMDRangeMode {
+ static constexpr TeamMDRangeLastNestLevel last_nest_level = LastNestLevel;
+ static constexpr TeamMDRangeParThread par_thread = ParThread;
+ static constexpr TeamMDRangeParVector par_vector = ParVector;
+};
+
+// Tag class to keep track of the loop nest level and where to deploy thread and
+// vector parallelism
+// - Rank is Kokkos::Rank<TotalNestLevel, Iter>
+// - total_nest_level is the total number of loop nests
+// - iter is whether to go forward or backward through ranks (i.e. the
+// iteration order for MDRangePolicy)
+// - ParThreadNestLevel is the nesting level on which to deploy thread
+// parallelism
+// - ParVectorNestLevel is the nesting level on which to deploy vector
+// parallelism
+// - CurrentNestLevel is the nest level of interest
+template <typename Rank, int ParThreadNestLevel, int ParVectorNestLevel,
+ int CurrentNestLevel>
+struct TeamMDRangeNestingTracker {
+ using NestLevelType = int;
+ static constexpr Iterate iter = Rank::outer_direction;
+ static constexpr NestLevelType total_nest_level = Rank::rank;
+ static constexpr NestLevelType par_thread_nest_level = ParThreadNestLevel;
+ static constexpr NestLevelType par_vector_nest_level = ParVectorNestLevel;
+ static constexpr NestLevelType current_nest_level = CurrentNestLevel;
+
+ // We have to recursively process ranks [0..total_nest_level-1]
+ using RangeMode =
+ TeamMDRangeMode<(iter == Iterate::Right)
+ ? static_cast<TeamMDRangeLastNestLevel>(
+ current_nest_level == total_nest_level)
+ : static_cast<TeamMDRangeLastNestLevel>(
+ current_nest_level == -1),
+ static_cast<TeamMDRangeParThread>(current_nest_level ==
+ par_thread_nest_level),
+ static_cast<TeamMDRangeParVector>(current_nest_level ==
+ par_vector_nest_level)>;
+};
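+// Illustrative walk-through (hypothetical configuration): for Rank<3,
+// Iterate::Right> with thread parallelism on nest level 0 and vector
+// parallelism on nest level 2, the tracker yields a ParThread mode at level
+// 0, a ParVector mode at level 2, and the LastNestLevel mode once
+// current_nest_level reaches total_nest_level (3), at which point the user
+// closure is invoked.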
+
+// Structs to determine on which nested level parallelization happens.
+// - Rank is Kokkos::Rank<TotalNestLevel, Iter>
+// - TotalNestLevel is the total number of loop nests
+// - Iter is whether to go forward or backward through ranks (i.e. the
+// iteration order for MDRangePolicy)
+// - ThreadAndVector determines whether both vector and thread parallelism is
+// in use
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct HostBasedNestLevel {
+ static constexpr bool is_direction_left =
+ (Rank::outer_direction == Iterate::Left);
+ static constexpr int par_rt = is_direction_left ? Rank::rank - 1 : 0;
+ static constexpr int par_rv = is_direction_left ? 0 : Rank::rank - 1;
+ static constexpr int invalid = -2;
+};
+
+template <typename Rank, TeamMDRangeThreadAndVector ThreadAndVector>
+struct AcceleratorBasedNestLevel {
+ static constexpr bool is_direction_left =
+ (Rank::outer_direction == Iterate::Left);
+
+ // If vector parallelism is in use, deploy thread parallelism on
+ // the second to the last nested level; otherwise, thread parallelism on the
+ // last nested level
+ static constexpr int left_par_rt =
+ (ThreadAndVector == TeamMDRangeThreadAndVector::Both) ? 1 : 0;
+
+ static constexpr int right_par_rt =
+ (ThreadAndVector == TeamMDRangeThreadAndVector::Both) ? Rank::rank - 2
+ : Rank::rank - 1;
+
+ static constexpr int par_rt = is_direction_left ? left_par_rt : right_par_rt;
+
+ // Vector parallelism will always be on the last index
+ static constexpr int par_rv = is_direction_left ? 0 : Rank::rank - 1;
+ static constexpr int invalid = -2;
+};
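+// Worked example (editorial, compile-time only): with both thread and vector
+// parallelism requested, a left-iterating rank-3 nest deploys threads one
+// level above the innermost vector loop.
+static_assert(AcceleratorBasedNestLevel<Rank<3, Iterate::Left>,
+                                        TeamMDRangeThreadAndVector::Both>::
+                  par_rt == 1,
+              "threads one level above the vector loop");
+static_assert(AcceleratorBasedNestLevel<Rank<3, Iterate::Left>,
+                                        TeamMDRangeThreadAndVector::Both>::
+                  par_rv == 0,
+              "vector parallelism on the leftmost (innermost) rank");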
+
+template <typename TeamHandle>
+KOKKOS_INLINE_FUNCTION auto nested_policy(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::NotLastNestLevel,
+ TeamMDRangeParThread::ParThread,
+ TeamMDRangeParVector::NotParVector>,
+ TeamHandle const team, int count) {
+ return TeamThreadRange(team, count);
+}
+
+template <typename TeamHandle>
+KOKKOS_INLINE_FUNCTION auto nested_policy(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::NotLastNestLevel,
+ TeamMDRangeParThread::NotParThread,
+ TeamMDRangeParVector::ParVector>,
+ TeamHandle const team, int count) {
+ return ThreadVectorRange(team, count);
+}
+
+template <typename TeamHandle>
+KOKKOS_INLINE_FUNCTION auto nested_policy(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::NotLastNestLevel,
+ TeamMDRangeParThread::ParThread,
+ TeamMDRangeParVector::ParVector>,
+ TeamHandle const team, int count) {
+ return TeamVectorRange(team, count);
+}
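+// Dispatch summary (editorial): the three overloads above map the mode tags
+// onto the existing team policies: ParThread alone selects TeamThreadRange,
+// ParVector alone selects ThreadVectorRange, and both together select
+// TeamVectorRange, so the nested-loop drivers below can stay tag-generic.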
+
+// TeamMDRangeNestingTracker is only needed to deduce template parameters
+template <typename Rank, int ParThreadNestLevel, int ParVectorNestLevel,
+ int CurrentNestLevel, typename Policy, typename Lambda,
+ typename... Args>
+KOKKOS_INLINE_FUNCTION void nested_loop(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::LastNestLevel,
+ TeamMDRangeParThread::NotParThread,
+ TeamMDRangeParVector::NotParVector> const,
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ CurrentNestLevel>,
+ Policy const&, Lambda const& lambda, Impl::NoReductionTag&&, Args... args) {
+ lambda(args...);
+}
+
+template <typename Rank, int ParThreadNestLevel, int ParVectorNestLevel,
+ int CurrentNestLevel, typename Policy, typename Lambda,
+ typename ReducerValueType, typename... Args>
+KOKKOS_INLINE_FUNCTION void nested_loop(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::LastNestLevel,
+ TeamMDRangeParThread::NotParThread,
+ TeamMDRangeParVector::NotParVector> const,
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ CurrentNestLevel>,
+ Policy const&, Lambda const& lambda, ReducerValueType& val, Args... args) {
+ lambda(args..., val);
+}
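+// Note (editorial): of the two LastNestLevel overloads above, the
+// Impl::NoReductionTag variant invokes lambda(args...) for parallel_for
+// style nests, while the reducer variant passes the reduction value as the
+// trailing argument, matching the closure signature MDRangePolicy expects.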
+
+// Nested loop for serial iteration
+template <typename Rank, int ParThreadNestLevel, int ParVectorNestLevel,
+ int CurrentNestLevel, typename Policy, typename Lambda,
+ typename ReducerValueType, typename... Args>
+KOKKOS_INLINE_FUNCTION void nested_loop(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::NotLastNestLevel,
+ TeamMDRangeParThread::NotParThread,
+ TeamMDRangeParVector::NotParVector> const,
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ CurrentNestLevel>,
+ Policy const& policy, Lambda const& lambda, ReducerValueType&& val,
+ Args... args) {
+ constexpr int next_nest_level =
+ CurrentNestLevel + (Rank::outer_direction == Iterate::Right ? 1 : -1);
+ using NextNestingTracker =
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ next_nest_level>;
+ using TeamMDNextMode = typename NextNestingTracker::RangeMode;
+
+ for (int i = 0; i != policy.boundaries[CurrentNestLevel]; ++i) {
+ if constexpr (Rank::outer_direction == Iterate::Right) {
+ nested_loop(TeamMDNextMode(), NextNestingTracker(), policy, lambda,
+ std::forward<ReducerValueType>(val), args..., i);
+ } else {
+ nested_loop(TeamMDNextMode(), NextNestingTracker(), policy, lambda,
+ std::forward<ReducerValueType>(val), i, args...);
+ }
+ }
+}
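+
+// Note (editorial): regardless of direction, the indices reach the lambda in
+// rank order (i0, i1, ...): Iterate::Right appends the new index while
+// recursing upward through the ranks, Iterate::Left prepends it while
+// recursing downward. The direction only changes which rank varies fastest.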
+
+template <TeamMDRangeParThread ParThread, TeamMDRangeParVector ParVector,
+ typename Rank, int ParThreadNestLevel, int ParVectorNestLevel,
+ int CurrentNestLevel, typename Policy, typename Lambda,
+ typename ReducerValueType, typename... Args>
+KOKKOS_INLINE_FUNCTION void nested_loop(
+ TeamMDRangeMode<TeamMDRangeLastNestLevel::NotLastNestLevel, ParThread,
+ ParVector> const mode,
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ CurrentNestLevel>,
+ Policy const& policy, Lambda const& lambda, ReducerValueType&& val,
+ Args... args) {
+ constexpr int next_nest_level =
+ CurrentNestLevel + (Rank::outer_direction == Iterate::Right ? 1 : -1);
+ using NextNestingTracker =
+ TeamMDRangeNestingTracker<Rank, ParThreadNestLevel, ParVectorNestLevel,
+ next_nest_level>;
+ using TeamMDNextMode = typename NextNestingTracker::RangeMode;
+
+ // This recursively processes ranks in [0 .. TotalNestLevel-1].
+ // args... is passed by value because the arguments are always ints.
+ parallel_for(
+ nested_policy(mode, policy.team, policy.boundaries[CurrentNestLevel]),
+ [&](int const& i) {
+ if constexpr (Rank::outer_direction == Iterate::Right) {
+ nested_loop(TeamMDNextMode(), NextNestingTracker(), policy, lambda,
+ std::forward<ReducerValueType>(val), args..., i);
+ } else {
+ nested_loop(TeamMDNextMode(), NextNestingTracker(), policy, lambda,
+ std::forward<ReducerValueType>(val), i, args...);
+ }
+ });
+}
+
+template <typename Rank, typename TeamMDPolicy, typename Lambda,
+ typename ReductionValueType>
+KOKKOS_INLINE_FUNCTION void md_parallel_impl(TeamMDPolicy const& policy,
+ Lambda const& lambda,
+ ReductionValueType&& val) {
+ static_assert(TeamMDPolicy::total_nest_level >= 2 &&
+ TeamMDPolicy::total_nest_level <= 8);
+
+ using TeamHandle = typename TeamMDPolicy::TeamHandleType;
+
+ constexpr auto total_nest_level = TeamMDPolicy::total_nest_level;
+ constexpr auto iter = TeamMDPolicy::iter;
+ constexpr auto thread_and_vector =
+ ((TeamMDPolicy::par_thread == Impl::TeamMDRangeParThread::ParThread) &&
+ (TeamMDPolicy::par_vector == Impl::TeamMDRangeParVector::ParVector))
+ ? Impl::TeamMDRangeThreadAndVector::Both
+ : Impl::TeamMDRangeThreadAndVector::NotBoth;
+ constexpr auto begin_rank =
+ (iter == Iterate::Right) ? 0 : (total_nest_level - 1);
+
+ using ThreadAndVectorNestLevel =
+ Impl::ThreadAndVectorNestLevel<Rank, typename TeamHandle::execution_space,
+ thread_and_vector>;
+
+ constexpr auto par_thread_nest_level =
+ (TeamMDPolicy::par_thread == TeamMDRangeParThread::ParThread)
+ ? ThreadAndVectorNestLevel::par_rt
+ : ThreadAndVectorNestLevel::invalid;
+ constexpr auto par_vector_nest_level =
+ (TeamMDPolicy::par_vector == TeamMDRangeParVector::ParVector)
+ ? ThreadAndVectorNestLevel::par_rv
+ : ThreadAndVectorNestLevel::invalid;
+
+ using InitNestingTracker =
+ TeamMDRangeNestingTracker<Rank, par_thread_nest_level,
+ par_vector_nest_level, begin_rank>;
+
+ using InitTeamMDMode = typename InitNestingTracker::RangeMode;
+
+ nested_loop(InitTeamMDMode(), InitNestingTracker(), policy, lambda,
+ std::forward<ReductionValueType>(val));
+}
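+
+// A minimal usage sketch (editorial; assumes the public TeamThreadMDRange
+// wrapper that ultimately dispatches to md_parallel_impl — the names below
+// are illustrative, not defined in this header):
+//
+//   using team_policy = Kokkos::TeamPolicy<>;
+//   using member_type = team_policy::member_type;
+//   Kokkos::parallel_for(
+//       team_policy(league_size, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(member_type const& team) {
+//         auto range = Kokkos::TeamThreadMDRange<Kokkos::Rank<2>,
+//                                                member_type>(team, n0, n1);
+//         Kokkos::parallel_for(range, [&](int i, int j) { /* body */ });
+//       });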
+
+} // namespace Impl
+
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+/**
+ * Header file that includes all of the Kokkos Tooling support
+ */
+
+#ifndef KOKKOS_IMPL_KOKKOS_TOOLS_HPP
+#define KOKKOS_IMPL_KOKKOS_TOOLS_HPP
+
+#include <impl/Kokkos_Profiling.hpp>
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_KOKKOS_TOOLS_GENERIC_HPP
#define KOKKOS_IMPL_KOKKOS_TOOLS_GENERIC_HPP
#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
#include <Kokkos_Core_fwd.hpp>
#include <Kokkos_ExecPolicy.hpp>
static std::map<std::string, Kokkos::Tools::Experimental::TeamSizeTuner>
team_tuners;
+static std::map<std::string,
+ Kokkos::Tools::Experimental::RangePolicyOccupancyTuner>
+ range_policy_tuners;
+
template <int Rank>
using MDRangeTuningMap =
std::map<std::string, Kokkos::Tools::Experimental::MDRangeTuner<Rank>>;
// For any policies without a tuning implementation, with a reducer
template <class ReducerType, class ExecPolicy, class Functor, typename TagType>
-void tune_policy(const size_t, const std::string&, ExecPolicy&, const Functor&,
- TagType) {}
+auto tune_policy(const size_t, const std::string&, const ExecPolicy& policy,
+ const Functor&, TagType) {
+ return policy;
+}
// For any policies without a tuning implementation, without a reducer
template <class ExecPolicy, class Functor, typename TagType>
-void tune_policy(const size_t, const std::string&, ExecPolicy&, const Functor&,
- const TagType&) {}
+auto tune_policy(const size_t, const std::string&, const ExecPolicy& policy,
+ const Functor&, const TagType&) {
+ return policy;
+}
/**
* Tuning for parallel_fors and parallel_scans is a fairly simple process.
auto max = policy.team_size_max(functor, tag);
return max;
}
+ template <typename Policy, typename FunctorReducer>
+ int get_max_team_size(const Policy& policy,
+ const FunctorReducer& functor_reducer,
+ const Kokkos::ParallelReduceTag tag) {
+ auto max = policy.team_size_max(functor_reducer.get_functor(),
+ functor_reducer.get_reducer(), tag);
+ return max;
+ }
template <typename Policy, typename Functor, typename Tag>
int get_recommended_team_size(const Policy& policy, const Functor& functor,
const Tag tag) {
using driver = Kokkos::Impl::ParallelFor<Functor, Policy, exec_space>;
return driver::max_tile_size_product(policy, functor);
}
- template <typename Policy, typename Functor>
+ template <typename Policy, typename FunctorReducer>
int get_mdrange_max_tile_size_product(const Policy& policy,
- const Functor& functor,
+ const FunctorReducer& functor_reducer,
const Kokkos::ParallelReduceTag&) {
using exec_space = typename Policy::execution_space;
using driver =
- Kokkos::Impl::ParallelReduce<Functor, Policy, Kokkos::InvalidType,
- exec_space>;
- return driver::max_tile_size_product(policy, functor);
+ Kokkos::Impl::ParallelReduce<FunctorReducer, Policy, exec_space>;
+ return driver::max_tile_size_product(policy, functor_reducer.get_functor());
}
};
// constructible from a reference to an
// instance of their value_type so we construct
// a value_type and temporary reducer here
-template <typename ReducerType>
struct ComplexReducerSizeCalculator {
- template <typename Policy, typename Functor, typename Tag>
- int get_max_team_size(const Policy& policy, const Functor& functor,
- const Tag tag) {
- using value_type = typename ReducerType::value_type;
- value_type value;
- ReducerType reducer_example = ReducerType(value);
- return policy.team_size_max(functor, reducer_example, tag);
+ template <typename Policy, typename FunctorReducer, typename Tag>
+ int get_max_team_size(const Policy& policy,
+ const FunctorReducer& functor_reducer, const Tag tag) {
+ return policy.team_size_max(functor_reducer.get_functor(),
+ functor_reducer.get_reducer(), tag);
}
- template <typename Policy, typename Functor, typename Tag>
- int get_recommended_team_size(const Policy& policy, const Functor& functor,
+ template <typename Policy, typename FunctorReducer, typename Tag>
+ int get_recommended_team_size(const Policy& policy,
+ const FunctorReducer& functor_reducer,
const Tag tag) {
- using value_type = typename ReducerType::value_type;
- value_type value;
- ReducerType reducer_example = ReducerType(value);
- return policy.team_size_recommended(functor, reducer_example, tag);
+ return policy.team_size_recommended(functor_reducer.get_functor(),
+ functor_reducer.get_reducer(), tag);
}
- template <typename Policy, typename Functor>
+ template <typename Policy, typename FunctorReducer>
int get_mdrange_max_tile_size_product(const Policy& policy,
- const Functor& functor,
+ const FunctorReducer& functor_reducer,
const Kokkos::ParallelReduceTag&) {
using exec_space = typename Policy::execution_space;
using driver =
- Kokkos::Impl::ParallelReduce<Functor, Policy, ReducerType, exec_space>;
- return driver::max_tile_size_product(policy, functor);
+ Kokkos::Impl::ParallelReduce<FunctorReducer, Policy, exec_space>;
+ return driver::max_tile_size_product(policy, functor_reducer.get_functor());
}
};
+template <typename Policy>
+auto default_tuned_version_of(const Policy& policy) {
+ return policy;
+}
+
} // namespace Impl
template <class Tuner, class Functor, class TagType,
class TuningPermissionFunctor, class Map, class Policy>
-void generic_tune_policy(const std::string& label_in, Map& map, Policy& policy,
- const Functor& functor, const TagType& tag,
+auto generic_tune_policy(const std::string& label_in, Map& map,
+ const Policy& policy, const Functor& functor,
+ const TagType& tag,
const TuningPermissionFunctor& should_tune) {
if (should_tune(policy)) {
std::string label = label_in;
}
return my_tuner;
}();
- tuner_iter->second.tune(policy);
+ return tuner_iter->second.tune(policy);
}
+ return Impl::default_tuned_version_of(policy);
}
template <class Tuner, class ReducerType, class Functor, class TagType,
class TuningPermissionFunctor, class Map, class Policy>
-void generic_tune_policy(const std::string& label_in, Map& map, Policy& policy,
- const Functor& functor, const TagType& tag,
+auto generic_tune_policy(const std::string& label_in, Map& map,
+ const Policy& policy, const Functor& functor,
+ const TagType& tag,
const TuningPermissionFunctor& should_tune) {
if (should_tune(policy)) {
std::string label = label_in;
auto tuner_iter = [&]() {
auto my_tuner = map.find(label);
if (my_tuner == map.end()) {
- return (map.emplace(
- label,
- Tuner(label, policy, functor, tag,
- Impl::ComplexReducerSizeCalculator<ReducerType>{}))
+ return (map.emplace(label, Tuner(label, policy, functor, tag,
+ Impl::ComplexReducerSizeCalculator{}))
.first);
}
return my_tuner;
}();
- tuner_iter->second.tune(policy);
+ return tuner_iter->second.tune(policy);
}
+ return Impl::default_tuned_version_of(policy);
}
// tune a TeamPolicy, without reducer
template <class Functor, class TagType, class... Properties>
-void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
- Kokkos::TeamPolicy<Properties...>& policy,
+auto tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::TeamPolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
- generic_tune_policy<Experimental::TeamSizeTuner>(
+ return generic_tune_policy<Experimental::TeamSizeTuner>(
label_in, team_tuners, policy, functor, tag,
[](const Kokkos::TeamPolicy<Properties...>& candidate_policy) {
return (candidate_policy.impl_auto_team_size() ||
// tune a TeamPolicy, with reducer
template <class ReducerType, class Functor, class TagType, class... Properties>
-void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
- Kokkos::TeamPolicy<Properties...>& policy,
+auto tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::TeamPolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
- generic_tune_policy<Experimental::TeamSizeTuner, ReducerType>(
+ return generic_tune_policy<Experimental::TeamSizeTuner, ReducerType>(
label_in, team_tuners, policy, functor, tag,
[](const Kokkos::TeamPolicy<Properties...>& candidate_policy) {
return (candidate_policy.impl_auto_team_size() ||
});
}
+template <class Functor, class TagType, class... Properties>
+auto tune_occupancy_controlled_policy(
+ const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy, const Functor& functor,
+ const TagType& tag) {
+ return generic_tune_policy<Experimental::RangePolicyOccupancyTuner>(
+ label_in, range_policy_tuners, policy, functor, tag,
+ [](const Kokkos::RangePolicy<Properties...>& candidate_policy) {
+ return candidate_policy.impl_get_occupancy_control().should_tune();
+ });
+}
+template <class Functor, class TagType, class... Properties>
+auto tune_range_policy(const size_t tuning_context, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& functor, const TagType& tag,
+ std::true_type) {
+ return tune_occupancy_controlled_policy(tuning_context, label_in, policy,
+ functor, tag);
+}
+template <class Functor, class TagType, class... Properties>
+auto tune_range_policy(const size_t /**tuning_context*/,
+ const std::string& /**label_in*/,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& /**functor*/, const TagType& /**tag*/,
+ std::false_type) {
+ return policy;
+}
+
+// Reducer versions
+template <class RT, class Functor, class TagType, class... Properties>
+auto tune_occupancy_controlled_policy(
+ const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy, const Functor& functor,
+ const TagType& tag) {
+ return generic_tune_policy<Experimental::RangePolicyOccupancyTuner>(
+ label_in, range_policy_tuners, policy, functor, tag,
+ [](const Kokkos::RangePolicy<Properties...>& candidate_policy) {
+ return candidate_policy.impl_get_occupancy_control().should_tune();
+ });
+}
+template <class RT, class Functor, class TagType, class... Properties>
+auto tune_range_policy(const size_t tuning_context, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& functor, const TagType& tag,
+ std::true_type) {
+ return tune_occupancy_controlled_policy<RT>(tuning_context, label_in, policy,
+ functor, tag);
+}
+template <class ReducerType, class Functor, class TagType, class... Properties>
+auto tune_range_policy(const size_t /**tuning_context*/,
+ const std::string& /**label_in*/,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& /**functor*/, const TagType& /**tag*/,
+ std::false_type) {
+ return policy;
+}
+
+// tune a RangePolicy, without reducer
+template <class Functor, class TagType, class... Properties>
+auto tune_policy(const size_t tuning_context, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& functor, const TagType& tag) {
+ using policy_t = Kokkos::RangePolicy<Properties...>;
+ using has_desired_occupancy =
+ typename std::is_same<typename policy_t::occupancy_control,
+ Kokkos::Experimental::DesiredOccupancy>::type;
+ return tune_range_policy(tuning_context, label_in, policy, functor, tag,
+ has_desired_occupancy{});
+}
+
+// tune a RangePolicy, with reducer
+template <class ReducerType, class Functor, class TagType, class... Properties>
+auto tune_policy(const size_t tuning_context, const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& functor, const TagType& tag) {
+ using policy_t = Kokkos::RangePolicy<Properties...>;
+ using has_desired_occupancy =
+ typename std::is_same<typename policy_t::occupancy_control,
+ Kokkos::Experimental::DesiredOccupancy>::type;
+ return tune_range_policy<ReducerType>(tuning_context, label_in, policy,
+ functor, tag, has_desired_occupancy{});
+}
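+
+// Occupancy tuning only engages when the policy was constructed with an
+// occupancy hint. A sketch (editorial) of how a caller opts in, using the
+// existing Kokkos::Experimental::prefer / DesiredOccupancy API:
+//
+//   auto tunable = Kokkos::Experimental::prefer(
+//       Kokkos::RangePolicy<>(0, n),
+//       Kokkos::Experimental::DesiredOccupancy{33});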
+
// tune a MDRangePolicy, without reducer
template <class Functor, class TagType, class... Properties>
-void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
- Kokkos::MDRangePolicy<Properties...>& policy,
+auto tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::MDRangePolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
using Policy = Kokkos::MDRangePolicy<Properties...>;
static constexpr int rank = Policy::rank;
- generic_tune_policy<Experimental::MDRangeTuner<rank>>(
+ return generic_tune_policy<Experimental::MDRangeTuner<rank>>(
label_in, mdrange_tuners<rank>, policy, functor, tag,
[](const Policy& candidate_policy) {
return candidate_policy.impl_tune_tile_size();
// tune a MDRangePolicy, with reducer
template <class ReducerType, class Functor, class TagType, class... Properties>
-void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
- Kokkos::MDRangePolicy<Properties...>& policy,
+auto tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+ const Kokkos::MDRangePolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
using Policy = Kokkos::MDRangePolicy<Properties...>;
static constexpr int rank = Policy::rank;
- generic_tune_policy<Experimental::MDRangeTuner<rank>, ReducerType>(
+ return generic_tune_policy<Experimental::MDRangeTuner<rank>, ReducerType>(
label_in, mdrange_tuners<rank>, policy, functor, tag,
[](const Policy& candidate_policy) {
return candidate_policy.impl_tune_tile_size();
template <class ReducerType>
struct ReductionSwitcher {
template <class Functor, class TagType, class ExecPolicy>
- static void tune(const size_t tuning_context, const std::string& label,
- ExecPolicy& policy, const Functor& functor,
+ static auto tune(const size_t tuning_context, const std::string& label,
+ const ExecPolicy& policy, const Functor& functor,
const TagType& tag) {
if (Kokkos::tune_internals()) {
- tune_policy<ReducerType>(tuning_context, label, policy, functor, tag);
+ return tune_policy<ReducerType>(tuning_context, label, policy, functor,
+ tag);
}
+ return Impl::default_tuned_version_of(policy);
}
};
template <>
struct ReductionSwitcher<Kokkos::InvalidType> {
template <class Functor, class TagType, class ExecPolicy>
- static void tune(const size_t tuning_context, const std::string& label,
- ExecPolicy& policy, const Functor& functor,
+ static auto tune(const size_t tuning_context, const std::string& label,
+ const ExecPolicy& policy, const Functor& functor,
const TagType& tag) {
if (Kokkos::tune_internals()) {
- tune_policy(tuning_context, label, policy, functor, tag);
+ return tune_policy(tuning_context, label, policy, functor, tag);
}
+ return Impl::default_tuned_version_of(policy);
}
};
template <class Tuner, class Functor, class TagType,
class TuningPermissionFunctor, class Map, class Policy>
void generic_report_results(const std::string& label_in, Map& map,
- Policy& policy, const Functor&, const TagType&,
+ const Policy& policy, const Functor&,
+ const TagType&,
const TuningPermissionFunctor& should_tune) {
if (should_tune(policy)) {
std::string label = label_in;
// report results for a policy type we don't tune (do nothing)
template <class ExecPolicy, class Functor, typename TagType>
-void report_policy_results(const size_t, const std::string&, ExecPolicy&,
+void report_policy_results(const size_t, const std::string&, const ExecPolicy&,
const Functor&, const TagType&) {}
// report results for a TeamPolicy
template <class Functor, class TagType, class... Properties>
void report_policy_results(const size_t /**tuning_context*/,
const std::string& label_in,
- Kokkos::TeamPolicy<Properties...>& policy,
+ const Kokkos::TeamPolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
generic_report_results<Experimental::TeamSizeTuner>(
label_in, team_tuners, policy, functor, tag,
template <class Functor, class TagType, class... Properties>
void report_policy_results(const size_t /**tuning_context*/,
const std::string& label_in,
- Kokkos::MDRangePolicy<Properties...>& policy,
+ const Kokkos::MDRangePolicy<Properties...>& policy,
const Functor& functor, const TagType& tag) {
using Policy = Kokkos::MDRangePolicy<Properties...>;
static constexpr int rank = Policy::rank;
});
}
+// report results for a RangePolicy
+template <class Functor, class TagType, class... Properties>
+void report_policy_results(const size_t /**tuning_context*/,
+ const std::string& label_in,
+ const Kokkos::RangePolicy<Properties...>& policy,
+ const Functor& functor, const TagType& tag) {
+ using Policy = Kokkos::RangePolicy<Properties...>;
+ generic_report_results<Experimental::RangePolicyOccupancyTuner>(
+ label_in, range_policy_tuners, policy, functor, tag, [](const Policy&) {
+ return Kokkos::RangePolicy<
+ Properties...>::traits::experimental_contains_desired_occupancy;
+ });
+}
+
} // namespace Impl
} // namespace Experimental
namespace Impl {
template <class ExecPolicy, class FunctorType>
-void begin_parallel_for(ExecPolicy& policy, FunctorType& functor,
+auto begin_parallel_for(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
+ using response_type =
+ Kokkos::Tools::Impl::ToolResponse<ExecPolicy, FunctorType>;
+ response_type response{policy};
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Impl::ParallelConstructName<FunctorType,
typename ExecPolicy::work_tag>
&kpID);
}
#ifdef KOKKOS_ENABLE_TUNING
- size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
+ size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
if (Kokkos::tune_internals()) {
- Experimental::Impl::tune_policy(context_id, label, policy, functor,
- Kokkos::ParallelForTag{});
+ return response_type{Kokkos::Tools::Experimental::Impl::tune_policy(
+ context_id, label, policy, functor, Kokkos::ParallelForTag{})};
}
#else
(void)functor;
#endif
+ return response;
}
template <class ExecPolicy, class FunctorType>
-void end_parallel_for(ExecPolicy& policy, FunctorType& functor,
+void end_parallel_for(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Tools::endParallelFor(kpID);
}
template <class ExecPolicy, class FunctorType>
-void begin_parallel_scan(ExecPolicy& policy, FunctorType& functor,
+auto begin_parallel_scan(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
+ using response_type =
+ Kokkos::Tools::Impl::ToolResponse<ExecPolicy, FunctorType>;
+ response_type response{policy};
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Impl::ParallelConstructName<FunctorType,
typename ExecPolicy::work_tag>
&kpID);
}
#ifdef KOKKOS_ENABLE_TUNING
- size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
+ size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
if (Kokkos::tune_internals()) {
- Experimental::Impl::tune_policy(context_id, label, policy, functor,
- Kokkos::ParallelScanTag{});
+ return response_type{Kokkos::Tools::Experimental::Impl::tune_policy(
+ context_id, label, policy, functor, Kokkos::ParallelScanTag{})};
}
#else
(void)functor;
#endif
+ return response;
}
template <class ExecPolicy, class FunctorType>
-void end_parallel_scan(ExecPolicy& policy, FunctorType& functor,
+void end_parallel_scan(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Tools::endParallelScan(kpID);
}
template <class ReducerType, class ExecPolicy, class FunctorType>
-void begin_parallel_reduce(ExecPolicy& policy, FunctorType& functor,
+auto begin_parallel_reduce(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
+ using response_type = ToolResponse<ExecPolicy, FunctorType>;
+ response_type response{policy};
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Impl::ParallelConstructName<FunctorType,
typename ExecPolicy::work_tag>
&kpID);
}
#ifdef KOKKOS_ENABLE_TUNING
- size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
- Experimental::Impl::ReductionSwitcher<ReducerType>::tune(
- context_id, label, policy, functor, Kokkos::ParallelReduceTag{});
+ size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
+ return response_type{Experimental::Impl::ReductionSwitcher<ReducerType>::tune(
+ context_id, label, policy, functor, Kokkos::ParallelReduceTag{})};
#else
(void)functor;
#endif
+ return response;
}
template <class ReducerType, class ExecPolicy, class FunctorType>
-void end_parallel_reduce(ExecPolicy& policy, FunctorType& functor,
+void end_parallel_reduce(const ExecPolicy& policy, FunctorType& functor,
const std::string& label, uint64_t& kpID) {
if (Kokkos::Tools::profileLibraryLoaded()) {
Kokkos::Tools::endParallelReduce(kpID);
#endif
}
-} // end namespace Impl
+} // namespace Impl
} // namespace Tools
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOSTRAITS_HPP
#define KOKKOSTRAITS_HPP
template <typename T, typename S, typename... Pack>
struct has_type<T, S, Pack...> {
private:
- enum { self_value = std::is_same<T, S>::value };
+ enum { self_value = std::is_same_v<T, S> };
using next = has_type<T, Pack...>;
// Accept std::is_integral OR std::is_enum as an integral value
// since a simple enum value is automatically convertible to an
// integral value.
- (std::is_integral<T>::value || std::is_enum<T>::value) &&
- are_integral<Args...>::value
+ (std::is_integral_v<T> || std::is_enum_v<T>) && are_integral<Args...>::value
};
};
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_CORE_IMPL_UTILITIES_HPP
#define KOKKOS_CORE_IMPL_UTILITIES_HPP
namespace Kokkos {
namespace Impl {
-template <typename T>
-struct identity {
- using type = T;
+// same as std::integral_constant but with __host__ __device__ annotations on
+// the implicit conversion function and the call operator
+template <class T, T v>
+struct integral_constant {
+ using value_type = T;
+ using type = integral_constant<T, v>;
+ static constexpr T value = v;
+ KOKKOS_FUNCTION constexpr operator value_type() const noexcept {
+ return value;
+ }
+ KOKKOS_FUNCTION constexpr value_type operator()() const noexcept {
+ return value;
+ }
};
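+
+// Editorial note: unlike std::integral_constant, this one is usable inside
+// device code, e.g. calling integral_constant<int, 3>{}() from a kernel,
+// because the conversion operator and operator() carry KOKKOS_FUNCTION
+// annotations.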
-template <typename T>
-using identity_t = typename identity<T>::type;
+//==============================================================================
template <typename... Is>
struct always_true : std::true_type {};
-#if defined(__cpp_lib_void_t)
-// since C++17
-using std::void_t;
-#else
-template <class...>
-using void_t = void;
-#endif
+// type-dependent expression that is always false intended for use in
+// static_assert to check "we should never get there"
+template <typename... Deps>
+struct always_false : std::false_type {};
//==============================================================================
-// <editor-fold desc="remove_cvref_t"> {{{1
+
+#if defined(__cpp_lib_type_identity)
+// since C++20
+using std::type_identity;
+using std::type_identity_t;
+#else
+template <typename T>
+struct type_identity {
+ using type = T;
+};
+
+template <typename T>
+using type_identity_t = typename type_identity<T>::type;
+#endif
#if defined(__cpp_lib_remove_cvref)
// since C++20
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
-// </editor-fold> end remove_cvref_t }}}1
-//==============================================================================
+// same as C++23 std::to_underlying but with __host__ __device__ annotations
+template <typename E>
+KOKKOS_FUNCTION constexpr std::underlying_type_t<E> to_underlying(
+ E e) noexcept {
+ return static_cast<std::underlying_type_t<E>>(e);
+}
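+
+// For example (editorial sketch): given `enum class Side : char { Left = 'L' };`,
+// to_underlying(Side::Left) yields the char 'L'.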
+
+#if defined(__cpp_lib_is_scoped_enum)
+// since C++23
+using std::is_scoped_enum;
+using std::is_scoped_enum_v;
+#else
+template <typename E, bool = std::is_enum_v<E>>
+struct is_scoped_enum_impl : std::false_type {};
+
+template <typename E>
+struct is_scoped_enum_impl<E, true>
+ : std::bool_constant<!std::is_convertible_v<E, std::underlying_type_t<E>>> {
+};
+
+template <typename E>
+struct is_scoped_enum : is_scoped_enum_impl<E>::type {};
+
+template <typename E>
+inline constexpr bool is_scoped_enum_v = is_scoped_enum<E>::value;
+#endif
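+
+// For instance, is_scoped_enum_v<std::byte> is true (std::byte is declared
+// `enum class byte : unsigned char`), while a classic unscoped
+// `enum Color { Red };` yields false because it implicitly converts to its
+// underlying type.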
//==============================================================================
// <editor-fold desc="is_specialization_of"> {{{1
// </editor-fold> end is_specialization_of }}}1
//==============================================================================
-//==============================================================================
-// <editor-fold desc="Folding emulation"> {{{1
-
-// acts like void for comma fold emulation
-struct _fold_comma_emulation_return {};
-
-template <class... Ts>
-constexpr KOKKOS_INLINE_FUNCTION _fold_comma_emulation_return
-emulate_fold_comma_operator(Ts&&...) noexcept {
- return _fold_comma_emulation_return{};
-}
-
-#define KOKKOS_IMPL_FOLD_COMMA_OPERATOR(expr) \
- ::Kokkos::Impl::emulate_fold_comma_operator( \
- ::std::initializer_list<::Kokkos::Impl::_fold_comma_emulation_return>{ \
- ((expr), ::Kokkos::Impl::_fold_comma_emulation_return{})...})
-
-// </editor-fold> end Folding emulation }}}1
-//==============================================================================
-
-//==============================================================================
-// destruct_delete is a unique_ptr deleter for objects
-// created by placement new into already allocated memory
-// by only calling the destructor on the object.
-//
-// Because unique_ptr never calls its deleter with a nullptr value,
-// no need to check if p == nullptr.
-//
-// Note: This differs in interface from std::default_delete in that the
-// function call operator is templated instead of the class, to make
-// it easier to use and disallow specialization.
-struct destruct_delete {
- template <typename T>
- KOKKOS_INLINE_FUNCTION constexpr void operator()(T* p) const noexcept {
- p->~T();
- }
-};
-//==============================================================================
-
//==============================================================================
// <editor-fold desc="type_list"> {{{1
template <class Entry, class... OutTs>
struct _type_list_remove_first_impl<Entry, type_list<>, type_list<OutTs...>>
- : identity<type_list<OutTs...>> {};
+ : type_identity<type_list<OutTs...>> {};
template <class Entry, class List>
struct type_list_remove_first
template <template <class> class UnaryPred, class List>
struct type_list_any;
-#ifdef KOKKOS_ENABLE_CXX17
template <template <class> class UnaryPred, class... Ts>
struct type_list_any<UnaryPred, type_list<Ts...>>
: std::bool_constant<(UnaryPred<Ts>::value || ...)> {};
-#else
-template <template <class> class UnaryPred, class T, class... Ts>
-struct type_list_any<UnaryPred, type_list<T, Ts...>> {
- using type = typename std::conditional_t<
- UnaryPred<T>::value, std::true_type,
- type_list_any<UnaryPred, type_list<Ts...>>>::type;
- static constexpr auto value = type::value;
-};
-
-template <template <class> class UnaryPred>
-struct type_list_any<UnaryPred, type_list<>> : std::false_type {};
-
-#endif
// </editor-fold> end type_list_any }}}2
//------------------------------------------------------------------------------
// </editor-fold> end type_list }}}1
//==============================================================================
+//==============================================================================
+// The weird !sizeof(T*) to express false is to make the
+// expression dependent on the type of T, and thus only applicable
+// at instantiation and not first-pass semantic analysis of the
+// template definition.
+template <typename T>
+constexpr bool dependent_false_v = !sizeof(T*);
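+
+// Typical use: static_assert(dependent_false_v<T>, "unsupported type"); in
+// the unreachable branch of an `if constexpr` chain, where a plain `false`
+// would fire already at template definition time.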
+//==============================================================================
+
} // namespace Impl
} // namespace Kokkos
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_VLAEMULATION_HPP
#define KOKKOS_IMPL_VLAEMULATION_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#include <Kokkos_Macros.hpp>
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_ZEROMEMSET_FWD_HPP
+#define KOKKOS_ZEROMEMSET_FWD_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename ExecutionSpace>
+struct ZeroMemset;
+
+} // namespace Impl
+} // namespace Kokkos
+
+#endif // #ifndef KOKKOS_ZEROMEMSET_FWD_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
inline void print_bitmap(std::ostream& s, const hwloc_const_bitmap_t bitmap) {
s << "{";
- for (int i = hwloc_bitmap_first(bitmap); - 1 != i;
+ for (int i = hwloc_bitmap_first(bitmap); -1 != i;
i = hwloc_bitmap_next(bitmap, i)) {
s << " " << i;
}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_CUDA_SETUP_HPP_
+#define KOKKOS_CUDA_SETUP_HPP_
+
+#if !defined(KOKKOS_ENABLE_CUDA)
+#error \
+ "KOKKOS_ENABLE_CUDA was not defined, but Kokkos_Setup_Cuda.hpp was included anyway."
+#endif
+
+#if defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__)
+#error \
+ "KOKKOS_ENABLE_CUDA defined but the compiler is not defining the __CUDACC__ macro as expected"
+// Some tooling environments will still function better if we do this here.
+#define __CUDACC__
+#endif /* defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__) */
+
+// Compiling with a CUDA compiler.
+//
+// Include <cuda.h> to pick up the CUDA_VERSION macro defined as:
+// CUDA_VERSION = ( MAJOR_VERSION * 1000 ) + ( MINOR_VERSION * 10 )
+//
+// When generating device code the __CUDA_ARCH__ macro is defined as:
+// __CUDA_ARCH__ = ( MAJOR_CAPABILITY * 100 ) + ( MINOR_CAPABILITY * 10 )
+
+#include <cuda_runtime.h>
+#include <cuda.h>
+
+#if defined(_WIN32)
+#define KOKKOS_IMPL_WINDOWS_CUDA
+#endif
+
+#if !defined(CUDA_VERSION)
+#error "#include <cuda.h> did not define CUDA_VERSION."
+#endif
+
+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 300)
+// Compiling with CUDA compiler for device code.
+#error "Cuda device capability >= 3.0 is required."
+#endif
+
+#define KOKKOS_LAMBDA [=] __host__ __device__
+#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
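+
+// Illustrative (editorial): KOKKOS_LAMBDA makes user lambdas callable from
+// device code, e.g. Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) { /* ... */ });
+// expands the lambda to [=] __host__ __device__ under nvcc.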
+
+#define KOKKOS_DEDUCTION_GUIDE __host__ __device__
+
+#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
+#define KOKKOS_IMPL_FORCEINLINE __forceinline__
+#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
+#define KOKKOS_IMPL_FUNCTION __device__ __host__
+#define KOKKOS_IMPL_HOST_FUNCTION __host__
+#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
+
+// clang-format off
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION __device__ __host__
+#else
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION @"KOKKOS_RELOCATABLE_FUNCTION requires Kokkos_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE=ON"
+#endif
+// clang-format on
+
+#endif /* KOKKOS_CUDA_SETUP_HPP_ */
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SETUP_HIP_HPP_
+#define KOKKOS_SETUP_HIP_HPP_
+
+#if defined(KOKKOS_ENABLE_HIP)
+
+#define KOKKOS_IMPL_HIP_CLANG_WORKAROUND
+
+#include <hip/hip_runtime.h>
+#include <hip/hip_runtime_api.h>
+
+#define KOKKOS_LAMBDA [=] __host__ __device__
+#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
+
+#define KOKKOS_DEDUCTION_GUIDE __host__ __device__
+
+#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
+#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
+#define KOKKOS_IMPL_FUNCTION __device__ __host__
+#define KOKKOS_IMPL_HOST_FUNCTION __host__
+#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
+
+// clang-format off
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION __device__ __host__
+#else
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION @"KOKKOS_RELOCATABLE_FUNCTION requires Kokkos_ENABLE_HIP_RELOCATABLE_DEVICE_CODE=ON"
+#endif
+// clang-format on
+
+// The implementation of hipGraph in ROCm 5.2 is bugged, so we cannot use it.
+#if !((HIP_VERSION_MAJOR == 5) && (HIP_VERSION_MINOR == 2))
+#define KOKKOS_IMPL_HIP_NATIVE_GRAPH
+#endif
+
+#ifdef KOKKOS_ARCH_AMD_GFX942_APU
+#define KOKKOS_IMPL_HIP_UNIFIED_MEMORY
+#endif
+
+#endif // #if defined( KOKKOS_ENABLE_HIP )
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SETUP_SYCL_HPP_
+#define KOKKOS_SETUP_SYCL_HPP_
+
+// FIXME_SYCL Using in-order queues currently gives better performance on
+// Intel GPUs, and we run into correctness issues with out-of-order queues on
+// NVIDIA GPUs.
+#define KOKKOS_IMPL_SYCL_USE_IN_ORDER_QUEUES
+
+// FIXME_SYCL the fallback assert is temporarily disabled by default in the
+// compiler, so we need to force it.
+#ifndef SYCL_ENABLE_FALLBACK_ASSERT
+#define SYCL_ENABLE_FALLBACK_ASSERT
+#endif
+#ifndef SYCL_FALLBACK_ASSERT
+#define SYCL_FALLBACK_ASSERT 1
+#endif
+
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20230200
+#define KOKKOS_IMPL_SYCL_GET_MULTI_PTR(accessor) \
+ accessor.get_multi_ptr<sycl::access::decorated::yes>()
+#else
+#define KOKKOS_IMPL_SYCL_GET_MULTI_PTR(accessor) accessor.get_pointer()
+#endif
+
+// FIXME_SYCL Use type directly once it has stabilized in SYCL.
+namespace Kokkos::Impl {
+#ifndef SYCL_EXT_INTEL_USM_ADDRESS_SPACES
+#error SYCL_EXT_INTEL_USM_ADDRESS_SPACES undefined!
+#elif SYCL_EXT_INTEL_USM_ADDRESS_SPACES >= 2
+template <typename T>
+using sycl_device_ptr = sycl::ext::intel::device_ptr<T>;
+template <typename T>
+using sycl_host_ptr = sycl::ext::intel::host_ptr<T>;
+#else
+template <typename T>
+using sycl_device_ptr = sycl::device_ptr<T>;
+template <typename T>
+using sycl_host_ptr = sycl::host_ptr<T>;
+#endif
+} // namespace Kokkos::Impl
+
+// clang-format off
+#ifdef KOKKOS_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION SYCL_EXTERNAL
+#else
+#define KOKKOS_IMPL_RELOCATABLE_FUNCTION @"KOKKOS_RELOCATABLE_FUNCTION requires Kokkos_ENABLE_SYCL_RELOCATABLE_DEVICE_CODE=ON"
+#endif
+// clang-format on
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_EXECUTIONSPACETRAIT_HPP
#define KOKKOS_KOKKOS_EXECUTIONSPACETRAIT_HPP
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
+#define KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp> // IsGraphKernelTag
+#include <traits/Kokkos_Traits_fwd.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+struct GraphKernelTrait : TraitSpecificationBase<GraphKernelTrait> {
+ struct base_traits {
+ using is_graph_kernel = std::false_type;
+ KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+ };
+ template <class, class AnalyzeNextTrait>
+ struct mixin_matching_trait : AnalyzeNextTrait {
+ using base_t = AnalyzeNextTrait;
+ using base_t::base_t;
+ using is_graph_kernel = std::true_type;
+ };
+ template <class T>
+ using trait_matches_specification = std::is_same<T, IsGraphKernelTag>;
+};
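+
+// Illustrative sketch (hypothetical alias): a policy is tagged through the
+// usual TraitSpecificationBase machinery, e.g.
+//   using graph_policy =
+//       GraphKernelTrait::policy_with_trait<Kokkos::RangePolicy<>,
+//                                           IsGraphKernelTag>;
+// after which the analyzed policy's is_graph_kernel is std::true_type.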
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+} // end namespace Impl
+} // end namespace Kokkos
+
+#endif // KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_INDEXTYPETRAIT_HPP
#define KOKKOS_KOKKOS_INDEXTYPETRAIT_HPP
"Kokkos Error: More than one index type given. Search "
"compiler output for 'show_extra_index_type' to see the "
"type of the errant tag.");
- static_assert(std::is_integral<IntegralIndexType>::value, "");
+ static_assert(std::is_integral_v<IntegralIndexType>);
static constexpr bool index_type_is_defaulted = false;
using index_type = Kokkos::IndexType<IntegralIndexType>;
};
template <class IntegralIndexType>
struct PolicyTraitMatcher<
IndexTypeTrait, IntegralIndexType,
- std::enable_if_t<std::is_integral<IntegralIndexType>::value>>
- : std::true_type {};
+ std::enable_if_t<std::is_integral_v<IntegralIndexType>>> : std::true_type {
+};
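+
+// e.g. PolicyTraitMatcher<IndexTypeTrait, int>::value is true through this
+// specialization, which is what lets a bare integral type stand in for
+// Kokkos::IndexType<int> among a policy's template arguments (illustrative).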
// </editor-fold> end PolicyTraitMatcher specialization"> }}}1
//==============================================================================
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_ITERATIONPATTERNTRAIT_HPP
#define KOKKOS_KOKKOS_ITERATIONPATTERNTRAIT_HPP
show_extra_iteration_pattern_erroneously_given_to_execution_policy<
typename base_t::iteration_pattern>{};
static_assert(
- std::is_void<typename base_t::iteration_pattern>::value,
+ std::is_void_v<typename base_t::iteration_pattern>,
"Kokkos Error: More than one index type given. Search "
"compiler output for 'show_extra_iteration_pattern' to see the "
"type of the errant tag.");
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_LAUNCHBOUNDSTRAIT_HPP
#define KOKKOS_KOKKOS_LAUNCHBOUNDSTRAIT_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_OCCUPANCYCONTROLTRAIT_HPP
#define KOKKOS_KOKKOS_OCCUPANCYCONTROLTRAIT_HPP
struct DesiredOccupancy {
int m_occ = 100;
- explicit constexpr DesiredOccupancy(int occ) : m_occ(occ) {
+  bool tune = false;
+  explicit constexpr DesiredOccupancy(int occ) : m_occ(occ) {
KOKKOS_EXPECTS(0 <= occ && occ <= 100);
}
+ explicit constexpr DesiredOccupancy(const Kokkos::AUTO_t) : tune(true) {}
explicit constexpr operator int() const { return m_occ; }
constexpr int value() const { return m_occ; }
+ constexpr bool should_tune() const { return tune; }
DesiredOccupancy() = default;
explicit DesiredOccupancy(MaximizeOccupancy const&) : DesiredOccupancy() {}
};
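
// Usage sketch (illustrative): DesiredOccupancy(50) requests 50% occupancy,
// DesiredOccupancy(Kokkos::AUTO) defers the value to the tuning machinery
// (should_tune() then returns true), and MaximizeOccupancy restores the
// default of 100%.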
using mixin_matching_trait =
OccupancyControlPolicyMixin<OccControl, AnalyzeNextTrait>;
template <class T>
- using trait_matches_specification = std::integral_constant<
- bool,
- std::is_same<T, Kokkos::Experimental::DesiredOccupancy>::value ||
- std::is_same<T, Kokkos::Experimental::MaximizeOccupancy>::value>;
+ using trait_matches_specification = std::bool_constant<
+ std::is_same_v<T, Kokkos::Experimental::DesiredOccupancy> ||
+ std::is_same_v<T, Kokkos::Experimental::MaximizeOccupancy>>;
};
// </editor-fold> end Occupancy control trait specification }}}1
template <typename Policy>
constexpr auto prefer(Policy const& p, MaximizeOccupancy) {
- static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+ static_assert(Kokkos::is_execution_policy<Policy>::value);
using new_policy_t =
Kokkos::Impl::OccupancyControlTrait::policy_with_trait<Policy,
MaximizeOccupancy>;
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#include <impl/Kokkos_Utilities.hpp> // type_list
TraitSpec, PolicyTemplate, type_list<ProcessedTraits...>,
type_list<MatchingTrait, ToProcessTraits...>, NewTrait,
std::enable_if_t<PolicyTraitMatcher<TraitSpec, MatchingTrait>::value>> {
- static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value, "");
+ static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value);
using type = PolicyTemplate<ProcessedTraits..., NewTrait, ToProcessTraits...>;
};
struct PolicyTraitAdaptorImpl<TraitSpec, PolicyTemplate,
type_list<ProcessedTraits...>, type_list<>,
NewTrait> {
- static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value, "");
+ static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value);
using type = PolicyTemplate<ProcessedTraits..., NewTrait>;
};
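
// Illustrative summary: PolicyTraitAdaptorImpl walks the policy's trait list.
// When PolicyTraitMatcher flags an existing trait of the same kind, NewTrait
// replaces it in place (first case above); when the to-process list is
// exhausted without a match, NewTrait is simply appended (second case above).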
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#include <impl/Kokkos_Utilities.hpp> // type_list
+
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+#ifndef KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
+#define KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher"> {{{1
+
+// To handle the WorkTag case, we need more than just a predicate; we need
+// something that we can default to in the unspecialized case, just like we
+// do for AnalyzeExecPolicy
+template <class TraitSpec, class Trait, class Enable = void>
+struct PolicyTraitMatcher : std::false_type {};
+
+template <class TraitSpec, class Trait>
+struct PolicyTraitMatcher<
+ TraitSpec, Trait,
+ std::enable_if_t<
+ TraitSpec::template trait_matches_specification<Trait>::value>>
+ : std::true_type {};
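+
+// For illustration, with GraphKernelTrait's specification one would expect
+//   static_assert(PolicyTraitMatcher<GraphKernelTrait, IsGraphKernelTag>::value);
+//   static_assert(!PolicyTraitMatcher<GraphKernelTrait, int>::value);
+// since only IsGraphKernelTag satisfies its trait_matches_specification.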
+
+// </editor-fold> end PolicyTraitMatcher }}}1
+//==============================================================================
+
+} // end namespace Impl
+} // end namespace Kokkos
+
+#endif // KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_SCHEDULETRAIT_HPP
#define KOKKOS_KOKKOS_SCHEDULETRAIT_HPP
template <class Policy, class ScheduleType>
constexpr auto require(Policy const& p, Kokkos::Schedule<ScheduleType>) {
- static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+ static_assert(Kokkos::is_execution_policy<Policy>::value);
using new_policy_t = Kokkos::Impl::ScheduleTrait::policy_with_trait<
Policy, Kokkos::Schedule<ScheduleType>>;
return new_policy_t{p};
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_TRAITS_FWD_HPP
#define KOKKOS_KOKKOS_TRAITS_FWD_HPP
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_WORKITEMPROPERTYTRAIT_HPP
#define KOKKOS_KOKKOS_WORKITEMPROPERTYTRAIT_HPP
template <class Policy, unsigned long Property>
constexpr auto require(const Policy p,
WorkItemProperty::ImplWorkItemProperty<Property>) {
- static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+ static_assert(Kokkos::is_execution_policy<Policy>::value);
using new_policy_t = Kokkos::Impl::WorkItemPropertyTrait::policy_with_trait<
Policy, WorkItemProperty::ImplWorkItemProperty<Property>>;
return new_policy_t{p};
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_KOKKOS_WORKTAGTRAIT_HPP
#define KOKKOS_KOKKOS_WORKTAGTRAIT_HPP
show_extra_work_tag_erroneously_given_to_execution_policy<
typename base_t::work_tag>{};
static_assert(
- std::is_void<typename base_t::work_tag>::value,
+ std::is_void_v<typename base_t::work_tag>,
"Kokkos Error: More than one work tag given. Search compiler output "
"for 'show_extra_work_tag' to see the type of the errant tag.");
};
// old code that just did a big long series of nested std::conditionals, but
// we should benchmark this assumption if it becomes a problem.
template <class T>
- using trait_matches_specification = std::integral_constant<
- bool,
-#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
- std::is_empty<T>::value &&
-#else
- !std::is_void<T>::value &&
-#endif
- !type_list_any<_trait_matches_spec_predicate<T>::template apply,
- _exec_policy_traits_without_work_tag>::value>;
+ using trait_matches_specification = std::bool_constant<
+ std::is_empty_v<T> &&
+ !type_list_any<_trait_matches_spec_predicate<T>::template apply,
+ _exec_policy_traits_without_work_tag>::value>;
};
// </editor-fold> end trait specification }}}1
--- /dev/null
+tag: 2.01.00 date: 07:21:2016 master: xxxxxxxx develop: fa6dfcc4
+tag: 2.01.06 date: 09:02:2016 master: 9afaa87f develop: 555f1a3a
+tag: 2.01.10 date: 09:27:2016 master: e4119325 develop: e6cda11e
+tag: 2.02.00 date: 10:30:2016 master: 6c90a581 develop: ca3dd56e
+tag: 2.02.01 date: 11:01:2016 master: 9c698c86 develop: b0072304
+tag: 2.02.07 date: 12:16:2016 master: 4b4cc4ba develop: 382c0966
+tag: 2.02.15 date: 02:10:2017 master: 8c64cd93 develop: 28dea8b6
+tag: 2.03.00 date: 04:25:2017 master: 120d9ce7 develop: 015ba641
+tag: 2.03.05 date: 05:27:2017 master: 36b92f43 develop: 79073186
+tag: 2.03.13 date: 07:27:2017 master: da314444 develop: 29ccb58a
+tag: 2.04.00 date: 08:16:2017 master: 54eb75c0 develop: 32fb8ee1
+tag: 2.04.04 date: 09:11:2017 master: 2b7e9c20 develop: 51e7b25a
+tag: 2.04.11 date: 10:28:2017 master: 54a1330a develop: ed36c017
+tag: 2.5.00 date: 12:15:2017 master: dfe685f4 develop: ec7ad6d8
+tag: 2.6.00 date: 03:07:2018 master: 62e760fa develop: d1ba7d71
+tag: 2.7.00 date: 05:24:2018 master: e01945d0 develop: 2d13f608
+tag: 2.7.24 date: 11:04:2018 master: d3a94192 develop: 7a06fc81
+tag: 2.8.00 date: 02:05:2019 master: 34931a36 develop: d1659d1d
+tag: 2.9.00 date: 06:24:2019 master: 5d6e7fb3 develop: 4c6cb80a
+tag: 3.0.00 date: 01:31:2020 master: 2983b80d release-candidate-3.0: fdc904a6
+tag: 3.1.00 date: 04:14:2020 master: cd1b1d0a develop: fd90af43
+tag: 3.1.01 date: 05:04:2020 master: 785d19f2 release: 2be028bc
+tag: 3.2.00 date: 08:19:2020 master: 3b2fdc7e release: 5dc6d303
+tag: 3.3.00 date: 12:16:2020 master: 734f577a release: 1535ba5c
+tag: 3.3.01 date: 01:06:2021 master: 6d65b5a3 release: 4d23839c
+tag: 3.4.00 date: 04:26:2021 master: 1fb0c284 release: 5d7738d6
+tag: 3.4.01 date: 05:20:2021 master: 4b97a22f release: 410b15c8
+tag: 3.5.00 date: 11:19:2021 master: c28a8b03 release: 21b879e4
+tag: 3.6.00 date: 04:14:2022 master: 2834f94a release: 6ea708ff
+tag: 3.6.01 date: 06:16:2022 master: b52f8c83 release: afe9b404
+tag: 3.7.00 date: 08:25:2022 master: d19aab99 release: 0018e5fb
+tag: 3.7.01 date: 12:01:2022 master: 61d7db55 release: d3bb8cfe
+tag: 4.0.00 date: 02:23:2023 master: 5ad60966 release: 52ea2953
+tag: 4.0.01 date: 04:26:2023 master: aa1f48f3 release: 5893754f
+tag: 4.1.00 date: 06:20:2023 master: 62d2b6c8 release: adde1e6a
+tag: 4.2.00 date: 11:09:2023 master: 1a3ea28f release: abe01c88
+tag: 4.2.01 date: 01:30:2024 master: 71a9bcae release: 221e5f7a
+tag: 4.3.00 date: 04:03:2024 master: e0dc0128 release: f08217a4
+tag: 4.3.01 date: 05:07:2024 master: 486cc745 release: 262d2d6e
+tag: 4.4.00 date: 08:08:2024 master: 6ecdf605 release: 6068673c
+tag: 4.4.01 date: 09:12:2024 master: 08ceff92 release: 2d60c039
+tag: 4.5.00 date: 11:11:2024 master: 15dc143e release: 5164f2f6
+tag: 4.5.01 date: 12:19:2024 master: 09e775bf release: e0d656f9
--- /dev/null
+if(NOT Kokkos_INSTALL_TESTING)
+ add_subdirectory(src)
+endif()
+
+kokkos_add_test_directories(unit_tests)
--- /dev/null
+# We have to leave these here for TriBITS
+kokkos_include_directories(${CMAKE_CURRENT_BINARY_DIR})
+kokkos_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+#-----------------------------------------------------------------------------
+
+file(GLOB SIMD_HEADERS *.hpp)
+file(GLOB SIMD_SOURCES *.cpp)
+
+install(
+ DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/"
+ DESTINATION ${KOKKOS_HEADER_DIR}
+ FILES_MATCHING
+ PATTERN "*.hpp"
+)
+
+#-----------------------------------------------------------------------------
+
+# We have to pass the sources in here for TriBITS.
+# Standalone CMake ignores them and builds a true interface library instead.
+kokkos_add_library(kokkossimd SOURCES ${SIMD_SOURCES} HEADERS ${SIMD_HEADERS})
+kokkos_lib_include_directories(
+ kokkossimd ${KOKKOS_TOP_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+)
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_HPP
+#define KOKKOS_SIMD_HPP
+
+#include <Kokkos_SIMD_Common.hpp>
+
+// suppress NVCC warnings triggered by the [[nodiscard]] attribute on
+// overloaded operators implemented as hidden friends
+#if defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC < 1130
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif
+
+#include <Kokkos_SIMD_Scalar.hpp>
+
+#include <Kokkos_Macros.hpp>
+
+// FIXME_OPENMPTARGET The device pass disables all of the compiler macros
+// checked below
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+#if defined(KOKKOS_ARCH_AVX2)
+#include <Kokkos_SIMD_AVX2.hpp>
+#endif
+
+#if defined(KOKKOS_ARCH_AVX512XEON)
+#include <Kokkos_SIMD_AVX512.hpp>
+#endif
+
+#if defined(KOKKOS_ARCH_ARM_NEON)
+#include <Kokkos_SIMD_NEON.hpp>
+#endif
+#else // KOKKOS_ENABLE_OPENMPTARGET
+#if defined(KOKKOS_ARCH_AVX) && !defined(__AVX__)
+#error "__AVX__ must be defined for KOKKOS_ARCH_AVX"
+#endif
+
+#if defined(KOKKOS_ARCH_AVX2)
+#if !defined(__AVX2__)
+#error "__AVX2__ must be defined for KOKKOS_ARCH_AVX2"
+#endif
+#include <Kokkos_SIMD_AVX2.hpp>
+#endif
+
+#if defined(KOKKOS_ARCH_AVX512XEON)
+#if !defined(__AVX512F__)
+#error "__AVX512F__ must be defined for KOKKOS_ARCH_AVX512XEON"
+#endif
+#include <Kokkos_SIMD_AVX512.hpp>
+#endif
+
+#if defined(KOKKOS_ARCH_ARM_NEON)
+#if !defined(__ARM_NEON)
+#error "__ARM_NEON must be definded for KOKKOS_ARCH_ARM_NEON"
+#endif
+#include <Kokkos_SIMD_NEON.hpp>
+#endif
+#endif
+
+#if defined(KOKKOS_COMPILER_NVCC) && KOKKOS_COMPILER_NVCC < 1130
+#pragma GCC diagnostic pop
+#endif
+
+#include <Kokkos_SIMD_Common_Math.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
+namespace Impl {
+
+#if defined(KOKKOS_ARCH_AVX512XEON)
+using host_native = avx512_fixed_size<8>;
+#elif defined(KOKKOS_ARCH_AVX2)
+using host_native = avx2_fixed_size<4>;
+#elif defined(KOKKOS_ARCH_ARM_NEON)
+using host_native = neon_fixed_size<2>;
+#else
+using host_native = scalar;
+#endif
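+
+// For example, on a host built with KOKKOS_ARCH_AVX2 (and no AVX-512) the
+// chain above selects avx2_fixed_size<4>, i.e. four lanes per native simd
+// object regardless of element type (illustrative).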
+
+template <class T>
+struct ForSpace;
+
+#ifdef KOKKOS_ENABLE_SERIAL
+template <>
+struct ForSpace<Kokkos::Serial> {
+ using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA
+template <>
+struct ForSpace<Kokkos::Cuda> {
+ using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_THREADS
+template <>
+struct ForSpace<Kokkos::Threads> {
+ using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HPX
+template <>
+struct ForSpace<Kokkos::Experimental::HPX> {
+ using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMP
+template <>
+struct ForSpace<Kokkos::OpenMP> {
+ using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+template <>
+struct ForSpace<Kokkos::Experimental::OpenMPTarget> {
+ using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENACC
+template <>
+struct ForSpace<Kokkos::Experimental::OpenACC> {
+ using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HIP
+template <>
+struct ForSpace<Kokkos::HIP> {
+ using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_SYCL
+template <>
+struct ForSpace<Kokkos::SYCL> {
+ using type = scalar;
+};
+#endif
+
+} // namespace Impl
+
+template <class Space>
+using ForSpace = typename Impl::ForSpace<typename Space::execution_space>::type;
+
+template <class T>
+using native = ForSpace<Kokkos::DefaultExecutionSpace>;
+
+} // namespace simd_abi
+
+template <class T>
+using native_simd = simd<T, simd_abi::native<T>>;
+template <class T>
+using native_simd_mask = simd_mask<T, simd_abi::native<T>>;
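+
+// Minimal usage sketch (illustrative):
+//   Kokkos::Experimental::native_simd<double> a(1.0);
+//   Kokkos::Experimental::native_simd<double> b(2.0);
+//   auto c = a + b;  // element-wise addition across all lanes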
+
+namespace Impl {
+
+template <class... Abis>
+class abi_set {};
+
+template <typename... Ts>
+class data_types {};
+
+#if defined(KOKKOS_ARCH_AVX512XEON)
+using host_abi_set = abi_set<simd_abi::scalar, simd_abi::avx512_fixed_size<8>,
+ simd_abi::avx512_fixed_size<16>>;
+using data_type_set = data_types<std::int32_t, std::uint32_t, std::int64_t,
+ std::uint64_t, double, float>;
+#elif defined(KOKKOS_ARCH_AVX2)
+using host_abi_set = abi_set<simd_abi::scalar, simd_abi::avx2_fixed_size<4>,
+ simd_abi::avx2_fixed_size<8>>;
+using data_type_set =
+ data_types<std::int32_t, std::int64_t, std::uint64_t, double, float>;
+#elif defined(KOKKOS_ARCH_ARM_NEON)
+using host_abi_set = abi_set<simd_abi::scalar, simd_abi::neon_fixed_size<2>,
+ simd_abi::neon_fixed_size<4>>;
+using data_type_set =
+ data_types<std::int32_t, std::int64_t, std::uint64_t, double, float>;
+#else
+using host_abi_set = abi_set<simd_abi::scalar>;
+using data_type_set = data_types<std::int32_t, std::uint32_t, std::int64_t,
+ std::uint64_t, double, float>;
+#endif
+
+using device_abi_set = abi_set<simd_abi::scalar>;
+
+} // namespace Impl
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_AVX2_HPP
+#define KOKKOS_SIMD_AVX2_HPP
+
+#include <functional>
+#include <type_traits>
+
+#include <Kokkos_SIMD_Common.hpp>
+#include <Kokkos_BitManipulation.hpp> // bit_cast
+
+#include <immintrin.h>
+
+#ifdef KOKKOS_SIMD_COMMON_MATH_HPP
+#error \
+ "Kokkos_SIMD_AVX2.hpp must be included before Kokkos_SIMD_Common_Math.hpp!"
+#endif
+
+// FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used here.
+#if defined(__HIPCC__) && \
+ (((HIP_VERSION_MAJOR == 5) && \
+ ((HIP_VERSION_MINOR == 6) || (HIP_VERSION_MINOR == 7))) || \
+ ((HIP_VERSION_MAJOR == 6) && ((HIP_VERSION_MINOR == 0))))
+#define KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+#endif
+
+namespace Kokkos {
+
+namespace Experimental {
+
+namespace simd_abi {
+
+template <int N>
+class avx2_fixed_size {};
+
+} // namespace simd_abi
+
+template <>
+class simd_mask<double, simd_abi::avx2_fixed_size<4>> {
+ __m256d m_value;
+
+ public:
+ class reference {
+ __m256d& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m256d bit_mask() const {
+ return _mm256_castsi256_pd(_mm256_setr_epi64x(
+ -std::int64_t(m_lane == 0), -std::int64_t(m_lane == 1),
+ -std::int64_t(m_lane == 2), -std::int64_t(m_lane == 3)));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m256d& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm256_or_pd(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm256_andnot_pd(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm256_movemask_pd(m_mask) & (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm256_castsi256_pd(_mm256_set1_epi64x(-std::int64_t(value)))) {
+ }
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm256_castsi256_pd(_mm256_setr_epi64x(
+ -std::int64_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 3>()))))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>> const& i32_mask);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m256d const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256d()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m256d&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm256_or_pd(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm256_and_pd(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m256d>(simd_mask(true));
+ return simd_mask(_mm256_andnot_pd(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm256_movemask_pd(m_value) == _mm256_movemask_pd(other.m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
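+
+// Sketch of the proxy-reference pattern used above (illustrative):
+//   simd_mask<double, simd_abi::avx2_fixed_size<4>> m(false);
+//   m[2] = true;    // reference::operator= ORs this lane's bit pattern in
+//   bool b = m[2];  // the const overload reads the lane via the movemask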
+
+template <>
+class simd_mask<float, simd_abi::avx2_fixed_size<4>> {
+ __m128 m_value;
+
+ public:
+ class reference {
+ __m128& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m128 bit_mask() const {
+ return _mm_castsi128_ps(_mm_setr_epi32(
+ -std::int32_t(m_lane == 0), -std::int32_t(m_lane == 1),
+ -std::int32_t(m_lane == 2), -std::int32_t(m_lane == 3)));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m128& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm_or_ps(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm_andnot_ps(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm_movemask_ps(m_mask) & (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm_castsi128_ps(_mm_set1_epi32(-std::int32_t(value)))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm_castsi128_ps(_mm_setr_epi32(
+ -std::int32_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 3>()))))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m128 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m128()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m128&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm_or_ps(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm_and_ps(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m128>(simd_mask(true));
+ return simd_mask(_mm_andnot_ps(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm_movemask_ps(m_value) == _mm_movemask_ps(other.m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
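+
+// Illustrative note on the generator constructors: the callable receives a
+// std::integral_constant<std::size_t, Lane>, so each lane index is a
+// compile-time constant, e.g.
+//   simd_mask<float, simd_abi::avx2_fixed_size<4>> m(
+//       [](auto lane) { return lane % 2 == 0; });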
+
+template <>
+class simd_mask<float, simd_abi::avx2_fixed_size<8>> {
+ __m256 m_value;
+
+ public:
+ class reference {
+ __m256& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m256 bit_mask() const {
+ // FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used
+ // here.
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ return _mm256_cvtepi32_ps(_mm256_setr_epi32(
+#else
+ return _mm256_castsi256_ps(_mm256_setr_epi32(
+#endif
+ -std::int32_t(m_lane == 0), -std::int32_t(m_lane == 1),
+ -std::int32_t(m_lane == 2), -std::int32_t(m_lane == 3),
+ -std::int32_t(m_lane == 4), -std::int32_t(m_lane == 5),
+ -std::int32_t(m_lane == 6), -std::int32_t(m_lane == 7)));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m256& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm256_or_ps(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm256_andnot_ps(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm256_movemask_ps(m_mask) & (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm256_castsi256_ps(_mm256_set1_epi32(-std::int32_t(value)))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm256_castsi256_ps(_mm256_setr_epi32(
+ -std::int32_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 3>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 4>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 5>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 6>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 7>()))))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m256 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m256&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm256_or_ps(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm256_and_ps(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m256>(simd_mask(true));
+ return simd_mask(_mm256_andnot_ps(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm256_movemask_ps(m_value) == _mm256_movemask_ps(other.m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <>
+class simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>> {
+ __m128i m_value;
+
+ public:
+ class reference {
+ __m128i& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m128i bit_mask() const {
+ return _mm_setr_epi32(
+ -std::int32_t(m_lane == 0), -std::int32_t(m_lane == 1),
+ -std::int32_t(m_lane == 2), -std::int32_t(m_lane == 3));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m128i& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm_or_si128(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm_andnot_si128(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm_movemask_ps(_mm_castsi128_ps(m_mask)) & (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm_set1_epi32(-std::int32_t(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m128i const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm_setr_epi32(
+ -std::int32_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 3>())))) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, abi_type> const& other) {
+ for (std::size_t i = 0; i < size(); ++i) (*this)[i] = other[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m128i()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m128i&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm_or_si128(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm_and_si128(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m128i>(simd_mask(true));
+ return simd_mask(_mm_andnot_si128(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm_movemask_ps(_mm_castsi128_ps(m_value)) ==
+ _mm_movemask_ps(_mm_castsi128_ps(other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <>
+class simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>> {
+ __m256i m_value;
+
+ public:
+ class reference {
+ __m256i& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m256i bit_mask() const {
+ return _mm256_setr_epi32(
+ -std::int32_t(m_lane == 0), -std::int32_t(m_lane == 1),
+ -std::int32_t(m_lane == 2), -std::int32_t(m_lane == 3),
+ -std::int32_t(m_lane == 4), -std::int32_t(m_lane == 5),
+ -std::int32_t(m_lane == 6), -std::int32_t(m_lane == 7));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m256i& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm256_or_si256(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm256_andnot_si256(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm256_movemask_ps(_mm256_castsi256_ps(m_mask)) &
+ (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm256_set1_epi32(-std::int32_t(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_epi32(
+ -std::int32_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 3>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 4>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 5>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 6>())),
+ -std::int32_t(gen(std::integral_constant<std::size_t, 7>())))) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, abi_type> const& other) {
+ for (std::size_t i = 0; i < size(); ++i) (*this)[i] = other[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m256i&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm256_or_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm256_and_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m256i>(simd_mask(true));
+ return simd_mask(_mm256_andnot_si256(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm256_movemask_ps(_mm256_castsi256_ps(m_value)) ==
+ _mm256_movemask_ps(_mm256_castsi256_ps(other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <>
+class simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>> {
+ __m256i m_value;
+
+ public:
+ class reference {
+ __m256i& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m256i bit_mask() const {
+ return _mm256_setr_epi64x(
+ -std::int64_t(m_lane == 0), -std::int64_t(m_lane == 1),
+ -std::int64_t(m_lane == 2), -std::int64_t(m_lane == 3));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m256i& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm256_or_si256(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm256_andnot_si256(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm256_movemask_pd(_mm256_castsi256_pd(m_mask)) &
+ (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm256_set1_epi64x(-std::int64_t(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_epi64x(
+ -std::int64_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 3>())))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<std::int32_t, abi_type> const& other)
+ : m_value(_mm256_cvtepi32_epi64(static_cast<__m128i>(other))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m256i&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm256_or_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm256_and_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m256i>(simd_mask(true));
+ return simd_mask(_mm256_andnot_si256(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm256_movemask_pd(_mm256_castsi256_pd(m_value)) ==
+ _mm256_movemask_pd(_mm256_castsi256_pd(other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <>
+class simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>> {
+ __m256i m_value;
+
+ public:
+ class reference {
+ __m256i& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __m256i bit_mask() const {
+ return _mm256_setr_epi64x(
+ -std::int64_t(m_lane == 0), -std::int64_t(m_lane == 1),
+ -std::int64_t(m_lane == 2), -std::int64_t(m_lane == 3));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__m256i& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask = _mm256_or_si256(bit_mask(), m_mask);
+ } else {
+ m_mask = _mm256_andnot_si256(bit_mask(), m_mask);
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (_mm256_movemask_pd(_mm256_castsi256_pd(m_mask)) &
+ (1 << m_lane)) != 0;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(_mm256_set1_epi64x(-std::int64_t(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<std::int32_t, abi_type> const& other)
+ : m_value(_mm256_cvtepi32_epi64(static_cast<__m128i>(other))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_epi64x(
+ -std::int64_t(gen(std::integral_constant<std::size_t, 0>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 1>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 2>())),
+ -std::int64_t(gen(std::integral_constant<std::size_t, 3>())))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<__m256i&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_mm256_or_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_mm256_and_si256(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ auto const true_value = static_cast<__m256i>(simd_mask(true));
+ return simd_mask(_mm256_andnot_si256(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return _mm256_movemask_pd(_mm256_castsi256_pd(m_value)) ==
+ _mm256_movemask_pd(_mm256_castsi256_pd(other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
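+// Out-of-line definition of the converting constructor declared in
+// simd_mask<double, avx2_fixed_size<4>>: each 32-bit mask lane is widened to
+// 64 bits with _mm256_cvtepi32_epi64 (sign extension preserves the
+// all-ones/all-zeros pattern), and the result is bitcast to __m256d.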
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd_mask<double, simd_abi::avx2_fixed_size<4>>::simd_mask(
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>> const& i32_mask)
+ : m_value(_mm256_castsi256_pd(
+ _mm256_cvtepi32_epi64(static_cast<__m128i>(i32_mask)))) {}
+
+template <>
+class simd<double, simd_abi::avx2_fixed_size<4>> {
+ __m256d m_value;
+
+ public:
+ using value_type = double;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_pd(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256d const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_pd(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()))) {
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm256_loadu_pd(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm256_load_pd(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_storeu_pd(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_store_pd(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256d()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(
+ _mm256_sub_pd(_mm256_set1_pd(0.0), static_cast<__m256d>(m_value)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_mul_pd(static_cast<__m256d>(lhs), static_cast<__m256d>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_div_pd(static_cast<__m256d>(lhs), static_cast<__m256d>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_add_pd(static_cast<__m256d>(lhs), static_cast<__m256d>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_sub_pd(static_cast<__m256d>(lhs), static_cast<__m256d>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_LT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_GT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_LE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_GE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_EQ_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_pd(static_cast<__m256d>(lhs),
+ static_cast<__m256d>(rhs), _CMP_NEQ_OS));
+ }
+};
+
+} // namespace Experimental
+
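+// copysign and abs below manipulate the IEEE sign bit directly:
+// _mm256_set1_pd(-0.0) yields a mask with only the sign bit of each lane set.
+// copysign clears the sign bit of a (andnot), extracts the sign bit of b
+// (and), and merges the two (xor); abs simply clears the sign bit.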
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ copysign(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& b) {
+ __m256d const sign_mask = _mm256_set1_pd(-0.0);
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_xor_pd(_mm256_andnot_pd(sign_mask, static_cast<__m256d>(a)),
+ _mm256_and_pd(sign_mask, static_cast<__m256d>(b))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ abs(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ __m256d const sign_mask = _mm256_set1_pd(-0.0);
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_andnot_pd(sign_mask, static_cast<__m256d>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ floor(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_round_pd(static_cast<__m256d>(a),
+ (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ ceil(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_round_pd(static_cast<__m256d>(a),
+ (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ round(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_round_pd(static_cast<__m256d>(a),
+ (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ trunc(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_round_pd(static_cast<__m256d>(a),
+ (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ sqrt(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_sqrt_pd(static_cast<__m256d>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ cbrt(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_cbrt_pd(static_cast<__m256d>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ exp(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_exp_pd(static_cast<__m256d>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ log(Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_log_pd(static_cast<__m256d>(a)));
+}
+
+#endif
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ fma(Experimental::simd<double,
+ Experimental::simd_abi::avx2_fixed_size<4>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx2_fixed_size<4>> const& b,
+ Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& c) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_fmadd_pd(static_cast<__m256d>(a), static_cast<__m256d>(b),
+ static_cast<__m256d>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ max(Experimental::simd<double,
+ Experimental::simd_abi::avx2_fixed_size<4>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& b) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_max_pd(static_cast<__m256d>(a), static_cast<__m256d>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ min(Experimental::simd<double,
+ Experimental::simd_abi::avx2_fixed_size<4>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::avx2_fixed_size<4>> const& b) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_min_pd(static_cast<__m256d>(a), static_cast<__m256d>(b)));
+}
+
+namespace Experimental {
+
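+// condition(m, b, c) is a lane-wise ternary select implemented with blendv:
+// lanes where m is true are taken from b, the rest from c. Illustrative use
+// (dsimd and x are hypothetical names, not part of this header):
+//   using dsimd = simd<double, simd_abi::avx2_fixed_size<4>>;
+//   dsimd clamped = condition(x > dsimd(1.0), dsimd(1.0), x);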
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<double, simd_abi::avx2_fixed_size<4>>
+ condition(simd_mask<double, simd_abi::avx2_fixed_size<4>> const& a,
+ simd<double, simd_abi::avx2_fixed_size<4>> const& b,
+ simd<double, simd_abi::avx2_fixed_size<4>> const& c) {
+ return simd<double, simd_abi::avx2_fixed_size<4>>(
+ _mm256_blendv_pd(static_cast<__m256d>(c), static_cast<__m256d>(b),
+ static_cast<__m256d>(a)));
+}
+
+template <>
+class simd<float, simd_abi::avx2_fixed_size<4>> {
+ __m128 m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm_set1_ps(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen)
+ : m_value(_mm_setr_ps(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m128 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm_loadu_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm_load_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm_storeu_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm_store_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m128()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm_sub_ps(_mm_set1_ps(0.0), m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm_mul_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm_div_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm_add_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm_sub_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmplt_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmpgt_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmple_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmpge_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmpeq_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm_cmpneq_ps(lhs.m_value, rhs.m_value));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>>
+copysign(
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ b) {
+ __m128 const sign_mask = _mm_set1_ps(-0.0);
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_xor_ps(_mm_andnot_ps(sign_mask, static_cast<__m128>(a)),
+ _mm_and_ps(sign_mask, static_cast<__m128>(b))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ abs(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ __m128 const sign_mask = _mm_set1_ps(-0.0);
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_andnot_ps(sign_mask, static_cast<__m128>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_round_ps(static_cast<__m128>(a),
+ (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_round_ps(static_cast<__m128>(a),
+ (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_round_ps(static_cast<__m128>(a),
+ (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_round_ps(static_cast<__m128>(a),
+ (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ sqrt(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_sqrt_ps(static_cast<__m128>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ cbrt(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_cbrt_ps(static_cast<__m128>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ exp(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_exp_ps(static_cast<__m128>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>
+ log(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_log_ps(static_cast<__m128>(a)));
+}
+
+#endif
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>>
+fma(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ b,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ c) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_fmadd_ps(static_cast<__m128>(a), static_cast<__m128>(b),
+ static_cast<__m128>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>>
+max(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_max_ps(static_cast<__m128>(a), static_cast<__m128>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<4>>
+min(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_min_ps(static_cast<__m128>(a), static_cast<__m128>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<float, simd_abi::avx2_fixed_size<4>>
+ condition(simd_mask<float, simd_abi::avx2_fixed_size<4>> const& a,
+ simd<float, simd_abi::avx2_fixed_size<4>> const& b,
+ simd<float, simd_abi::avx2_fixed_size<4>> const& c) {
+ return simd<float, simd_abi::avx2_fixed_size<4>>(_mm_blendv_ps(
+ static_cast<__m128>(c), static_cast<__m128>(b), static_cast<__m128>(a)));
+}
+
+template <>
+class simd<float, simd_abi::avx2_fixed_size<8>> {
+ __m256 m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_ps(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen)
+ : m_value(_mm256_setr_ps(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm256_loadu_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm256_load_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_storeu_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_store_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm256_sub_ps(_mm256_set1_ps(0.0), m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_mul_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_div_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_add_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sub_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_LT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_GT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_LE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_GE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_EQ_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps(static_cast<__m256>(lhs),
+ static_cast<__m256>(rhs), _CMP_NEQ_OS));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>>
+copysign(
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ b) {
+ __m256 const sign_mask = _mm256_set1_ps(-0.0);
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_xor_ps(_mm256_andnot_ps(sign_mask, static_cast<__m256>(a)),
+ _mm256_and_ps(sign_mask, static_cast<__m256>(b))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ abs(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ __m256 const sign_mask = _mm256_set1_ps(-0.0);
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_andnot_ps(sign_mask, static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_round_ps(static_cast<__m256>(a),
+ (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_round_ps(static_cast<__m256>(a),
+ (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_round_ps(static_cast<__m256>(a),
+ (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_round_ps(static_cast<__m256>(a),
+ (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ sqrt(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_sqrt_ps(static_cast<__m256>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ cbrt(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_cbrt_ps(static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ exp(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_exp_ps(static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ log(Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_log_ps(static_cast<__m256>(a)));
+}
+
+#endif
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>>
+fma(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ b,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ c) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_fmadd_ps(static_cast<__m256>(a), static_cast<__m256>(b),
+ static_cast<__m256>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>>
+max(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_max_ps(static_cast<__m256>(a), static_cast<__m256>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx2_fixed_size<8>>
+min(Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_min_ps(static_cast<__m256>(a), static_cast<__m256>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<float, simd_abi::avx2_fixed_size<8>>
+ condition(simd_mask<float, simd_abi::avx2_fixed_size<8>> const& a,
+ simd<float, simd_abi::avx2_fixed_size<8>> const& b,
+ simd<float, simd_abi::avx2_fixed_size<8>> const& c) {
+ return simd<float, simd_abi::avx2_fixed_size<8>>(_mm256_blendv_ps(
+ static_cast<__m256>(c), static_cast<__m256>(b), static_cast<__m256>(a)));
+}
+
+template <>
+class simd<std::int32_t, simd_abi::avx2_fixed_size<4>> {
+ __m128i m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm_set1_epi32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()))) {
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m128i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const& other);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ // FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used
+ // here.
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm_loadu_si128(reinterpret_cast<__m128i const*>(ptr));
+#else
+ m_value = _mm_maskload_epi32(ptr, static_cast<__m128i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ // FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used
+ // here.
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm_load_si128(reinterpret_cast<__m128i const*>(ptr));
+#else
+ m_value = _mm_maskload_epi32(ptr, static_cast<__m128i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm_maskstore_epi32(ptr, static_cast<__m128i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm_maskstore_epi32(ptr, static_cast<__m128i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m128i()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ _mm_cmpeq_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ _mm_cmpgt_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ _mm_cmplt_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs < rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs > rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm_sub_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm_add_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm_mullo_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+
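+ // Shift operators: _mm_srai_epi32 shifts every lane right arithmetically by
+ // the same count, while _mm_srav_epi32 (new in AVX2) uses a per-lane count.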
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm_srai_epi32(static_cast<__m128i>(lhs), rhs));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm_srav_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm_slli_epi32(static_cast<__m128i>(lhs), rhs));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm_sllv_epi32(static_cast<__m128i>(lhs), static_cast<__m128i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>>
+ abs(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ __m128i const rhs = static_cast<__m128i>(a);
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm_abs_epi32(rhs));
+}
+
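+// floor, ceil, round, and trunc are identity operations on integers, so the
+// std::int32_t overloads below only convert the lanes to double.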
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_cvtepi32_pd(static_cast<__m128i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_cvtepi32_pd(static_cast<__m128i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_cvtepi32_pd(static_cast<__m128i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_cvtepi32_pd(static_cast<__m128i>(a)));
+}
+
+namespace Experimental {
+
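+// The integer condition() reuses the float blend: the lanes are bitcast to
+// float, blended with _mm_blendv_ps, and bitcast back; the casts reinterpret
+// bits only, so the integer values pass through unchanged.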
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>>
+ condition(simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>> const& a,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& b,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& c) {
+ return simd<std::int32_t, simd_abi::avx2_fixed_size<4>>(_mm_castps_si128(
+ _mm_blendv_ps(_mm_castsi128_ps(static_cast<__m128i>(c)),
+ _mm_castsi128_ps(static_cast<__m128i>(b)),
+ _mm_castsi128_ps(static_cast<__m128i>(a)))));
+}
+
+template <>
+class simd<std::int32_t, simd_abi::avx2_fixed_size<8>> {
+ __m256i m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_epi32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(
+ _mm256_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ // FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used
+ // here.
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi32(ptr, static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ // FIXME_HIP ROCm 5.6, 5.7, and 6.0 can't compile with the intrinsic used
+ // here.
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi32(ptr, static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_maskstore_epi32(ptr, static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_maskstore_epi32(ptr, static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpeq_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpgt_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs >= rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs < rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs > rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_mullo_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_srai_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_srav_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_slli_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sllv_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>>
+ abs(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ __m256i const rhs = static_cast<__m256i>(a);
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_abs_epi32(rhs));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_cvtepi32_ps(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_cvtepi32_ps(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_cvtepi32_ps(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>
+ trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx2_fixed_size<8>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::avx2_fixed_size<8>>(
+ _mm256_cvtepi32_ps(static_cast<__m256i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>>
+ condition(simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>> const& a,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& b,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& c) {
+ return simd<std::int32_t, simd_abi::avx2_fixed_size<8>>(_mm256_castps_si256(
+ _mm256_blendv_ps(_mm256_castsi256_ps(static_cast<__m256i>(c)),
+ _mm256_castsi256_ps(static_cast<__m256i>(b)),
+ _mm256_castsi256_ps(static_cast<__m256i>(a)))));
+}
+
+template <>
+class simd<std::int64_t, simd_abi::avx2_fixed_size<4>> {
+ __m256i m_value;
+
+ static_assert(sizeof(long long) == 8);
+
+ public:
+ using value_type = std::int64_t;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_epi64x(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_epi64x(
+ gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(
+ simd<std::uint64_t, abi_type> const& other);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(
+ simd<std::int32_t, abi_type> const& other)
+ : m_value(_mm256_cvtepi32_epi64(static_cast<__m128i>(other))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi64(reinterpret_cast<long long const*>(ptr),
+ static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_load_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi64(reinterpret_cast<long long const*>(ptr),
+ static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(ptr),
+ static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(ptr),
+ static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(
+ _mm256_sub_epi64(_mm256_set1_epi64x(0), static_cast<__m256i>(m_value)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_sub_epi64(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_add_epi64(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+
+  // Fallback SIMD multiplication via the generator constructor:
+  // AVX2 has no instruction for multiplying vectors of 64-bit signed integers.
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd([&](std::size_t i) { return lhs[i] * rhs[i]; });
+ }
+
+  // AVX2 only provides equality (eq) and greater-than (gt) comparisons for
+  // packed 64-bit integers; the remaining relational operators are derived
+  // from those two.
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpeq_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpgt_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return rhs > lhs;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs < rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return (lhs > rhs) || (lhs == rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+  // Fallback SIMD arithmetic right shift via the generator constructor:
+  // arithmetic right shift of packed 64-bit integers is not available in AVX2.
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd([&](std::size_t i) { return lhs[i] >> rhs; });
+ }
+
+  // Fallback SIMD arithmetic right shift via the generator constructor:
+  // arithmetic right shift of packed 64-bit integers is not available in AVX2.
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd([&](std::size_t i) { return lhs[i] >> rhs[i]; });
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_slli_epi64(static_cast<__m256i>(lhs), rhs));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sllv_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+};
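+
+// Usage sketch (illustrative only): this specialization behaves like four
+// lanes of std::int64_t, e.g.
+//   simd<std::int64_t, simd_abi::avx2_fixed_size<4>> x(2), y(3);
+//   auto z = x * y;  // generator-constructor fallback: {6, 6, 6, 6}
+//   auto m = z > x;  // mask_type with all four lanes true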
+
+} // namespace Experimental
+
+// Compute absolute values manually: _mm256_abs_epi64 is not part of AVX2;
+// it only becomes available with AVX-512.
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>>
+ abs(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<std::int64_t,
+ Experimental::simd_abi::avx2_fixed_size<4>>(
+ [&](std::size_t i) { return (a[i] < 0) ? -a[i] : a[i]; });
+}
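+
+// The four rounding functions below coincide for integer inputs: the values
+// are already integral, so floor == ceil == round == trunc and only the
+// int64 -> double conversion remains. It is done lane by lane because a packed
+// epi64 -> pd conversion is not available in AVX2 (it needs AVX-512DQ).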
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ floor(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ ceil(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ round(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ trunc(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>>
+ condition(simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>> const& a,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>> const& b,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>> const& c) {
+ return simd<std::int64_t, simd_abi::avx2_fixed_size<4>>(_mm256_castpd_si256(
+ _mm256_blendv_pd(_mm256_castsi256_pd(static_cast<__m256i>(c)),
+ _mm256_castsi256_pd(static_cast<__m256i>(b)),
+ _mm256_castsi256_pd(static_cast<__m256i>(a)))));
+}
+
+template <>
+class simd<std::uint64_t, simd_abi::avx2_fixed_size<4>> {
+ __m256i m_value;
+
+ public:
+ using value_type = std::uint64_t;
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_epi64x(
+ Kokkos::bit_cast<std::int64_t>(value_type(value)))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm256_setr_epi64x(
+ gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m256i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, abi_type> const& other)
+ : m_value(_mm256_cvtepi32_epi64(static_cast<__m128i>(other))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int64_t, abi_type> const& other)
+ : m_value(static_cast<__m256i>(other)) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi64(reinterpret_cast<long long const*>(ptr),
+ static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ m_value = _mm256_load_si256(reinterpret_cast<__m256i const*>(ptr));
+#else
+ m_value = _mm256_maskload_epi64(reinterpret_cast<long long const*>(ptr),
+ static_cast<__m256i>(mask_type(true)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(ptr),
+ static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(ptr),
+ static_cast<__m256i>(mask_type(true)), m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_add_epi64(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_sub_epi64(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+
+  // Fallback SIMD multiplication via the generator constructor:
+  // AVX2 has no instruction for multiplying vectors of 64-bit unsigned
+  // integers.
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd([&](std::size_t i) { return lhs[i] * rhs[i]; });
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return _mm256_srli_epi64(static_cast<__m256i>(lhs), rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return _mm256_srlv_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return _mm256_slli_epi64(static_cast<__m256i>(lhs), rhs);
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return _mm256_sllv_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator&(
+ simd const& lhs, simd const& rhs) noexcept {
+ return _mm256_and_si256(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator|(
+ simd const& lhs, simd const& rhs) noexcept {
+ return _mm256_or_si256(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpeq_epi64(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+};
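+
+// Note (illustrative): unlike the signed specialization above, right shifts of
+// the unsigned simd map directly onto AVX2 logical shifts (_mm256_srli_epi64 /
+// _mm256_srlv_epi64), e.g.
+//   simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>(8) >> 2  // {2, 2, 2, 2}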
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int64_t, simd_abi::avx2_fixed_size<4>>::simd(
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& other)
+ : m_value(static_cast<__m256i>(other)) {}
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx2_fixed_size<4>>
+abs(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ floor(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ ceil(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ round(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>
+ trunc(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx2_fixed_size<4>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::avx2_fixed_size<4>>(
+ _mm256_setr_pd(a[0], a[1], a[2], a[3]));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>
+ condition(simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& a,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& b,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& c) {
+ return simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>(_mm256_castpd_si256(
+ _mm256_blendv_pd(_mm256_castsi256_pd(static_cast<__m256i>(c)),
+ _mm256_castsi256_pd(static_cast<__m256i>(b)),
+ _mm256_castsi256_pd(static_cast<__m256i>(a)))));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int32_t, simd_abi::avx2_fixed_size<4>>::simd(
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& other) {
+ for (std::size_t i = 0; i < 4; ++i) {
+ (*this)[i] = std::int32_t(other[i]);
+ }
+}
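+
+// Illustrative note: this converting constructor narrows lane by lane via
+// std::int32_t(...), so on common implementations only the low 32 bits of a
+// large 64-bit lane survive.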
+
+template <>
+class const_where_expression<simd_mask<double, simd_abi::avx2_fixed_size<4>>,
+ simd<double, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using value_type = simd<double, abi_type>;
+ using mask_type = simd_mask<double, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, element_aligned_tag) const {
+ _mm256_maskstore_pd(mem, _mm256_castpd_si256(static_cast<__m256d>(m_mask)),
+ static_cast<__m256d>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, vector_aligned_tag) const {
+ _mm256_maskstore_pd(mem, _mm256_castpd_si256(static_cast<__m256d>(m_mask)),
+ static_cast<__m256d>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ double* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) const {
+ for (std::size_t lane = 0; lane < 4; ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<double, simd_abi::avx2_fixed_size<4>>,
+ simd<double, simd_abi::avx2_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<double, simd_abi::avx2_fixed_size<4>>,
+ simd<double, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<double, simd_abi::avx2_fixed_size<4>> const& mask_arg,
+ simd<double, simd_abi::avx2_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, element_aligned_tag) {
+ m_value = value_type(_mm256_maskload_pd(
+ mem, _mm256_castpd_si256(static_cast<__m256d>(m_mask))));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm256_maskload_pd(
+ mem, _mm256_castpd_si256(static_cast<__m256d>(m_mask))));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ double const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) {
+ m_value = value_type(_mm256_mask_i32gather_pd(
+ static_cast<__m256d>(m_value), mem, static_cast<__m128i>(index),
+ static_cast<__m256d>(m_mask), 8));
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<double, simd_abi::avx2_fixed_size<4>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<double, simd_abi::avx2_fixed_size<4>>>(
+ std::forward<U>(x));
+ m_value = simd<double, simd_abi::avx2_fixed_size<4>>(_mm256_blendv_pd(
+ static_cast<__m256d>(m_value), static_cast<__m256d>(x_as_value_type),
+ static_cast<__m256d>(m_mask)));
+ }
+};
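+
+// Usage sketch (illustrative, assuming the where() free function provided by
+// the common SIMD headers):
+//   simd<double, simd_abi::avx2_fixed_size<4>> v;
+//   simd_mask<double, simd_abi::avx2_fixed_size<4>> m(true);
+//   where(m, v).copy_from(mem, element_aligned_tag());  // masked load
+//   where(m, v) = simd<double, simd_abi::avx2_fixed_size<4>>(0.0);  // masked assign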
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::avx2_fixed_size<4>>,
+ simd<float, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ _mm_maskstore_ps(mem, _mm_castps_si128(static_cast<__m128>(m_mask)),
+ static_cast<__m128>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ _mm_maskstore_ps(mem, _mm_castps_si128(static_cast<__m128>(m_mask)),
+ static_cast<__m128>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) const {
+ for (std::size_t lane = 0; lane < 4; ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::avx2_fixed_size<4>>,
+ simd<float, simd_abi::avx2_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::avx2_fixed_size<4>>,
+ simd<float, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::avx2_fixed_size<4>> const& mask_arg,
+ simd<float, simd_abi::avx2_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ m_value = value_type(
+ _mm_maskload_ps(mem, _mm_castps_si128(static_cast<__m128>(m_mask))));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, vector_aligned_tag) {
+ m_value = value_type(
+ _mm_maskload_ps(mem, _mm_castps_si128(static_cast<__m128>(m_mask))));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) {
+ m_value = value_type(_mm_mask_i32gather_ps(static_cast<__m128>(m_value),
+ mem, static_cast<__m128i>(index),
+ static_cast<__m128>(m_mask), 4));
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<float, simd_abi::avx2_fixed_size<4>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::avx2_fixed_size<4>>>(
+ std::forward<U>(x));
+ m_value = simd<float, simd_abi::avx2_fixed_size<4>>(_mm_blendv_ps(
+ static_cast<__m128>(m_value), static_cast<__m128>(x_as_value_type),
+ static_cast<__m128>(m_mask)));
+ }
+};
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::avx2_fixed_size<8>>,
+ simd<float, simd_abi::avx2_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ _mm256_maskstore_ps(mem, _mm256_castps_si256(static_cast<__m256>(m_mask)),
+ static_cast<__m256>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ _mm256_maskstore_ps(mem, _mm256_castps_si256(static_cast<__m256>(m_mask)),
+ static_cast<__m256>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& index) const {
+ for (std::size_t lane = 0; lane < value_type::size(); ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::avx2_fixed_size<8>>,
+ simd<float, simd_abi::avx2_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::avx2_fixed_size<8>>,
+ simd<float, simd_abi::avx2_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::avx2_fixed_size<8>> const& mask_arg,
+ simd<float, simd_abi::avx2_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ m_value = value_type(_mm256_maskload_ps(
+ mem, _mm256_castps_si256(static_cast<__m256>(m_mask))));
+ }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_from(float const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm256_maskload_ps(
+ mem, _mm256_castps_si256(static_cast<__m256>(m_mask))));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& index) {
+ m_value = value_type(_mm256_mask_i32gather_ps(
+ static_cast<__m256>(m_value), mem, static_cast<__m256i>(index),
+ static_cast<__m256>(m_mask), 4));
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<float, simd_abi::avx2_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::avx2_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<float, simd_abi::avx2_fixed_size<8>>(_mm256_blendv_ps(
+ static_cast<__m256>(m_value), static_cast<__m256>(x_as_value_type),
+ static_cast<__m256>(m_mask)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ _mm_maskstore_epi32(mem, static_cast<__m128i>(m_mask),
+ static_cast<__m128i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ _mm_maskstore_epi32(mem, static_cast<__m128i>(m_mask),
+ static_cast<__m128i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) const {
+ for (std::size_t lane = 0; lane < 4; ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<4>> const& mask_arg,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m128i tmp = _mm_loadu_si128(reinterpret_cast<__m128i const*>(mem));
+ m_value = value_type(_mm_and_si128(tmp, static_cast<__m128i>(m_mask)));
+#else
+ m_value = value_type(_mm_maskload_epi32(mem, static_cast<__m128i>(m_mask)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m128i tmp = _mm_load_si128(reinterpret_cast<__m128i const*>(mem));
+ m_value = value_type(_mm_and_si128(tmp, static_cast<__m128i>(m_mask)));
+#else
+ m_value = value_type(_mm_maskload_epi32(mem, static_cast<__m128i>(m_mask)));
+#endif
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) {
+ m_value = value_type(_mm_mask_i32gather_epi32(
+ static_cast<__m128i>(m_value), mem, static_cast<__m128i>(index),
+ static_cast<__m128i>(m_mask), 4));
+ }
+ template <
+ class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<std::int32_t, simd_abi::avx2_fixed_size<4>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int32_t, simd_abi::avx2_fixed_size<4>>>(
+ std::forward<U>(x));
+ m_value = simd<std::int32_t, simd_abi::avx2_fixed_size<4>>(_mm_castps_si128(
+ _mm_blendv_ps(_mm_castsi128_ps(static_cast<__m128i>(m_value)),
+ _mm_castsi128_ps(static_cast<__m128i>(x_as_value_type)),
+ _mm_castsi128_ps(static_cast<__m128i>(m_mask)))));
+ }
+};
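+
+// Gather sketch (illustrative): gather_from() above maps to
+// _mm_mask_i32gather_epi32 with scale 4 (the byte size of one std::int32_t
+// lane); for every lane l with m_mask[l] set, m_value[l] becomes mem[index[l]].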
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<8>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ _mm256_maskstore_epi32(mem, static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ _mm256_maskstore_epi32(mem, static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& index) const {
+ for (std::size_t lane = 0; lane < value_type::size(); ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::avx2_fixed_size<8>> const& mask_arg,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value =
+ value_type(_mm256_maskload_epi32(mem, static_cast<__m256i>(m_mask)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_load_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value =
+ value_type(_mm256_maskload_epi32(mem, static_cast<__m256i>(m_mask)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<8>> const& index) {
+ m_value = value_type(_mm256_mask_i32gather_epi32(
+ static_cast<__m256i>(m_value), mem, static_cast<__m256i>(index),
+ static_cast<__m256i>(m_mask), 4));
+ }
+ template <
+ class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<std::int32_t, simd_abi::avx2_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int32_t, simd_abi::avx2_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<std::int32_t, simd_abi::avx2_fixed_size<8>>(
+ _mm256_castps_si256(_mm256_blendv_ps(
+ _mm256_castsi256_ps(static_cast<__m256i>(m_value)),
+ _mm256_castsi256_ps(static_cast<__m256i>(x_as_value_type)),
+ _mm256_castsi256_ps(static_cast<__m256i>(m_mask)))));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using value_type = simd<std::int64_t, abi_type>;
+ using mask_type = simd_mask<std::int64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ std::int64_t* mem, element_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(mem),
+ static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(std::int64_t* mem,
+ vector_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(mem),
+ static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int64_t* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) const {
+ for (std::size_t lane = 0; lane < 4; ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<std::int64_t, simd_abi::avx2_fixed_size<4>> const& mask_arg,
+ simd<std::int64_t, simd_abi::avx2_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(std::int64_t const* mem,
+ element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value = value_type(_mm256_maskload_epi64(
+ reinterpret_cast<long long const*>(mem), static_cast<__m256i>(m_mask)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(std::int64_t const* mem,
+ vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_load_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value = value_type(_mm256_maskload_epi64(
+ reinterpret_cast<long long const*>(mem), static_cast<__m256i>(m_mask)));
+#endif
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int64_t const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) {
+ m_value = value_type(_mm256_mask_i32gather_epi64(
+ static_cast<__m256i>(m_value), reinterpret_cast<long long const*>(mem),
+ static_cast<__m128i>(index), static_cast<__m256i>(m_mask), 8));
+ }
+  template <
+      class U,
+      std::enable_if_t<std::is_convertible_v<
+                           U, simd<std::int64_t, simd_abi::avx2_fixed_size<4>>>,
+                       bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+    auto const x_as_value_type =
+        static_cast<simd<std::int64_t, simd_abi::avx2_fixed_size<4>>>(
+            std::forward<U>(x));
+ m_value = simd<std::int64_t, simd_abi::avx2_fixed_size<4>>(
+ _mm256_castpd_si256(_mm256_blendv_pd(
+ _mm256_castsi256_pd(static_cast<__m256i>(m_value)),
+ _mm256_castsi256_pd(static_cast<__m256i>(x_as_value_type)),
+ _mm256_castsi256_pd(static_cast<__m256i>(m_mask)))));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::avx2_fixed_size<4>;
+ using value_type = simd<std::uint64_t, abi_type>;
+ using mask_type = simd_mask<std::uint64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ std::uint64_t* mem, element_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(mem),
+ static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(std::uint64_t* mem,
+ vector_aligned_tag) const {
+ _mm256_maskstore_epi64(reinterpret_cast<long long*>(mem),
+ static_cast<__m256i>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::uint64_t* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) const {
+ for (std::size_t lane = 0; lane < 4; ++lane) {
+ if (m_mask[lane]) mem[index[lane]] = m_value[lane];
+ }
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>>,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<std::uint64_t, simd_abi::avx2_fixed_size<4>> const& mask_arg,
+ simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(std::uint64_t const* mem,
+ element_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_loadu_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value = value_type(_mm256_maskload_epi64(
+ reinterpret_cast<long long const*>(mem), static_cast<__m256i>(m_mask)));
+#endif
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(std::uint64_t const* mem,
+ vector_aligned_tag) {
+#ifdef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+ __m256i tmp = _mm256_load_si256(reinterpret_cast<__m256i const*>(mem));
+ m_value = value_type(_mm256_and_si256(tmp, static_cast<__m256i>(m_mask)));
+#else
+ m_value = value_type(_mm256_maskload_epi64(
+ reinterpret_cast<long long const*>(mem), static_cast<__m256i>(m_mask)));
+#endif
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::uint64_t const* mem,
+ simd<std::int32_t, simd_abi::avx2_fixed_size<4>> const& index) {
+ m_value = value_type(_mm256_mask_i32gather_epi64(
+ static_cast<__m256i>(m_value), reinterpret_cast<long long const*>(mem),
+ static_cast<__m128i>(index), static_cast<__m256i>(m_mask), 8));
+ }
+  template <class U,
+            std::enable_if_t<
+                std::is_convertible_v<
+                    U, simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>>,
+                bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+    auto const x_as_value_type =
+        static_cast<simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>>(
+            std::forward<U>(x));
+ m_value = simd<std::uint64_t, simd_abi::avx2_fixed_size<4>>(
+ _mm256_castpd_si256(_mm256_blendv_pd(
+ _mm256_castsi256_pd(static_cast<__m256i>(m_value)),
+ _mm256_castsi256_pd(static_cast<__m256i>(x_as_value_type)),
+ _mm256_castsi256_pd(static_cast<__m256i>(m_mask)))));
+ }
+};
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#undef KOKKOS_IMPL_WORKAROUND_ROCM_AVX2_ISSUE
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_AVX512_HPP
+#define KOKKOS_SIMD_AVX512_HPP
+
+#include <functional>
+#include <type_traits>
+
+#include <Kokkos_SIMD_Common.hpp>
+#include <Kokkos_BitManipulation.hpp> // bit_cast
+
+#include <immintrin.h>
+
+#ifdef KOKKOS_SIMD_COMMON_MATH_HPP
+#error \
+ "Kokkos_SIMD_AVX512.hpp must be included before Kokkos_SIMD_Common_Math.hpp!"
+#endif
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
+template <int N>
+class avx512_fixed_size {};
+
+} // namespace simd_abi
+
+template <class T>
+class simd_mask<T, simd_abi::avx512_fixed_size<8>> {
+ __mmask8 m_value;
+
+ public:
+ class reference {
+ __mmask8& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __mmask8 bit_mask() const {
+ return __mmask8(std::int16_t(1 << m_lane));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__mmask8& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask |= bit_mask();
+ } else {
+ m_mask &= ~bit_mask();
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (m_mask & bit_mask()) != 0;
+ }
+ };
+ using value_type = bool;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(-std::int16_t(value)) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, simd_abi::avx512_fixed_size<8>> const& other)
+ : m_value(static_cast<__mmask8>(other)) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(G&& gen) : m_value(false) {
+ reference(m_value, int(0)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 0>()));
+ reference(m_value, int(1)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 1>()));
+ reference(m_value, int(2)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 2>()));
+ reference(m_value, int(3)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 3>()));
+ reference(m_value, int(4)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 4>()));
+ reference(m_value, int(5)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 5>()));
+ reference(m_value, int(6)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 6>()));
+ reference(m_value, int(7)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 7>()));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __mmask8 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __mmask8()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ auto const bit_mask = __mmask8(std::int16_t(1 << i));
+ return (m_value & bit_mask) != 0;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_kor_mask8(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_kand_mask8(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ static const __mmask8 true_value(static_cast<__mmask8>(simd_mask(true)));
+ return simd_mask(_kxor_mask8(true_value, m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return m_value == other.m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return m_value != other.m_value;
+ }
+};
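+
+// Illustrative note: a __mmask8 stores one lane per bit, so simd_mask(true)
+// holds 0xFF and mask[3] = true sets bit 3 (1 << 3); the generator constructor
+// above simply writes each bit through the reference proxy.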
+
+template <class T>
+class simd_mask<T, simd_abi::avx512_fixed_size<16>> {
+ __mmask16 m_value;
+
+ public:
+ class reference {
+ __mmask16& m_mask;
+ int m_lane;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __mmask16 bit_mask() const {
+ return __mmask16(std::int32_t(1 << m_lane));
+ }
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__mmask16& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ if (value) {
+ m_mask |= bit_mask();
+ } else {
+ m_mask &= ~bit_mask();
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ return (m_mask & bit_mask()) != 0;
+ }
+ };
+ using value_type = bool;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(-std::int32_t(value)) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, simd_abi::avx512_fixed_size<16>> const& other)
+ : m_value(static_cast<__mmask16>(other)) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(G&& gen) : m_value(false) {
+ reference(m_value, int(0)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 0>()));
+ reference(m_value, int(1)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 1>()));
+ reference(m_value, int(2)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 2>()));
+ reference(m_value, int(3)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 3>()));
+ reference(m_value, int(4)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 4>()));
+ reference(m_value, int(5)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 5>()));
+ reference(m_value, int(6)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 6>()));
+ reference(m_value, int(7)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 7>()));
+ reference(m_value, int(8)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 8>()));
+ reference(m_value, int(9)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 9>()));
+ reference(m_value, int(10)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 10>()));
+ reference(m_value, int(11)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 11>()));
+ reference(m_value, int(12)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 12>()));
+ reference(m_value, int(13)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 13>()));
+ reference(m_value, int(14)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 14>()));
+ reference(m_value, int(15)) =
+ static_cast<bool>(gen(std::integral_constant<std::size_t, 15>()));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 16;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ __mmask16 const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __mmask16()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ auto const bit_mask = __mmask16(std::int32_t(1 << i));
+ return (m_value & bit_mask) != 0;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(_kor_mask16(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(_kand_mask16(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ static const __mmask16 true_value(static_cast<__mmask16>(simd_mask(true)));
+ return simd_mask(_kxor_mask16(true_value, m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ simd_mask const& other) const {
+ return m_value == other.m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ simd_mask const& other) const {
+ return m_value != other.m_value;
+ }
+};
+
+template <>
+class simd<double, simd_abi::avx512_fixed_size<8>> {
+ __m512d m_value;
+
+ public:
+ using value_type = double;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_pd(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m512d const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+              // the constraint checks that one can write
+              // { value_type r = gen(std::integral_constant<std::size_t, i>()); }
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm512_setr_pd(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_loadu_pd(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_load_pd(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_storeu_pd(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_store_pd(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512d()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm512_sub_pd(_mm512_set1_pd(0.0), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_mul_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_div_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_add_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_sub_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_pd_mask(static_cast<__m512d>(lhs),
+ static_cast<__m512d>(rhs), _CMP_LT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+    return mask_type(_mm512_cmp_pd_mask(static_cast<__m512d>(lhs),
+                                        static_cast<__m512d>(rhs), _CMP_GT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_pd_mask(static_cast<__m512d>(lhs),
+ static_cast<__m512d>(rhs), _CMP_LE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+    return mask_type(_mm512_cmp_pd_mask(static_cast<__m512d>(lhs),
+                                        static_cast<__m512d>(rhs), _CMP_GE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_pd_mask(static_cast<__m512d>(lhs),
+ static_cast<__m512d>(rhs), _CMP_EQ_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_pd_mask(
+ static_cast<__m512d>(lhs), static_cast<__m512d>(rhs), _CMP_NEQ_OS));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+copysign(
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ static const __m512i sign_mask =
+ reinterpret_cast<__m512i>(static_cast<__m512d>(
+ Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>(-0.0)));
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ reinterpret_cast<__m512d>(_mm512_xor_epi64(
+ _mm512_andnot_epi64(
+ sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(a))),
+ _mm512_and_epi64(
+ sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(b))))));
+}
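+
+// Worked example (illustrative): sign_mask broadcasts -0.0, i.e.
+// 0x8000000000000000 in every lane. _mm512_andnot_epi64 clears the sign bit of
+// a, _mm512_and_epi64 extracts the sign bit of b, and the xor merges the two
+// disjoint bit sets, so copysign({3.0, ...}, {-1.0, ...}) yields {-3.0, ...}
+// without branching.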
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ abs(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512d const rhs = static_cast<__m512d>(a);
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU < 830)
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ (__m512d)_mm512_and_epi64((__m512i)rhs,
+ _mm512_set1_epi64(0x7fffffffffffffffLL)));
+#else
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_abs_pd(rhs));
+#endif
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ floor(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512d const val = static_cast<__m512d>(a);
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_roundscale_pd(val, _MM_FROUND_TO_NEG_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ ceil(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512d const val = static_cast<__m512d>(a);
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_roundscale_pd(val, _MM_FROUND_TO_POS_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ round(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512d const val = static_cast<__m512d>(a);
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_roundscale_pd(val, _MM_FROUND_TO_NEAREST_INT));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ trunc(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512d const val = static_cast<__m512d>(a);
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_roundscale_pd(val, _MM_FROUND_TO_ZERO));
+}
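+
+// Illustrative note: _mm512_roundscale_pd rounds each lane under the given
+// mode, e.g. round({2.5, ...}) -> {2.0, ...} because _MM_FROUND_TO_NEAREST_INT
+// rounds half to even, while trunc({-2.7, ...}) -> {-2.0, ...}.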
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ sqrt(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_sqrt_pd(static_cast<__m512d>(a)));
+}
+
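+// cbrt, exp, and log below map onto SVML vector-math intrinsics, which only
+// the Intel compiler provides; with other compilers these overloads are
+// simply not defined for this ABI.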
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ cbrt(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cbrt_pd(static_cast<__m512d>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ exp(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_exp_pd(static_cast<__m512d>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ log(Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_log_pd(static_cast<__m512d>(a)));
+}
+
+#endif
+
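+// fma(a, b, c) computes a * b + c with a single rounding step via vfmadd;
+// max and min likewise map one-to-one onto the corresponding intrinsics.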
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+fma(Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& c) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_fmadd_pd(static_cast<__m512d>(a), static_cast<__m512d>(b),
+ static_cast<__m512d>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+max(Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_max_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+min(Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_min_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
+}
+
+namespace Experimental {
+
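+// condition(mask, b, c) selects b where the mask is set and c elsewhere.
+// _mm512_mask_blend_pd(k, x, y) picks y for set mask bits, which is why c
+// and b appear swapped relative to the intrinsic's argument order.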
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<double, simd_abi::avx512_fixed_size<8>>
+ condition(simd_mask<double, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<double, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<double, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<double, simd_abi::avx512_fixed_size<8>>(
+ _mm512_mask_blend_pd(static_cast<__mmask8>(a), static_cast<__m512d>(c),
+ static_cast<__m512d>(b)));
+}
+
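+// 8-lane float packs into a 256-bit register; the comparison operators still
+// return an 8-bit __mmask8 (via the AVX-512VL _mm256_cmp_ps_mask forms), so
+// the mask type stays uniform with the 512-bit specializations.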
+template <>
+class simd<float, simd_abi::avx512_fixed_size<8>> {
+ __m256 m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_ps(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256 const& value_in)
+ : m_value(value_in) {}
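+ // Generator constructor: gen is invoked once per lane with a compile-time
+ // index. Hypothetical usage sketch (names not part of this file):
+ //   simd<float, simd_abi::avx512_fixed_size<8>> iota(
+ //       [](auto lane) { return static_cast<float>(lane()); });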
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen)
+ : m_value(_mm256_setr_ps(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm256_loadu_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm256_load_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_storeu_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_store_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm256_sub_ps(_mm256_set1_ps(0.0f), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_mul_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_div_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_add_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sub_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_LT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_GT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_LE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_GE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_EQ_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_NEQ_OS));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>>
+copysign(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ __m256 const sign_mask = _mm256_set1_ps(-0.0f);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_xor_ps(_mm256_andnot_ps(sign_mask, static_cast<__m256>(a)),
+ _mm256_and_ps(sign_mask, static_cast<__m256>(b))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> abs(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256 const sign_mask = _mm256_set1_ps(-0.0f);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_andnot_ps(sign_mask, static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256 const val = static_cast<__m256>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_roundscale_ps(val, _MM_FROUND_TO_NEG_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256 const val = static_cast<__m256>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_roundscale_ps(val, _MM_FROUND_TO_POS_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256 const val = static_cast<__m256>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_roundscale_ps(val, _MM_FROUND_TO_NEAREST_INT));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256 const val = static_cast<__m256>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_roundscale_ps(val, _MM_FROUND_TO_ZERO));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> sqrt(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_sqrt_ps(static_cast<__m256>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> cbrt(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_cbrt_ps(static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> exp(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_exp_ps(static_cast<__m256>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> log(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_log_ps(static_cast<__m256>(a)));
+}
+
+#endif
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> fma(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& c) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_fmadd_ps(static_cast<__m256>(a), static_cast<__m256>(b),
+ static_cast<__m256>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> max(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_max_ps(static_cast<__m256>(a), static_cast<__m256>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<8>> min(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& b) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_min_ps(static_cast<__m256>(a), static_cast<__m256>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<float, simd_abi::avx512_fixed_size<8>> condition(
+ simd_mask<float, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<float, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<float, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<float, simd_abi::avx512_fixed_size<8>>(
+ _mm256_mask_blend_ps(static_cast<__mmask8>(a), static_cast<__m256>(c),
+ static_cast<__m256>(b)));
+}
+
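+// 16-lane float is the full-width case: one __m512 register and __mmask16
+// comparison results.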
+template <>
+class simd<float, simd_abi::avx512_fixed_size<16>> {
+ __m512 m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 16;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_ps(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m512 const& value_in)
+ : m_value(value_in) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen)
+ : m_value(
+ _mm512_setr_ps(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()),
+ gen(std::integral_constant<std::size_t, 8>()),
+ gen(std::integral_constant<std::size_t, 9>()),
+ gen(std::integral_constant<std::size_t, 10>()),
+ gen(std::integral_constant<std::size_t, 11>()),
+ gen(std::integral_constant<std::size_t, 12>()),
+ gen(std::integral_constant<std::size_t, 13>()),
+ gen(std::integral_constant<std::size_t, 14>()),
+ gen(std::integral_constant<std::size_t, 15>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_loadu_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_load_ps(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_storeu_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_store_ps(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm512_sub_ps(_mm512_set1_ps(0.0f), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_mul_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_div_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_add_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_sub_ps(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_LT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_GT_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_LE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_GE_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_EQ_OS));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmp_ps_mask(lhs.m_value, rhs.m_value, _CMP_NEQ_OS));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>>
+copysign(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a,
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& b) {
+ __m512 const sign_mask = _mm512_set1_ps(-0.0f);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_xor_ps(_mm512_andnot_ps(sign_mask, static_cast<__m512>(a)),
+ _mm512_and_ps(sign_mask, static_cast<__m512>(b))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> abs(
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512 const sign_mask = _mm512_set1_ps(-0.0f);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_andnot_ps(sign_mask, static_cast<__m512>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512 const val = static_cast<__m512>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_roundscale_ps(val, _MM_FROUND_TO_NEG_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512 const val = static_cast<__m512>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_roundscale_ps(val, _MM_FROUND_TO_POS_INF));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512 const val = static_cast<__m512>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_roundscale_ps(val, _MM_FROUND_TO_NEAREST_INT));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512 const val = static_cast<__m512>(a);
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_roundscale_ps(val, _MM_FROUND_TO_ZERO));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> sqrt(
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_sqrt_ps(static_cast<__m512>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> cbrt(
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cbrt_ps(static_cast<__m512>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> exp(
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_exp_ps(static_cast<__m512>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> log(
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_log_ps(static_cast<__m512>(a)));
+}
+
+#endif
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> fma(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>> const& a,
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>> const& b,
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& c) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_fmadd_ps(static_cast<__m512>(a), static_cast<__m512>(b),
+ static_cast<__m512>(c)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> max(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>> const& a,
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& b) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_max_ps(static_cast<__m512>(a), static_cast<__m512>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+Experimental::simd<float, Experimental::simd_abi::avx512_fixed_size<16>> min(
+ Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>> const& a,
+ Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>> const& b) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_min_ps(static_cast<__m512>(a), static_cast<__m512>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<float, simd_abi::avx512_fixed_size<16>> condition(
+ simd_mask<float, simd_abi::avx512_fixed_size<16>> const& a,
+ simd<float, simd_abi::avx512_fixed_size<16>> const& b,
+ simd<float, simd_abi::avx512_fixed_size<16>> const& c) {
+ return simd<float, simd_abi::avx512_fixed_size<16>>(
+ _mm512_mask_blend_ps(static_cast<__mmask16>(a), static_cast<__m512>(c),
+ static_cast<__m512>(b)));
+}
+
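+// Eight std::int32_t lanes also fit a 256-bit register; as with 8-lane
+// float, AVX-512VL supplies the mask-producing integer compares.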
+template <>
+class simd<std::int32_t, simd_abi::avx512_fixed_size<8>> {
+ __m256i m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_epi32(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const& other);
+ template <class G,
+ std::enable_if_t<
+ // require that gen(std::integral_constant<std::size_t, i>())
+ // yields a value_type for every lane index i
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(
+ _mm256_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
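+ // Loads and stores go through masked intrinsics with an all-true mask;
+ // a plain _mm256_loadu_si256 would behave identically here, the masked
+ // form presumably being kept for symmetry with genuinely masked paths.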
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm256_mask_loadu_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm256_mask_load_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_mask_storeu_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_mask_store_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm256_sub_epi32(_mm256_set1_epi32(0), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_mullo_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmplt_epi32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmplt_epi32_mask(static_cast<__m256i>(rhs),
+ static_cast<__m256i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmple_epi32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmple_epi32_mask(static_cast<__m256i>(rhs),
+ static_cast<__m256i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpeq_epi32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpneq_epi32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+
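+ // Shifts on the signed type are arithmetic (srai/srav replicate the sign
+ // bit); contrast the unsigned specialization below, which uses the
+ // logical srli/srlv forms.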
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_srai_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_srav_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_slli_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sllv_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<8>>
+abs(Experimental::simd<std::int32_t,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m256i const rhs = static_cast<__m256i>(a);
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm256_abs_epi32(rhs));
+}
+
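+// floor/ceil/round/trunc of an integer value is the value itself, so for the
+// integer simd types these overloads reduce to a lane-wise conversion to the
+// corresponding floating-point simd (double for the 8-lane variants; float
+// for the 16-lane ones, where magnitudes above 2^24 may round).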
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi32_pd(static_cast<__m256i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
+ condition(simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
+ static_cast<__m256i>(b)));
+}
+
+template <>
+class simd<std::int32_t, simd_abi::avx512_fixed_size<16>> {
+ __m512i m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 16;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_epi32(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m512i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const& other);
+ template <class G,
+ std::enable_if_t<
+ // require that gen(std::integral_constant<std::size_t, i>())
+ // yields a value_type for every lane index i
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm512_setr_epi32(
+ gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()),
+ gen(std::integral_constant<std::size_t, 8>()),
+ gen(std::integral_constant<std::size_t, 9>()),
+ gen(std::integral_constant<std::size_t, 10>()),
+ gen(std::integral_constant<std::size_t, 11>()),
+ gen(std::integral_constant<std::size_t, 12>()),
+ gen(std::integral_constant<std::size_t, 13>()),
+ gen(std::integral_constant<std::size_t, 14>()),
+ gen(std::integral_constant<std::size_t, 15>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_mask_storeu_epi32(ptr, static_cast<__mmask16>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_mask_store_epi32(ptr, static_cast<__mmask16>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_mask_loadu_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_mask_load_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm512_sub_epi32(_mm512_set1_epi32(0), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_mullo_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_add_epi32(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_sub_epi32(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epi32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epi32_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epi32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epi32_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpeq_epi32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpneq_epi32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_srai_epi32(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_srav_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_slli_epi32(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_sllv_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>>
+abs(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ __m512i const rhs = static_cast<__m512i>(a);
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_abs_epi32(rhs));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepi32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepi32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepi32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepi32_ps(static_cast<__m512i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>>
+ condition(simd_mask<std::int32_t, simd_abi::avx512_fixed_size<16>> const& a,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& b,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& c) {
+ return simd<std::int32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_mask_blend_epi32(static_cast<__mmask16>(a),
+ static_cast<__m512i>(c),
+ static_cast<__m512i>(b)));
+}
+
+template <>
+class simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> {
+ __m256i m_value;
+
+ public:
+ using value_type = std::uint32_t;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm256_set1_epi32(
+ Kokkos::bit_cast<std::int32_t>(value_type(value)))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m256i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
+ : m_value(static_cast<__m256i>(other)) {}
+ template <class G,
+ std::enable_if_t<
+ // require that gen(std::integral_constant<std::size_t, i>())
+ // yields a value_type for every lane index i
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(
+ _mm256_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm256_mask_storeu_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm256_mask_store_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm256_mask_loadu_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm256_mask_load_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_mullo_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmplt_epu32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmplt_epu32_mask(static_cast<__m256i>(rhs),
+ static_cast<__m256i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmple_epu32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmple_epu32_mask(static_cast<__m256i>(rhs),
+ static_cast<__m256i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpeq_epu32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm256_cmpneq_epu32_mask(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_srli_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_srlv_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm256_slli_epi32(static_cast<__m256i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm256_sllv_epi32(static_cast<__m256i>(lhs),
+ static_cast<__m256i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
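+// abs of an unsigned type is the identity, so the input is returned as-is.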
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<8>>
+abs(Experimental::simd<std::uint32_t,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+floor(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+ceil(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+round(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu32_pd(static_cast<__m256i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+trunc(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu32_pd(static_cast<__m256i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
+ condition(simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
+ static_cast<__m256i>(b)));
+}
+
+template <>
+class simd<std::uint32_t, simd_abi::avx512_fixed_size<16>> {
+ __m512i m_value;
+
+ public:
+ using value_type = std::uint32_t;
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 16;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_epi32(
+ Kokkos::bit_cast<std::int32_t>(value_type(value)))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ __m512i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& other)
+ : m_value(static_cast<__m512i>(other)) {}
+ template <class G,
+ std::enable_if_t<
+ // require that gen(std::integral_constant<std::size_t, i>())
+ // yields a value_type for every lane index i
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(_mm512_setr_epi32(
+ gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()),
+ gen(std::integral_constant<std::size_t, 8>()),
+ gen(std::integral_constant<std::size_t, 9>()),
+ gen(std::integral_constant<std::size_t, 10>()),
+ gen(std::integral_constant<std::size_t, 11>()),
+ gen(std::integral_constant<std::size_t, 12>()),
+ gen(std::integral_constant<std::size_t, 13>()),
+ gen(std::integral_constant<std::size_t, 14>()),
+ gen(std::integral_constant<std::size_t, 15>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_mask_loadu_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_mask_load_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(mask_type(true)), ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_mask_storeu_epi32(ptr, static_cast<__mmask16>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_mask_store_epi32(ptr, static_cast<__mmask16>(mask_type(true)),
+ m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+ const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_mullo_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_add_epi32(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_sub_epi32(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epu32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epu32_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epu32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epu32_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpeq_epu32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpneq_epu32_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_srli_epi32(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_srlv_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_slli_epi32(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_sllv_epi32(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>>
+abs(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+floor(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepu32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+ceil(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepu32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+round(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepu32_ps(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::avx512_fixed_size<16>>
+trunc(Experimental::simd<
+ std::uint32_t, Experimental::simd_abi::avx512_fixed_size<16>> const& a) {
+ return Experimental::simd<float,
+ Experimental::simd_abi::avx512_fixed_size<16>>(
+ _mm512_cvtepu32_ps(static_cast<__m512i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>
+ condition(
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<16>> const& a,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>> const& b,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>> const& c) {
+ return simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_mask_blend_epi32(static_cast<__mmask16>(a),
+ static_cast<__m512i>(c),
+ static_cast<__m512i>(b)));
+}
+
+template <>
+class simd<std::int64_t, simd_abi::avx512_fixed_size<8>> {
+ __m512i m_value;
+
+ public:
+ using value_type = std::int64_t;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_epi64(value_type(value))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
+ : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other);
+ template <class G,
+ std::enable_if_t<
+ // basically, can you do { value_type r =
+ // gen(std::integral_constant<std::size_t, i>()); }
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(
+ _mm512_setr_epi64(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {}
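+ // Illustrative note (a sketch, not upstream documentation): the generator
+ // constructor above enables lane-wise initialization, e.g.
+ //   simd<std::int64_t, simd_abi::avx512_fixed_size<8>> iota(
+ //       [](auto lane) { return std::int64_t(lane); });
+ // fills lanes 0 through 7 with the values 0 through 7.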
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_loadu_si512(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_load_si512(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_storeu_si512(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_store_si512(ptr, m_value);
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(_mm512_sub_epi64(_mm512_set1_epi64(0), m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_mullo_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epi64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epi64_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epi64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epi64_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpeq_epi64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpneq_epi64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) {
+ return simd(_mm512_srai_epi64(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) {
+ return simd(_mm512_srav_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) {
+ return simd(_mm512_slli_epi64(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) {
+ return simd(_mm512_sllv_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx512_fixed_size<8>>
+abs(Experimental::simd<std::int64_t,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ __m512i const rhs = static_cast<__m512i>(a);
+ return Experimental::simd<std::int64_t,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_abs_epi64(rhs));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+floor(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::avx512_fixed_size<8>>
+ ceil(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+round(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+trunc(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepi64_pd(static_cast<__m512i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
+ condition(simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+ _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
+ static_cast<__m512i>(b)));
+}
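+
+// Illustrative usage (a sketch, not part of the upstream sources): given the
+// comparison operators and masked blend above, condition(a < b, a, b)
+// selects the smaller value in each lane, i.e. an element-wise minimum.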
+
+template <>
+class simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> {
+ __m512i m_value;
+
+ public:
+ using value_type = std::uint64_t;
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ using reference = value_type&;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 8;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(_mm512_set1_epi64(
+ Kokkos::bit_cast<std::int64_t>(value_type(value)))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, abi_type> const& other)
+ : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
+ template <class G,
+ std::enable_if_t<
+ // basically, can you do { value_type r =
+ // gen(std::integral_constant<std::size_t, i>()); }
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept
+ : m_value(
+ _mm512_setr_epi64(gen(std::integral_constant<std::size_t, 0>()),
+ gen(std::integral_constant<std::size_t, 1>()),
+ gen(std::integral_constant<std::size_t, 2>()),
+ gen(std::integral_constant<std::size_t, 3>()),
+ gen(std::integral_constant<std::size_t, 4>()),
+ gen(std::integral_constant<std::size_t, 5>()),
+ gen(std::integral_constant<std::size_t, 6>()),
+ gen(std::integral_constant<std::size_t, 7>()))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int64_t, abi_type> const& other)
+ : m_value(static_cast<__m512i>(other)) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reinterpret_cast<value_type*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reinterpret_cast<value_type const*>(&m_value)[i];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = _mm512_loadu_si512(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = _mm512_load_si512(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ _mm512_storeu_si512(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ _mm512_store_si512(ptr, m_value);
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_mullo_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_srli_epi64(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_srlv_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(_mm512_slli_epi64(static_cast<__m512i>(lhs), rhs));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_sllv_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator&(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_and_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator|(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(_mm512_or_epi64(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epu64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmplt_epu64_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epu64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmple_epu64_mask(static_cast<__m512i>(rhs),
+ static_cast<__m512i>(lhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpeq_epu64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(_mm512_cmpneq_epu64_mask(static_cast<__m512i>(lhs),
+ static_cast<__m512i>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx512_fixed_size<8>>
+abs(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+floor(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+ceil(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+round(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu64_pd(static_cast<__m512i>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ double, Experimental::simd_abi::avx512_fixed_size<8>>
+trunc(Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::avx512_fixed_size<8>> const& a) {
+ return Experimental::simd<double,
+ Experimental::simd_abi::avx512_fixed_size<8>>(
+ _mm512_cvtepu64_pd(static_cast<__m512i>(a)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
+ condition(simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& a,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& b,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& c) {
+ return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+ _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
+ static_cast<__m512i>(b)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int32_t, simd_abi::avx512_fixed_size<8>>::simd(
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
+ : m_value(_mm512_cvtepi64_epi32(static_cast<__m512i>(other))) {}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int64_t, simd_abi::avx512_fixed_size<8>>::simd(
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
+ : m_value(static_cast<__m512i>(other)) {}
+
+template <>
+class const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+ simd<double, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<double, abi_type>;
+ using mask_type = simd_mask<double, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_pd(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512d>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, vector_aligned_tag) const {
+ _mm512_mask_store_pd(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512d>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ double* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm512_mask_i32scatter_pd(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m512d>(m_value), 8);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+ simd<double, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+ simd<double, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<double, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<double, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_pd(
+ _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_pd(
+ _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ double const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_pd(
+ static_cast<__m512d>(m_value), static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index), mem, 8));
+ }
+ template <class U, std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<double, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<double, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<double, simd_abi::avx512_fixed_size<8>>(_mm512_mask_blend_pd(
+ static_cast<__mmask8>(m_mask), static_cast<__m512d>(m_value),
+ static_cast<__m512d>(x_as_value_type)));
+ }
+};
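+
+// Illustrative usage (a sketch; where() is the helper declared in the common
+// header later in this diff): the assignment where(m, v) = x; writes x into
+// v only in the lanes selected by the mask m, leaving the other lanes of v
+// unchanged.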
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::avx512_fixed_size<8>>,
+ simd<float, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ _mm256_mask_storeu_ps(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ _mm256_mask_store_ps(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm256_mask_i32scatter_ps(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m256>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::avx512_fixed_size<8>>,
+ simd<float, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::avx512_fixed_size<8>>,
+ simd<float, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<float, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ m_value = value_type(_mm256_mask_loadu_ps(
+ _mm256_set1_ps(0.0f), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm256_mask_load_ps(
+ _mm256_set1_ps(0.0f), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ __m256 on = _mm256_castsi256_ps(_mm256_set1_epi32(-1));
+ __m256 mask = _mm256_maskz_mov_ps(static_cast<__mmask8>(m_mask), on);
+ m_value = value_type(
+ _mm256_mask_i32gather_ps(static_cast<__m256>(m_value), mem,
+ static_cast<__m256i>(index), mask, 4));
+ }
+ template <
+ class U,
+ std::enable_if_t<
+ std::is_convertible_v<U, simd<float, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<float, simd_abi::avx512_fixed_size<8>>(_mm256_mask_blend_ps(
+ static_cast<__mmask8>(m_mask), static_cast<__m256>(m_value),
+ static_cast<__m256>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::avx512_fixed_size<16>>,
+ simd<float, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_ps(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ _mm512_mask_store_ps(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) const {
+ _mm512_mask_i32scatter_ps(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index),
+ static_cast<__m512>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::avx512_fixed_size<16>>,
+ simd<float, simd_abi::avx512_fixed_size<16>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::avx512_fixed_size<16>>,
+ simd<float, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::avx512_fixed_size<16>> const& mask_arg,
+ simd<float, simd_abi::avx512_fixed_size<16>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_ps(
+ _mm512_set1_ps(0.0f), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_ps(
+ _mm512_set1_ps(0.0f), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_ps(
+ static_cast<__m512>(m_value), static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index), mem, 4));
+ }
+ template <class U, std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<float, simd_abi::avx512_fixed_size<16>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::avx512_fixed_size<16>>>(
+ std::forward<U>(x));
+ m_value = simd<float, simd_abi::avx512_fixed_size<16>>(_mm512_mask_blend_ps(
+ static_cast<__mmask16>(m_mask), static_cast<__m512>(m_value),
+ static_cast<__m512>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ _mm256_mask_storeu_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ _mm256_mask_store_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm256_mask_i32scatter_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m256i>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm256_mask_loadu_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm256_mask_load_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ m_value = value_type(_mm256_mmask_i32gather_epi32(
+ static_cast<__m256i>(m_value), static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index), mem, 4));
+ }
+
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::int32_t, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int32_t, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_mask_blend_epi32(static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value),
+ static_cast<__m256i>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ _mm512_mask_store_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) const {
+ _mm512_mask_i32scatter_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index),
+ static_cast<__m512i>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<16>> const& mask_arg,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_epi32(
+ static_cast<__m512i>(m_value), static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index), mem, 4));
+ }
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::int32_t, simd_abi::avx512_fixed_size<16>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int32_t, simd_abi::avx512_fixed_size<16>>>(
+ std::forward<U>(x));
+ m_value = simd<std::int32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_mask_blend_epi32(static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value),
+ static_cast<__m512i>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<std::uint32_t, abi_type>;
+ using mask_type = simd_mask<std::uint32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint32_t* mem, element_aligned_tag) const {
+ _mm256_mask_storeu_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint32_t* mem, vector_aligned_tag) const {
+ _mm256_mask_store_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::uint32_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm256_mask_i32scatter_epi32(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m256i>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint32_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm256_mask_loadu_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint32_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm256_mask_load_epi32(
+ _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::uint32_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ m_value = value_type(_mm256_mmask_i32gather_epi32(
+ static_cast<__m256i>(m_value), static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index), mem, 4));
+ }
+
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+ _mm256_mask_blend_epi32(static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(m_value),
+ static_cast<__m256i>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<16>;
+ using value_type = simd<std::uint32_t, abi_type>;
+ using mask_type = simd_mask<std::uint32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint32_t* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint32_t* mem, vector_aligned_tag) const {
+ _mm512_mask_store_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::uint32_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) const {
+ _mm512_mask_i32scatter_epi32(mem, static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index),
+ static_cast<__m512i>(m_value), 4);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>>
+ : public const_where_expression<
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<16>>,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>> {
+ public:
+ where_expression(
+ simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<16>> const& mask_arg,
+ simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint32_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint32_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_epi32(
+ _mm512_set1_epi32(0), static_cast<__mmask16>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::uint32_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<16>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_epi32(
+ static_cast<__m512i>(m_value), static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(index), mem, 4));
+ }
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>>(
+ std::forward<U>(x));
+ m_value = simd<std::uint32_t, simd_abi::avx512_fixed_size<16>>(
+ _mm512_mask_blend_epi32(static_cast<__mmask16>(m_mask),
+ static_cast<__m512i>(m_value),
+ static_cast<__m512i>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<std::int64_t, abi_type>;
+ using mask_type = simd_mask<std::int64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int64_t* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int64_t* mem, vector_aligned_tag) const {
+ _mm512_mask_store_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int64_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm512_mask_i32scatter_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m512i>(m_value), 8);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int64_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_epi64(
+ _mm512_set1_epi64(0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int64_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_epi64(
+ _mm512_set1_epi64(0), static_cast<__mmask8>(m_mask), mem));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int64_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_epi64(
+ static_cast<__m512i>(m_value), static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index), mem, 8));
+ }
+
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::int64_t, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int64_t, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+ _mm512_mask_blend_epi64(static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value),
+ static_cast<__m512i>(x_as_value_type)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ using abi_type = simd_abi::avx512_fixed_size<8>;
+ using value_type = simd<std::uint64_t, abi_type>;
+ using mask_type = simd_mask<std::uint64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint64_t* mem, element_aligned_tag) const {
+ _mm512_mask_storeu_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint64_t* mem, vector_aligned_tag) const {
+ _mm512_mask_store_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value));
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::uint64_t* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+ _mm512_mask_i32scatter_epi64(mem, static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index),
+ static_cast<__m512i>(m_value), 8);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>>
+ : public const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+ where_expression(
+ simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+ simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint64_t const* mem, element_aligned_tag) {
+ m_value = value_type(_mm512_mask_loadu_epi64(
+ _mm512_set1_epi64(0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint64_t const* mem, vector_aligned_tag) {
+ m_value = value_type(_mm512_mask_load_epi64(
+ _mm512_set1_epi64(0), static_cast<__mmask8>(m_mask), mem));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::uint64_t const* mem,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+ m_value = value_type(_mm512_mask_i32gather_epi64(
+ static_cast<__m512i>(m_value), static_cast<__mmask8>(m_mask),
+ static_cast<__m256i>(index), mem, 8));
+ }
+
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>>(
+ std::forward<U>(x));
+ m_value = simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+ _mm512_mask_blend_epi64(static_cast<__mmask8>(m_mask),
+ static_cast<__m512i>(m_value),
+ static_cast<__m512i>(x_as_value_type)));
+ }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int32_t hmax(
+ const_where_expression<
+ simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> const& x) {
+ return _mm512_mask_reduce_max_epi32(
+ static_cast<__mmask8>(x.impl_get_mask()),
+ _mm512_castsi256_si512(static_cast<__m256i>(x.impl_get_value())));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double hmin(
+ const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+ simd<double, simd_abi::avx512_fixed_size<8>>> const&
+ x) {
+ return _mm512_mask_reduce_min_pd(static_cast<__mmask8>(x.impl_get_mask()),
+ static_cast<__m512d>(x.impl_get_value()));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int64_t reduce(
+ const_where_expression<
+ simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+ simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> const& x,
+ std::int64_t, std::plus<>) {
+ return _mm512_mask_reduce_add_epi64(static_cast<__mmask8>(x.impl_get_mask()),
+ static_cast<__m512i>(x.impl_get_value()));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double reduce(
+ const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+ simd<double, simd_abi::avx512_fixed_size<8>>> const&
+ x,
+ double, std::plus<>) {
+ return _mm512_mask_reduce_add_pd(static_cast<__mmask8>(x.impl_get_mask()),
+ static_cast<__m512d>(x.impl_get_value()));
+}
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
-/*
//@HEADER
// ************************************************************************
//
-// Kokkos v. 3.0
-// Copyright (2020) National Technology & Engineering
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the Corporation nor the names of the
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
-//
-// ************************************************************************
//@HEADER
-*/
#ifndef KOKKOS_SIMD_COMMON_HPP
#define KOKKOS_SIMD_COMMON_HPP
-#include <cmath>
#include <cstring>
#include <Kokkos_Core.hpp>
namespace Experimental {
-template <class To, class From>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION constexpr To bit_cast(
- From const& src) {
- To dst;
- std::memcpy(&dst, &src, sizeof(To));
- return dst;
-}
-
template <class T, class Abi>
class simd;
template <class T, class Abi>
class simd_mask;
-struct element_aligned_tag {};
+class simd_alignment_vector_aligned {};
+
+template <typename... Flags>
+struct simd_flags {};
+
+inline constexpr simd_flags<> simd_flag_default{};
+inline constexpr simd_flags<simd_alignment_vector_aligned> simd_flag_aligned{};
+
+using element_aligned_tag = simd_flags<>;
+using vector_aligned_tag = simd_flags<simd_alignment_vector_aligned>;
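+
+// Illustrative usage (a sketch): v.copy_from(ptr, simd_flag_default)
+// performs an unaligned (element-aligned) load, while
+// v.copy_from(ptr, simd_flag_aligned) requires ptr to be suitably
+// vector-aligned and may use aligned load instructions.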
// class template declarations for const_where_expression and where_expression
};
template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
where_expression<simd_mask<T, Abi>, simd<T, Abi>>
where(typename simd<T, Abi>::mask_type const& mask, simd<T, Abi>& value) {
return where_expression(mask, value);
}
template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
const_where_expression<simd_mask<T, Abi>, simd<T, Abi>>
where(typename simd<T, Abi>::mask_type const& mask,
simd<T, Abi> const& value) {
// fallback implementations of reductions across simd_mask:
template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool all_of(
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool all_of(
simd_mask<T, Abi> const& a) {
return a == simd_mask<T, Abi>(true);
}
template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool any_of(
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool any_of(
simd_mask<T, Abi> const& a) {
return a != simd_mask<T, Abi>(false);
}
template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool none_of(
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool none_of(
simd_mask<T, Abi> const& a) {
return a == simd_mask<T, Abi>(false);
}
-} // namespace Experimental
+// A temporary device-callable implementation of round half to nearest even
+template <typename T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto round_half_to_nearest_even(
+ T const& x) {
+ auto ceil = Kokkos::ceil(x);
+ auto floor = Kokkos::floor(x);
-template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> min(
- Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
- Experimental::simd<T, Abi> result;
- for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
- result[i] = Kokkos::min(a[i], b[i]);
+ if (Kokkos::abs(ceil - x) == Kokkos::abs(floor - x)) {
+ auto rem = Kokkos::remainder(ceil, 2.0);
+ return (rem == 0) ? ceil : floor;
}
- return result;
+ return Kokkos::round(x);
}
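+// Worked examples of the tie-breaking above (round half to even): ties like
+// 0.5 -> 0.0, 1.5 -> 2.0, and 2.5 -> 2.0 pick the even neighbor; all
+// non-ties fall through to Kokkos::round.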
-template <class T, class Abi>
-[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> max(
- Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
- Experimental::simd<T, Abi> result;
- for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
- result[i] = Kokkos::max(a[i], b[i]);
- }
- return result;
-}
-
-// fallback implementations of <cmath> functions.
-// individual Abi types may provide overloads with more efficient
-// implementations.
-// These are not in the Experimental namespace because their double
-// overloads are not either
-
-#define KOKKOS_IMPL_SIMD_UNARY_FUNCTION(FUNC) \
- template <class Abi> \
- [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
- FUNC(Experimental::simd<double, Abi> const& a) { \
- Experimental::simd<double, Abi> result; \
- for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size(); \
- ++i) { \
- result[i] = Kokkos::FUNC(a[i]); \
- } \
- return result; \
- }
-
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(abs)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp2)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log10)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log2)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sqrt)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cbrt)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sin)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cos)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tan)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asin)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acos)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atan)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sinh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cosh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tanh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asinh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acosh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atanh)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erf)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erfc)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tgamma)
-KOKKOS_IMPL_SIMD_UNARY_FUNCTION(lgamma)
-
-#define KOKKOS_IMPL_SIMD_BINARY_FUNCTION(FUNC) \
- template <class Abi> \
- [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
- FUNC(Experimental::simd<double, Abi> const& a, \
- Experimental::simd<double, Abi> const& b) { \
- Experimental::simd<double, Abi> result; \
- for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size(); \
- ++i) { \
- result[i] = Kokkos::FUNC(a[i], b[i]); \
- } \
- return result; \
- }
-
-KOKKOS_IMPL_SIMD_BINARY_FUNCTION(pow)
-KOKKOS_IMPL_SIMD_BINARY_FUNCTION(hypot)
-KOKKOS_IMPL_SIMD_BINARY_FUNCTION(atan2)
-KOKKOS_IMPL_SIMD_BINARY_FUNCTION(copysign)
-
-#define KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(FUNC) \
- template <class Abi> \
- [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
- FUNC(Experimental::simd<double, Abi> const& a, \
- Experimental::simd<double, Abi> const& b, \
- Experimental::simd<double, Abi> const& c) { \
- Experimental::simd<double, Abi> result; \
- for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size(); \
- ++i) { \
- result[i] = Kokkos::FUNC(a[i], b[i], c[i]); \
- } \
- return result; \
- }
-
-KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(fma)
-KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(hypot)
-
+} // namespace Experimental
} // namespace Kokkos
#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_COMMON_MATH_HPP
+#define KOKKOS_SIMD_COMMON_MATH_HPP
+
+#include <functional>  // std::plus, used by the masked reduce fallback below
+
+#include <Kokkos_Core.hpp>  // Kokkos::min, etc.
+
+namespace Kokkos {
+
+namespace Experimental {
+
+template <class T, class Abi>
+class simd;
+
+template <class T, class Abi>
+class simd_mask;
+
+template <class M, class T>
+class const_where_expression;
+
+template <typename T, typename Abi>
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION T
+hmin(const_where_expression<simd_mask<T, Abi>, simd<T, Abi>> const& x) {
+ auto const& v = x.impl_get_value();
+ auto const& m = x.impl_get_mask();
+ auto result = Kokkos::reduction_identity<T>::min();
+ for (std::size_t i = 0; i < v.size(); ++i) {
+ if (m[i]) result = Kokkos::min(result, v[i]);
+ }
+ return result;
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION T
+hmax(const_where_expression<simd_mask<T, Abi>, simd<T, Abi>> const& x) {
+ auto const& v = x.impl_get_value();
+ auto const& m = x.impl_get_mask();
+ auto result = Kokkos::reduction_identity<T>::max();
+ for (std::size_t i = 0; i < v.size(); ++i) {
+ if (m[i]) result = Kokkos::max(result, v[i]);
+ }
+ return result;
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION T
+reduce(const_where_expression<simd_mask<T, Abi>, simd<T, Abi>> const& x, T,
+ std::plus<>) {
+ auto const& v = x.impl_get_value();
+ auto const& m = x.impl_get_mask();
+ auto result = Kokkos::reduction_identity<T>::sum();
+ for (std::size_t i = 0; i < v.size(); ++i) {
+ if (m[i]) result += v[i];
+ }
+ return result;
+}
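+
+// Example (illustrative): sum only the lanes selected by a mask m over v:
+//   T s = reduce(where(m, v), T(0), std::plus<>{});
+// This fallback matches only std::plus and ignores the identity argument
+// (hence the unnamed parameters above).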
+
+} // namespace Experimental
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> min(
+ Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
+ Experimental::simd<T, Abi> result;
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
+ result[i] = Kokkos::min(a[i], b[i]);
+ }
+ return result;
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+namespace Experimental {
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_DEPRECATED KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Abi>
+ min(Experimental::simd<T, Abi> const& a,
+ Experimental::simd<T, Abi> const& b) {
+ return Kokkos::min(a, b);
+}
+} // namespace Experimental
+#endif
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> max(
+ Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
+ Experimental::simd<T, Abi> result;
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
+ result[i] = Kokkos::max(a[i], b[i]);
+ }
+ return result;
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+namespace Experimental {
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_DEPRECATED KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Abi>
+ max(Experimental::simd<T, Abi> const& a,
+ Experimental::simd<T, Abi> const& b) {
+ return Kokkos::max(a, b);
+}
+} // namespace Experimental
+#endif
+
+// fallback implementations of <cmath> functions.
+// individual Abi types may provide overloads with more efficient
+// implementations.
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#define KOKKOS_IMPL_SIMD_UNARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i]); \
+ } \
+ return result; \
+ } \
+ namespace Experimental { \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_DEPRECATED KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+ simd<T, Abi> \
+ FUNC(simd<T, Abi> const& a) { \
+ return Kokkos::FUNC(a); \
+ } \
+ }
+#else
+#define KOKKOS_IMPL_SIMD_UNARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i]); \
+ } \
+ return result; \
+ }
+#endif
+
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(abs)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp2)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log10)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log2)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sqrt)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cbrt)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sin)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cos)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tan)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asin)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acos)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atan)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sinh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cosh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tanh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asinh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acosh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atanh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erf)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erfc)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tgamma)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(lgamma)
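+
+// Example (illustrative): with only these fallbacks visible, Kokkos::sqrt(v)
+// on a simd<double, Abi> expands to a lane-by-lane loop; ABI-specific headers
+// (such as the NEON overloads added elsewhere in this patch) can provide a
+// single-intrinsic overload that is preferred by overload resolution.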
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#define KOKKOS_IMPL_SIMD_BINARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a, \
+ Experimental::simd<T, Abi> const& b) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i], b[i]); \
+ } \
+ return result; \
+ } \
+ namespace Experimental { \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_DEPRECATED KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+ simd<T, Abi> \
+ FUNC(simd<T, Abi> const& a, simd<T, Abi> const& b) { \
+    return Kokkos::FUNC(a, b);                                               \
+ } \
+ }
+#else
+#define KOKKOS_IMPL_SIMD_BINARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a, \
+ Experimental::simd<T, Abi> const& b) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i], b[i]); \
+ } \
+ return result; \
+ }
+#endif
+
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(pow)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(hypot)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(atan2)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(copysign)
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_4
+#define KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a, \
+ Experimental::simd<T, Abi> const& b, \
+ Experimental::simd<T, Abi> const& c) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i], b[i], c[i]); \
+ } \
+ return result; \
+ } \
+ namespace Experimental { \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_DEPRECATED KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+ simd<T, Abi> \
+ FUNC(simd<T, Abi> const& a, simd<T, Abi> const& b, \
+ simd<T, Abi> const& c) { \
+ return Kokkos::FUNC(a, b, c); \
+ } \
+ }
+#else
+#define KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(FUNC) \
+ template <class T, class Abi> \
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> FUNC( \
+ Experimental::simd<T, Abi> const& a, \
+ Experimental::simd<T, Abi> const& b, \
+ Experimental::simd<T, Abi> const& c) { \
+ Experimental::simd<T, Abi> result; \
+ for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) { \
+ result[i] = Kokkos::FUNC(a[i], b[i], c[i]); \
+ } \
+ return result; \
+ }
+#endif
+
+KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(fma)
+KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(hypot)
+
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_NEON_HPP
+#define KOKKOS_SIMD_NEON_HPP
+
+#include <functional>
+#include <type_traits>
+
+#include <Kokkos_SIMD_Common.hpp>
+
+#include <arm_neon.h>
+
+#ifdef KOKKOS_SIMD_COMMON_MATH_HPP
+#error \
+ "Kokkos_SIMD_NEON.hpp must be included before Kokkos_SIMD_Common_Math.hpp!"
+#endif
+
+namespace Kokkos {
+
+namespace Experimental {
+
+namespace simd_abi {
+
+template <int N>
+class neon_fixed_size {};
+
+} // namespace simd_abi
+
+namespace Impl {
+
+template <class Derived, int Bits, int Size>
+class neon_mask;
+
+template <class Derived>
+class neon_mask<Derived, 64, 2> {
+ uint64x2_t m_value;
+
+ public:
+ class reference {
+ uint64x2_t& m_mask;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(uint64x2_t& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+      // this switch statement is needed because the lane argument of the
+      // vsetq_lane_u64/vgetq_lane_u64 intrinsics has to be a compile-time
+      // constant
+ switch (m_lane) {
+ case 0:
+ m_mask = vsetq_lane_u64(value ? 0xFFFFFFFFFFFFFFFFULL : 0, m_mask, 0);
+ break;
+ case 1:
+ m_mask = vsetq_lane_u64(value ? 0xFFFFFFFFFFFFFFFFULL : 0, m_mask, 1);
+ break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_u64(m_mask, 0) != 0;
+ case 1: return vgetq_lane_u64(m_mask, 1) != 0;
+ }
+ return false;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using implementation_type = uint64x2_t;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit neon_mask(value_type value)
+ : m_value(vmovq_n_u64(value ? 0xFFFFFFFFFFFFFFFFULL : 0)) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_u64(
+ (gen(std::integral_constant<std::size_t, 0>()) ? 0xFFFFFFFFFFFFFFFFULL
+ : 0),
+ m_value, 0);
+ m_value = vsetq_lane_u64(
+ (gen(std::integral_constant<std::size_t, 1>()) ? 0xFFFFFFFFFFFFFFFFULL
+ : 0),
+ m_value, 1);
+ }
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask(
+ neon_mask<U, 32, 2> const& other) {
+ operator[](0) = bool(other[0]);
+ operator[](1) = bool(other[1]);
+ }
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask(
+ neon_mask<U, 64, 2> const& other)
+ : neon_mask(static_cast<uint64x2_t>(other)) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ uint64x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator uint64x2_t()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<uint64x2_t&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator||(neon_mask const& other) const {
+ return Derived(vorrq_u64(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator&&(neon_mask const& other) const {
+ return Derived(vandq_u64(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived operator!() const {
+ auto const true_value = static_cast<uint64x2_t>(neon_mask(true));
+ return Derived(veorq_u64(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ neon_mask const& other) const {
+ uint64x2_t const elementwise_equality = vceqq_u64(m_value, other.m_value);
+ uint32x2_t const narrow_elementwise_equality =
+ vqmovn_u64(elementwise_equality);
+ uint64x1_t const overall_equality_neon =
+ vreinterpret_u64_u32(narrow_elementwise_equality);
+ uint64_t const overall_equality = vget_lane_u64(overall_equality_neon, 0);
+ return overall_equality == 0xFFFFFFFFFFFFFFFFULL;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ neon_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <class Derived>
+class neon_mask<Derived, 32, 2> {
+ uint32x2_t m_value;
+
+ public:
+ class reference {
+ uint32x2_t& m_mask;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(uint32x2_t& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ switch (m_lane) {
+ case 0:
+ m_mask = vset_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 0);
+ break;
+ case 1:
+ m_mask = vset_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 1);
+ break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ switch (m_lane) {
+ case 0: return vget_lane_u32(m_mask, 0) != 0;
+ case 1: return vget_lane_u32(m_mask, 1) != 0;
+ }
+ return false;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using implementation_type = uint32x2_t;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit neon_mask(value_type value)
+ : m_value(vmov_n_u32(value ? 0xFFFFFFFFU : 0)) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ G&& gen) noexcept {
+ m_value = vset_lane_u32(
+ (gen(std::integral_constant<std::size_t, 0>()) ? 0xFFFFFFFFU : 0),
+ m_value, 0);
+ m_value = vset_lane_u32(
+ (gen(std::integral_constant<std::size_t, 1>()) ? 0xFFFFFFFFU : 0),
+ m_value, 1);
+ }
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask(
+ neon_mask<U, 64, 2> const& other)
+ : m_value(vqmovn_u64(static_cast<uint64x2_t>(other))) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask(
+ neon_mask<U, 32, 2> const& other)
+ : m_value(static_cast<uint32x2_t>(other)) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ uint32x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator uint32x2_t()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<uint32x2_t&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator||(neon_mask const& other) const {
+ return Derived(vorr_u32(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator&&(neon_mask const& other) const {
+ return Derived(vand_u32(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived operator!() const {
+ auto const true_value = static_cast<uint32x2_t>(neon_mask(true));
+ return Derived(veor_u32(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ neon_mask const& other) const {
+ uint32x2_t const elementwise_equality = vceq_u32(m_value, other.m_value);
+ uint64x1_t const overall_equality_neon =
+ vreinterpret_u64_u32(elementwise_equality);
+ uint64_t const overall_equality = vget_lane_u64(overall_equality_neon, 0);
+ return overall_equality == 0xFFFFFFFFFFFFFFFFULL;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ neon_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+template <class Derived>
+class neon_mask<Derived, 32, 4> {
+ uint32x4_t m_value;
+
+ public:
+ class reference {
+ uint32x4_t& m_mask;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(uint32x4_t& mask_arg,
+ int lane_arg)
+ : m_mask(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(bool value) const {
+ switch (m_lane) {
+ case 0:
+ m_mask = vsetq_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 0);
+ break;
+ case 1:
+ m_mask = vsetq_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 1);
+ break;
+ case 2:
+ m_mask = vsetq_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 2);
+ break;
+ case 3:
+ m_mask = vsetq_lane_u32(value ? 0xFFFFFFFFU : 0, m_mask, 3);
+ break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_u32(m_mask, 0) != 0;
+ case 1: return vgetq_lane_u32(m_mask, 1) != 0;
+ case 2: return vgetq_lane_u32(m_mask, 2) != 0;
+ case 3: return vgetq_lane_u32(m_mask, 3) != 0;
+ }
+ return false;
+ }
+ };
+ using value_type = bool;
+ using abi_type = simd_abi::neon_fixed_size<4>;
+ using implementation_type = uint32x4_t;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION neon_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit neon_mask(value_type value)
+ : m_value(vmovq_n_u32(value ? 0xFFFFFFFFU : 0)) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_u32(
+ (gen(std::integral_constant<std::size_t, 0>()) ? 0xFFFFFFFFU : 0),
+ m_value, 0);
+ m_value = vsetq_lane_u32(
+ (gen(std::integral_constant<std::size_t, 1>()) ? 0xFFFFFFFFU : 0),
+ m_value, 1);
+ m_value = vsetq_lane_u32(
+ (gen(std::integral_constant<std::size_t, 2>()) ? 0xFFFFFFFFU : 0),
+ m_value, 2);
+ m_value = vsetq_lane_u32(
+ (gen(std::integral_constant<std::size_t, 3>()) ? 0xFFFFFFFFU : 0),
+ m_value, 3);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit neon_mask(
+ uint32x4_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator uint32x4_t()
+ const {
+ return m_value;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return static_cast<value_type>(
+ reference(const_cast<uint32x4_t&>(m_value), int(i)));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator||(neon_mask const& other) const {
+ return Derived(vorrq_u32(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived
+ operator&&(neon_mask const& other) const {
+ return Derived(vandq_u32(m_value, other.m_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Derived operator!() const {
+ auto const true_value = static_cast<uint32x4_t>(neon_mask(true));
+ return Derived(veorq_u32(m_value, true_value));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+ neon_mask const& other) const {
+ uint32x4_t const elementwise_equality = vceqq_u32(m_value, other.m_value);
+ uint64x2_t const overall_equality_neon =
+ vreinterpretq_u64_u32(elementwise_equality);
+ return (overall_equality_neon[0] == 0xFFFFFFFFFFFFFFFFULL) &&
+ (overall_equality_neon[1] == 0xFFFFFFFFFFFFFFFFULL);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+ neon_mask const& other) const {
+ return !operator==(other);
+ }
+};
+
+} // namespace Impl
+
+template <class T>
+class simd_mask<T, simd_abi::neon_fixed_size<2>>
+ : public Impl::neon_mask<simd_mask<T, simd_abi::neon_fixed_size<2>>,
+ sizeof(T) * 8, 2> {
+ using base_type = Impl::neon_mask<simd_mask<T, simd_abi::neon_fixed_size<2>>,
+ sizeof(T) * 8, 2>;
+
+ public:
+ using implementation_type = typename base_type::implementation_type;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(bool value)
+ : base_type(value) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, simd_abi::neon_fixed_size<2>> const& other)
+ : base_type(other) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ implementation_type const& value)
+ : base_type(value) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<typename base_type::value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : base_type(gen) {}
+};
+
+template <class T>
+class simd_mask<T, simd_abi::neon_fixed_size<4>>
+ : public Impl::neon_mask<simd_mask<T, simd_abi::neon_fixed_size<4>>,
+ sizeof(T) * 8, 4> {
+ using base_type = Impl::neon_mask<simd_mask<T, simd_abi::neon_fixed_size<4>>,
+ sizeof(T) * 8, 4>;
+
+ public:
+ using implementation_type = typename base_type::implementation_type;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(bool value)
+ : base_type(value) {}
+ template <class U>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, simd_abi::neon_fixed_size<4>> const& other)
+ : base_type(other) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ implementation_type const& value)
+ : base_type(value) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<typename base_type::value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+ G&& gen) noexcept
+ : base_type(gen) {}
+};
+
+template <>
+class simd<double, simd_abi::neon_fixed_size<2>> {
+ float64x2_t m_value;
+
+ public:
+ using value_type = double;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ float64x2_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(float64x2_t& mask_arg,
+ int lane_arg)
+ : m_value(mask_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(double value) const {
+ switch (m_lane) {
+ case 0: m_value = vsetq_lane_f64(value, m_value, 0); break;
+ case 1: m_value = vsetq_lane_f64(value, m_value, 1); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator double() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_f64(m_value, 0);
+ case 1: return vgetq_lane_f64(m_value, 1);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmovq_n_f64(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+              // i.e., the generator must be invocable as { value_type r =
+              // gen(std::integral_constant<std::size_t, i>()); }
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_f64(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vsetq_lane_f64(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ float64x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1q_f64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1q_f64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1q_f64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1q_f64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit
+ operator float64x2_t() const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vnegq_f64(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vmulq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vdivq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vaddq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vsubq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcltq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcgtq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcleq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcgeq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vceqq_f64(static_cast<float64x2_t>(lhs),
+ static_cast<float64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(operator==(lhs, rhs));
+ }
+};
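+
+// Example (illustrative): two-lane double arithmetic with this specialization:
+//   using V = simd<double, simd_abi::neon_fixed_size<2>>;
+//   double in[2] = {3.0, 4.0}, out[2];
+//   V a;
+//   a.copy_from(in, element_aligned_tag());
+//   (a * a).copy_to(out, element_aligned_tag());  // out == {9.0, 16.0}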
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ abs(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vabsq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ floor(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndmq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ ceil(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndpq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ round(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndxq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ trunc(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ copysign(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& b) {
+ uint64x2_t const sign_mask = vreinterpretq_u64_f64(vmovq_n_f64(-0.0));
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vreinterpretq_f64_u64(vorrq_u64(
+ vreinterpretq_u64_f64(static_cast<float64x2_t>(abs(a))),
+ vandq_u64(sign_mask,
+ vreinterpretq_u64_f64(static_cast<float64x2_t>(b))))));
+}
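+// (The bit trick above: abs(a) clears a's sign, and ORing in (sign_mask & b)
+// copies b's sign bit; sign_mask is the sign-bit-only pattern of -0.0.)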
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ sqrt(Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vsqrtq_f64(static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ fma(Experimental::simd<double,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a,
+ Experimental::simd<double,
+ Experimental::simd_abi::neon_fixed_size<2>> const& b,
+ Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& c) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vfmaq_f64(static_cast<float64x2_t>(c), static_cast<float64x2_t>(b),
+ static_cast<float64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ max(Experimental::simd<double,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& b) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vmaxq_f64(static_cast<float64x2_t>(a), static_cast<float64x2_t>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>
+ min(Experimental::simd<double,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a,
+ Experimental::simd<
+ double, Experimental::simd_abi::neon_fixed_size<2>> const& b) {
+ return Experimental::simd<double, Experimental::simd_abi::neon_fixed_size<2>>(
+ vminq_f64(static_cast<float64x2_t>(a), static_cast<float64x2_t>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<double, simd_abi::neon_fixed_size<2>>
+ condition(simd_mask<double, simd_abi::neon_fixed_size<2>> const& a,
+ simd<double, simd_abi::neon_fixed_size<2>> const& b,
+ simd<double, simd_abi::neon_fixed_size<2>> const& c) {
+ return simd<double, simd_abi::neon_fixed_size<2>>(
+ vbslq_f64(static_cast<uint64x2_t>(a), static_cast<float64x2_t>(b),
+ static_cast<float64x2_t>(c)));
+}
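+
+// Example (illustrative): condition is a lane-wise select, analogous to the
+// scalar (m ? b : c); e.g. condition(x < y, x, y) yields a lane-wise minimum.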
+
+template <>
+class simd<float, simd_abi::neon_fixed_size<2>> {
+ float32x2_t m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ float32x2_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(float32x2_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(float value) const {
+ switch (m_lane) {
+ case 0: m_value = vset_lane_f32(value, m_value, 0); break;
+ case 1: m_value = vset_lane_f32(value, m_value, 1); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator float() const {
+ switch (m_lane) {
+ case 0: return vget_lane_f32(m_value, 0);
+ case 1: return vget_lane_f32(m_value, 1);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmov_n_f32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen) {
+ m_value = vset_lane_f32(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vset_lane_f32(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ float32x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1_f32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1_f32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1_f32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1_f32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit
+ operator float32x2_t() const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vneg_f32(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vmul_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vdiv_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vadd_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vsub_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vclt_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcgt_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcle_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcge_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vceq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ abs(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vabs_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndm_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndp_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrndx_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vrnd_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>>
+copysign(
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ b) {
+ uint32x2_t const sign_mask = vreinterpret_u32_f32(vmov_n_f32(-0.0));
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vreinterpret_f32_u32(vorr_u32(
+ vreinterpret_u32_f32(static_cast<float32x2_t>(abs(a))),
+ vand_u32(sign_mask,
+ vreinterpret_u32_f32(static_cast<float32x2_t>(b))))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>
+ sqrt(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vsqrt_f32(static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>>
+fma(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ b,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ c) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vfma_f32(static_cast<float32x2_t>(c), static_cast<float32x2_t>(b),
+ static_cast<float32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>>
+max(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vmax_f32(static_cast<float32x2_t>(a), static_cast<float32x2_t>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<2>>
+min(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<2>>(
+ vmin_f32(static_cast<float32x2_t>(a), static_cast<float32x2_t>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<float, simd_abi::neon_fixed_size<2>>
+ condition(simd_mask<float, simd_abi::neon_fixed_size<2>> const& a,
+ simd<float, simd_abi::neon_fixed_size<2>> const& b,
+ simd<float, simd_abi::neon_fixed_size<2>> const& c) {
+ return simd<float, simd_abi::neon_fixed_size<2>>(
+ vbsl_f32(static_cast<uint32x2_t>(a), static_cast<float32x2_t>(b),
+ static_cast<float32x2_t>(c)));
+}
+
+template <>
+class simd<float, simd_abi::neon_fixed_size<4>> {
+ float32x4_t m_value;
+
+ public:
+ using value_type = float;
+ using abi_type = simd_abi::neon_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ float32x4_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(float32x4_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(float value) const {
+ switch (m_lane) {
+ case 0: m_value = vsetq_lane_f32(value, m_value, 0); break;
+ case 1: m_value = vsetq_lane_f32(value, m_value, 1); break;
+ case 2: m_value = vsetq_lane_f32(value, m_value, 2); break;
+ case 3: m_value = vsetq_lane_f32(value, m_value, 3); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator float() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_f32(m_value, 0);
+ case 1: return vgetq_lane_f32(m_value, 1);
+ case 2: return vgetq_lane_f32(m_value, 2);
+ case 3: return vgetq_lane_f32(m_value, 3);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmovq_n_f32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(G&& gen) {
+ m_value = vsetq_lane_f32(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vsetq_lane_f32(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ m_value = vsetq_lane_f32(gen(std::integral_constant<std::size_t, 2>()),
+ m_value, 2);
+ m_value = vsetq_lane_f32(gen(std::integral_constant<std::size_t, 3>()),
+ m_value, 3);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ float32x4_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1q_f32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1q_f32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1q_f32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1q_f32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit
+ operator float32x4_t() const {
+ return m_value;
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vnegq_f32(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vmulq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vdivq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vaddq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vsubq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcltq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcgtq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcleq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vcgeq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(vceqq_f32(lhs.m_value, rhs.m_value));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ abs(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vabsq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ floor(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vrndmq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ ceil(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vrndpq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ round(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vrndxq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ trunc(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vrndq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>>
+copysign(
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ b) {
+ uint32x4_t const sign_mask = vreinterpretq_u32_f32(vmovq_n_f32(-0.0));
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vreinterpretq_f32_u32(vorrq_u32(
+ vreinterpretq_u32_f32(static_cast<float32x4_t>(abs(a))),
+ vandq_u32(sign_mask,
+ vreinterpretq_u32_f32(static_cast<float32x4_t>(b))))));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>
+ sqrt(Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vsqrtq_f32(static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>>
+fma(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ b,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ c) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vfmaq_f32(static_cast<float32x4_t>(c), static_cast<float32x4_t>(b),
+ static_cast<float32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>>
+max(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vmaxq_f32(static_cast<float32x4_t>(a), static_cast<float32x4_t>(b)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ float, Experimental::simd_abi::neon_fixed_size<4>>
+min(Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ a,
+ Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>> const&
+ b) {
+ return Experimental::simd<float, Experimental::simd_abi::neon_fixed_size<4>>(
+ vminq_f32(static_cast<float32x4_t>(a), static_cast<float32x4_t>(b)));
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<float, simd_abi::neon_fixed_size<4>>
+ condition(simd_mask<float, simd_abi::neon_fixed_size<4>> const& a,
+ simd<float, simd_abi::neon_fixed_size<4>> const& b,
+ simd<float, simd_abi::neon_fixed_size<4>> const& c) {
+ return simd<float, simd_abi::neon_fixed_size<4>>(
+ vbslq_f32(static_cast<uint32x4_t>(a), static_cast<float32x4_t>(b),
+ static_cast<float32x4_t>(c)));
+}
+
+template <>
+class simd<std::int32_t, simd_abi::neon_fixed_size<2>> {
+ int32x2_t m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ int32x2_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(int32x2_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(std::int32_t value) const {
+ switch (m_lane) {
+ case 0: m_value = vset_lane_s32(value, m_value, 0); break;
+ case 1: m_value = vset_lane_s32(value, m_value, 1); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator std::int32_t() const {
+ switch (m_lane) {
+ case 0: return vget_lane_s32(m_value, 0);
+ case 1: return vget_lane_s32(m_value, 1);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmov_n_s32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept {
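+    // Populate the two lanes from the generator one vset_lane_s32 at a time.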
+ m_value = vset_lane_s32(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vset_lane_s32(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ int32x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const& other);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
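+  // vld1/vst1 only require element alignment, so both alignment tags use the same intrinsic.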
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1_s32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1_s32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1_s32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1_s32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator int32x2_t()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vneg_s32(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vsub_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vadd_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vmul_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vceq_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcgt_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vclt_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcle_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcge_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
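+    // NEON has no variable right-shift; shifting left by a negated count shifts right.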
+ return simd(vshl_s32(static_cast<int32x2_t>(lhs),
+ vneg_s32(vmov_n_s32(std::int32_t(rhs)))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vshl_s32(static_cast<int32x2_t>(lhs),
+ vneg_s32(static_cast<int32x2_t>(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(
+ vshl_s32(static_cast<int32x2_t>(lhs), vmov_n_s32(std::int32_t(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vshl_s32(static_cast<int32x2_t>(lhs), static_cast<int32x2_t>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<2>>
+ abs(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::neon_fixed_size<2>>(
+ vabs_s32(static_cast<int32x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<2>>
+ floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<2>>
+ ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<2>>
+ round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<2>>
+ trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>>
+ condition(simd_mask<std::int32_t, simd_abi::neon_fixed_size<2>> const& a,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& b,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& c) {
+ return simd<std::int32_t, simd_abi::neon_fixed_size<2>>(
+ vbsl_s32(static_cast<uint32x2_t>(a), static_cast<int32x2_t>(b),
+ static_cast<int32x2_t>(c)));
+}
+
+template <>
+class simd<std::int32_t, simd_abi::neon_fixed_size<4>> {
+ int32x4_t m_value;
+
+ public:
+ using value_type = std::int32_t;
+ using abi_type = simd_abi::neon_fixed_size<4>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ int32x4_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(int32x4_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(std::int32_t value) const {
+ switch (m_lane) {
+ case 0: m_value = vsetq_lane_s32(value, m_value, 0); break;
+ case 1: m_value = vsetq_lane_s32(value, m_value, 1); break;
+ case 2: m_value = vsetq_lane_s32(value, m_value, 2); break;
+ case 3: m_value = vsetq_lane_s32(value, m_value, 3); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator std::int32_t() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_s32(m_value, 0);
+ case 1: return vgetq_lane_s32(m_value, 1);
+ case 2: return vgetq_lane_s32(m_value, 2);
+ case 3: return vgetq_lane_s32(m_value, 3);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 4;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmovq_n_s32(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_s32(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vsetq_lane_s32(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ m_value = vsetq_lane_s32(gen(std::integral_constant<std::size_t, 2>()),
+ m_value, 2);
+ m_value = vsetq_lane_s32(gen(std::integral_constant<std::size_t, 3>()),
+ m_value, 3);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ int32x4_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const& other);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1q_s32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1q_s32(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1q_s32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1q_s32(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator int32x4_t()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vnegq_s32(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vsubq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vaddq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vmulq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vceqq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcgtq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcltq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcleq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcgeq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(vshlq_s32(static_cast<int32x4_t>(lhs),
+ vnegq_s32(vmovq_n_s32(std::int32_t(rhs)))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vshlq_s32(static_cast<int32x4_t>(lhs),
+ vnegq_s32(static_cast<int32x4_t>(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(
+ vshlq_s32(static_cast<int32x4_t>(lhs), vmovq_n_s32(std::int32_t(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vshlq_s32(static_cast<int32x4_t>(lhs), static_cast<int32x4_t>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<4>>
+ abs(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return Experimental::simd<std::int32_t,
+ Experimental::simd_abi::neon_fixed_size<4>>(
+ vabsq_s32(static_cast<int32x4_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<4>>
+ floor(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<4>>
+ ceil(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<4>>
+ round(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int32_t, Experimental::simd_abi::neon_fixed_size<4>>
+ trunc(Experimental::simd<
+ std::int32_t, Experimental::simd_abi::neon_fixed_size<4>> const& a) {
+ return a;
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>>
+ condition(simd_mask<std::int32_t, simd_abi::neon_fixed_size<4>> const& a,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& b,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& c) {
+ return simd<std::int32_t, simd_abi::neon_fixed_size<4>>(
+ vbslq_s32(static_cast<uint32x4_t>(a), static_cast<int32x4_t>(b),
+ static_cast<int32x4_t>(c)));
+}
+
+template <>
+class simd<std::int64_t, simd_abi::neon_fixed_size<2>> {
+ int64x2_t m_value;
+
+ public:
+ using value_type = std::int64_t;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ int64x2_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(int64x2_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(std::int64_t value) const {
+ switch (m_lane) {
+ case 0: m_value = vsetq_lane_s64(value, m_value, 0); break;
+ case 1: m_value = vsetq_lane_s64(value, m_value, 1); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator std::int64_t() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_s64(m_value, 0);
+ case 1: return vgetq_lane_s64(m_value, 1);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmovq_n_s64(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_s64(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vsetq_lane_s64(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ int64x2_t const& value_in)
+ : m_value(value_in) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::uint64_t, abi_type> const&);
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1q_s64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1q_s64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1q_s64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1q_s64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator int64x2_t()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+ operator-() const noexcept {
+ return simd(vnegq_s64(m_value));
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vsubq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vaddq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
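+    // NEON provides no 64-bit integer multiply, so go through the lane-wise generator.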
+ return simd([&](std::size_t i) { return lhs[i] * rhs[i]; });
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vceqq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcgtq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcltq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcleq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vcgeq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(vshlq_s64(static_cast<int64x2_t>(lhs),
+ vnegq_s64(vmovq_n_s64(std::int64_t(rhs)))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vshlq_s64(static_cast<int64x2_t>(lhs),
+ vnegq_s64(static_cast<int64x2_t>(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(
+ vshlq_s64(static_cast<int64x2_t>(lhs), vmovq_n_s64(std::int64_t(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vshlq_s64(static_cast<int64x2_t>(lhs), static_cast<int64x2_t>(rhs)));
+ }
+};
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ abs(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return Experimental::simd<std::int64_t,
+ Experimental::simd_abi::neon_fixed_size<2>>(
+ vabsq_s64(static_cast<int64x2_t>(a)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ floor(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ ceil(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ round(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ Experimental::simd<std::int64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ trunc(Experimental::simd<
+ std::int64_t, Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>>
+ condition(simd_mask<std::int64_t, simd_abi::neon_fixed_size<2>> const& a,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>> const& b,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>> const& c) {
+ return simd<std::int64_t, simd_abi::neon_fixed_size<2>>(
+ vbslq_s64(static_cast<uint64x2_t>(a), static_cast<int64x2_t>(b),
+ static_cast<int64x2_t>(c)));
+}
+
+template <>
+class simd<std::uint64_t, simd_abi::neon_fixed_size<2>> {
+ uint64x2_t m_value;
+
+ public:
+ using value_type = std::uint64_t;
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using mask_type = simd_mask<value_type, abi_type>;
+ class reference {
+ uint64x2_t& m_value;
+ int m_lane;
+
+ public:
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(uint64x2_t& value_arg,
+ int lane_arg)
+ : m_value(value_arg), m_lane(lane_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+ operator=(std::uint64_t value) const {
+ switch (m_lane) {
+ case 0: m_value = vsetq_lane_u64(value, m_value, 0); break;
+ case 1: m_value = vsetq_lane_u64(value, m_value, 1); break;
+ }
+ return *this;
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator std::uint64_t() const {
+ switch (m_lane) {
+ case 0: return vgetq_lane_u64(m_value, 0);
+ case 1: return vgetq_lane_u64(m_value, 1);
+ }
+ return 0;
+ }
+ };
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd() = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+ return 2;
+ }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+ : m_value(vmovq_n_u64(value_type(value))) {}
+ template <class G,
+ std::enable_if_t<
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ G&& gen) noexcept {
+ m_value = vsetq_lane_u64(gen(std::integral_constant<std::size_t, 0>()),
+ m_value, 0);
+ m_value = vsetq_lane_u64(gen(std::integral_constant<std::size_t, 1>()),
+ m_value, 1);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+ uint64x2_t const& value_in)
+ : m_value(value_in) {}
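+  // Widen each 32-bit lane with vmovl_s32 (sign extension), then reinterpret as unsigned.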
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+ simd<std::int32_t, abi_type> const& other)
+ : m_value(
+ vreinterpretq_u64_s64(vmovl_s32(static_cast<int32x2_t>(other)))) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+ return reference(m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+ operator[](std::size_t i) const {
+ return reference(const_cast<simd*>(this)->m_value, int(i));
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ element_aligned_tag) {
+ m_value = vld1q_u64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+ vector_aligned_tag) {
+ m_value = vld1q_u64(ptr);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+ value_type* ptr, element_aligned_tag) const {
+ vst1q_u64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(value_type* ptr,
+ vector_aligned_tag) const {
+ vst1q_u64(ptr, m_value);
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator uint64x2_t()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vsubq_u64(static_cast<uint64x2_t>(lhs), static_cast<uint64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vaddq_u64(static_cast<uint64x2_t>(lhs), static_cast<uint64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
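+    // There is no vmulq_u64 either; multiply lane by lane.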
+ return simd([&](std::size_t i) { return lhs[i] * rhs[i]; });
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator&(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vandq_u64(static_cast<uint64x2_t>(lhs), static_cast<uint64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator|(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(
+ vorrq_u64(static_cast<uint64x2_t>(lhs), static_cast<uint64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(
+ vceqq_u64(static_cast<uint64x2_t>(lhs), static_cast<uint64x2_t>(rhs)));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return !(lhs == rhs);
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(vshlq_u64(static_cast<uint64x2_t>(lhs),
+ vnegq_s64(vmovq_n_s64(std::int64_t(rhs)))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vshlq_u64(
+ static_cast<uint64x2_t>(lhs),
+ vnegq_s64(vreinterpretq_s64_u64(static_cast<uint64x2_t>(rhs)))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(vshlq_u64(static_cast<uint64x2_t>(lhs),
+ vmovq_n_s64(std::int64_t(rhs))));
+ }
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION friend simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(vshlq_u64(static_cast<uint64x2_t>(lhs),
+ vreinterpretq_s64_u64(static_cast<uint64x2_t>(rhs))));
+ }
+};
+
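+// Conversion constructors defined out of line, once both operand classes are complete;
+// vmovn_s64 narrows each lane by keeping its low 32 bits.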
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int32_t, simd_abi::neon_fixed_size<2>>::simd(
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>> const& other)
+ : m_value(
+ vmovn_s64(vreinterpretq_s64_u64(static_cast<uint64x2_t>(other)))) {}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int64_t, simd_abi::neon_fixed_size<2>>::simd(
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>> const& other)
+ : m_value(vreinterpretq_s64_u64(static_cast<uint64x2_t>(other))) {}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>
+ abs(simd<std::uint64_t, simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+} // namespace Experimental
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::neon_fixed_size<2>>
+floor(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::neon_fixed_size<2>>
+ceil(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::neon_fixed_size<2>>
+round(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION Experimental::simd<
+ std::uint64_t, Experimental::simd_abi::neon_fixed_size<2>>
+trunc(Experimental::simd<std::uint64_t,
+ Experimental::simd_abi::neon_fixed_size<2>> const& a) {
+ return a;
+}
+
+namespace Experimental {
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>
+ condition(simd_mask<std::uint64_t, simd_abi::neon_fixed_size<2>> const& a,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>> const& b,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>> const& c) {
+ return simd<std::uint64_t, simd_abi::neon_fixed_size<2>>(
+ vbslq_u64(static_cast<uint64x2_t>(a), static_cast<uint64x2_t>(b),
+ static_cast<uint64x2_t>(c)));
+}
+
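+// Masked where()-expressions: copies, gathers, and scatters touch only lanes whose mask bit is set.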
+template <>
+class const_where_expression<simd_mask<double, simd_abi::neon_fixed_size<2>>,
+ simd<double, simd_abi::neon_fixed_size<2>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using value_type = simd<double, abi_type>;
+ using mask_type = simd_mask<double, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(double* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ double* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<double, simd_abi::neon_fixed_size<2>>,
+ simd<double, simd_abi::neon_fixed_size<2>>>
+ : public const_where_expression<
+ simd_mask<double, simd_abi::neon_fixed_size<2>>,
+ simd<double, simd_abi::neon_fixed_size<2>>> {
+ public:
+ where_expression(
+ simd_mask<double, simd_abi::neon_fixed_size<2>> const& mask_arg,
+ simd<double, simd_abi::neon_fixed_size<2>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(double const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ double const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<double, simd_abi::neon_fixed_size<2>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
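+    // Blend under the mask: lanes with the mask bit set take x, the rest keep m_value.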
+ auto const x_as_value_type =
+ static_cast<simd<double, simd_abi::neon_fixed_size<2>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<double, simd_abi::neon_fixed_size<2>>>(
+ vbslq_f64(static_cast<uint64x2_t>(m_mask),
+ static_cast<float64x2_t>(x_as_value_type),
+ static_cast<float64x2_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::neon_fixed_size<2>>,
+ simd<float, simd_abi::neon_fixed_size<2>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::neon_fixed_size<2>>,
+ simd<float, simd_abi::neon_fixed_size<2>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::neon_fixed_size<2>>,
+ simd<float, simd_abi::neon_fixed_size<2>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::neon_fixed_size<2>> const& mask_arg,
+ simd<float, simd_abi::neon_fixed_size<2>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_from(float const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<float, simd_abi::neon_fixed_size<2>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::neon_fixed_size<2>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<float, simd_abi::neon_fixed_size<2>>>(
+ vbsl_f32(static_cast<uint32x2_t>(m_mask),
+ static_cast<float32x2_t>(x_as_value_type),
+ static_cast<float32x2_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<simd_mask<float, simd_abi::neon_fixed_size<4>>,
+ simd<float, simd_abi::neon_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<4>;
+ using value_type = simd<float, abi_type>;
+ using mask_type = simd_mask<float, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ if (m_mask[2]) mem[2] = m_value[2];
+ if (m_mask[3]) mem[3] = m_value[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(float* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ if (m_mask[2]) mem[2] = m_value[2];
+ if (m_mask[3]) mem[3] = m_value[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ float* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ if (m_mask[2]) mem[index[2]] = m_value[2];
+ if (m_mask[3]) mem[index[3]] = m_value[3];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<float, simd_abi::neon_fixed_size<4>>,
+ simd<float, simd_abi::neon_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<float, simd_abi::neon_fixed_size<4>>,
+ simd<float, simd_abi::neon_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<float, simd_abi::neon_fixed_size<4>> const& mask_arg,
+ simd<float, simd_abi::neon_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ if (m_mask[2]) m_value[2] = mem[2];
+ if (m_mask[3]) m_value[3] = mem[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(float const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ if (m_mask[2]) m_value[2] = mem[2];
+ if (m_mask[3]) m_value[3] = mem[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ float const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ if (m_mask[2]) m_value[2] = mem[index[2]];
+ if (m_mask[3]) m_value[3] = mem[index[3]];
+ }
+ template <class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<float, simd_abi::neon_fixed_size<4>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<float, simd_abi::neon_fixed_size<4>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<float, simd_abi::neon_fixed_size<4>>>(
+ vbslq_f32(static_cast<uint32x4_t>(m_mask),
+ static_cast<float32x4_t>(x_as_value_type),
+ static_cast<float32x4_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<2>> const& mask_arg,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ }
+
+ template <
+ class U,
+ std::enable_if_t<
+ std::is_convertible_v<U, simd<int32_t, simd_abi::neon_fixed_size<2>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<int32_t, simd_abi::neon_fixed_size<2>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<int32_t, simd_abi::neon_fixed_size<2>>>(
+ vbsl_s32(static_cast<uint32x2_t>(m_mask),
+ static_cast<int32x2_t>(x_as_value_type),
+ static_cast<int32x2_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<4>;
+ using value_type = simd<std::int32_t, abi_type>;
+ using mask_type = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ if (m_mask[2]) mem[2] = m_value[2];
+ if (m_mask[3]) mem[3] = m_value[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int32_t* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ if (m_mask[2]) mem[2] = m_value[2];
+ if (m_mask[3]) mem[3] = m_value[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int32_t* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ if (m_mask[2]) mem[index[2]] = m_value[2];
+ if (m_mask[3]) mem[index[3]] = m_value[3];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::neon_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>>>
+ : public const_where_expression<
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<4>>,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>>> {
+ public:
+ where_expression(
+ simd_mask<std::int32_t, simd_abi::neon_fixed_size<4>> const& mask_arg,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ if (m_mask[2]) m_value[2] = mem[2];
+ if (m_mask[3]) m_value[3] = mem[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int32_t const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ if (m_mask[2]) m_value[2] = mem[2];
+ if (m_mask[3]) m_value[3] = mem[3];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int32_t const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<4>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ if (m_mask[2]) m_value[2] = mem[index[2]];
+ if (m_mask[3]) m_value[3] = mem[index[3]];
+ }
+ template <
+ class U,
+ std::enable_if_t<
+ std::is_convertible_v<U, simd<int32_t, simd_abi::neon_fixed_size<4>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<int32_t, simd_abi::neon_fixed_size<4>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<int32_t, simd_abi::neon_fixed_size<4>>>(
+ vbslq_s32(static_cast<uint32x4_t>(m_mask),
+ static_cast<int32x4_t>(x_as_value_type),
+ static_cast<int32x4_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::int64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using value_type = simd<std::int64_t, abi_type>;
+ using mask_type = simd_mask<std::int64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int64_t* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::int64_t* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::int64_t* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::int64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>>>
+ : public const_where_expression<
+ simd_mask<std::int64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ where_expression(
+ simd_mask<std::int64_t, simd_abi::neon_fixed_size<2>> const& mask_arg,
+ simd<std::int64_t, simd_abi::neon_fixed_size<2>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int64_t const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::int64_t const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::int64_t const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ }
+
+ template <
+ class U,
+ std::enable_if_t<std::is_convertible_v<
+ U, simd<std::int64_t, simd_abi::neon_fixed_size<2>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::int64_t, simd_abi::neon_fixed_size<2>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<std::int64_t, simd_abi::neon_fixed_size<2>>>(
+ vbslq_s64(static_cast<uint64x2_t>(m_mask),
+ static_cast<int64x2_t>(x_as_value_type),
+ static_cast<int64x2_t>(m_value)));
+ }
+};
+
+template <>
+class const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ using abi_type = simd_abi::neon_fixed_size<2>;
+ using value_type = simd<std::uint64_t, abi_type>;
+ using mask_type = simd_mask<std::uint64_t, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint64_t* mem, element_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_to(std::uint64_t* mem, vector_aligned_tag) const {
+ if (m_mask[0]) mem[0] = m_value[0];
+ if (m_mask[1]) mem[1] = m_value[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void scatter_to(
+ std::uint64_t* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) const {
+ if (m_mask[0]) mem[index[0]] = m_value[0];
+ if (m_mask[1]) mem[index[1]] = m_value[1];
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type const&
+ impl_get_value() const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type const&
+ impl_get_mask() const {
+ return m_mask;
+ }
+};
+
+template <>
+class where_expression<simd_mask<std::uint64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>>
+ : public const_where_expression<
+ simd_mask<std::uint64_t, simd_abi::neon_fixed_size<2>>,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>> {
+ public:
+ where_expression(
+ simd_mask<std::uint64_t, simd_abi::neon_fixed_size<2>> const& mask_arg,
+ simd<std::uint64_t, simd_abi::neon_fixed_size<2>>& value_arg)
+ : const_where_expression(mask_arg, value_arg) {}
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint64_t const* mem, element_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void copy_from(std::uint64_t const* mem, vector_aligned_tag) {
+ if (m_mask[0]) m_value[0] = mem[0];
+ if (m_mask[1]) m_value[1] = mem[1];
+ }
+
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+ void gather_from(
+ std::uint64_t const* mem,
+ simd<std::int32_t, simd_abi::neon_fixed_size<2>> const& index) {
+ if (m_mask[0]) m_value[0] = mem[index[0]];
+ if (m_mask[1]) m_value[1] = mem[index[1]];
+ }
+
+ template <class U,
+ std::enable_if_t<
+ std::is_convertible_v<
+ U, simd<std::uint64_t, simd_abi::neon_fixed_size<2>>>,
+ bool> = false>
+ KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ auto const x_as_value_type =
+ static_cast<simd<std::uint64_t, simd_abi::neon_fixed_size<2>>>(
+ std::forward<U>(x));
+ m_value = static_cast<simd<std::uint64_t, simd_abi::neon_fixed_size<2>>>(
+ vbslq_u64(static_cast<uint64x2_t>(m_mask),
+ static_cast<uint64x2_t>(x_as_value_type),
+ static_cast<uint64x2_t>(m_value)));
+ }
+};
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef KOKKOS_SIMD_SCALAR_HPP
+#define KOKKOS_SIMD_SCALAR_HPP
+
+#include <type_traits>
+#include <climits>
+#include <cfloat>
+
+#include <Kokkos_SIMD_Common.hpp>
+
+#ifdef KOKKOS_SIMD_COMMON_MATH_HPP
+#error \
+ "Kokkos_SIMD_Scalar.hpp must be included before Kokkos_SIMD_Common_Math.hpp!"
+#endif
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
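+// Single-lane fallback ABI; it uses no vector intrinsics, so it also works in device code.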
+class scalar {};
+
+} // namespace simd_abi
+
+template <class T>
+class simd_mask<T, simd_abi::scalar> {
+ bool m_value;
+
+ public:
+ using value_type = bool;
+ using simd_type = simd<T, simd_abi::scalar>;
+ using abi_type = simd_abi::scalar;
+ using reference = value_type&;
+ KOKKOS_DEFAULTED_FUNCTION simd_mask() = default;
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
+ KOKKOS_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+ : m_value(value) {}
+  template <class G,
+            std::enable_if_t<
+                std::is_invocable_r_v<value_type, G,
+                                      std::integral_constant<std::size_t, 0>>,
+                bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr explicit simd_mask(G&& gen) noexcept
+      : m_value(gen(std::integral_constant<std::size_t, 0>())) {}
+ template <class U>
+ KOKKOS_FORCEINLINE_FUNCTION simd_mask(
+ simd_mask<U, simd_abi::scalar> const& other)
+ : m_value(static_cast<bool>(other)) {}
+ KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator bool() const {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION simd_mask
+ operator||(simd_mask const& other) const {
+ return simd_mask(m_value || other.m_value);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION simd_mask
+ operator&&(simd_mask const& other) const {
+ return simd_mask(m_value && other.m_value);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION simd_mask operator!() const {
+ return simd_mask(!m_value);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION bool operator==(simd_mask const& other) const {
+ return m_value == other.m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION bool operator!=(simd_mask const& other) const {
+ return m_value != other.m_value;
+ }
+};
+
+template <class T>
+class simd<T, simd_abi::scalar> {
+ T m_value;
+
+ public:
+ using value_type = T;
+ using abi_type = simd_abi::scalar;
+ using mask_type = simd_mask<T, abi_type>;
+ using reference = value_type&;
+ KOKKOS_DEFAULTED_FUNCTION simd() = default;
+ KOKKOS_DEFAULTED_FUNCTION simd(simd const&) = default;
+ KOKKOS_DEFAULTED_FUNCTION simd(simd&&) = default;
+ KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd const&) = default;
+ KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd&&) = default;
+ KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_FORCEINLINE_FUNCTION simd(U&& value) : m_value(value) {}
+ template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+ bool> = false>
+ KOKKOS_FORCEINLINE_FUNCTION explicit simd(simd<U, abi_type> const& other)
+ : m_value(static_cast<U>(other)) {}
+ template <class G,
+ std::enable_if_t<
+ // basically, can you do { value_type r =
+ // gen(std::integral_constant<std::size_t, i>()); }
+ std::is_invocable_r_v<value_type, G,
+ std::integral_constant<std::size_t, 0>>,
+ bool> = false>
+ KOKKOS_FORCEINLINE_FUNCTION constexpr explicit simd(G&& gen) noexcept
+      : m_value(gen(std::integral_constant<std::size_t, 0>())) {}
+ KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator T() const {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION void copy_from(T const* ptr,
+ element_aligned_tag) {
+ m_value = *ptr;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION void copy_from(T const* ptr, vector_aligned_tag) {
+ m_value = *ptr;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION void copy_to(T* ptr, element_aligned_tag) const {
+ *ptr = m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION void copy_to(T* ptr, vector_aligned_tag) const {
+ *ptr = m_value;
+ }
+
+ KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
+ return m_value;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION simd operator-() const noexcept {
+ return simd(-m_value);
+ }
+
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator*(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value * rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator/(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value / rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator+(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value + rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator-(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value - rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator>>(
+ simd const& lhs, int rhs) noexcept {
+ return simd(lhs.m_value >> rhs);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator>>(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value >> rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator<<(
+ simd const& lhs, int rhs) noexcept {
+ return simd(lhs.m_value << rhs);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator<<(
+ simd const& lhs, simd const& rhs) noexcept {
+ return simd(lhs.m_value << rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator&(
+ simd const& lhs, simd const& rhs) noexcept {
+ return lhs.m_value & rhs.m_value;
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr simd operator|(
+ simd const& lhs, simd const& rhs) noexcept {
+ return lhs.m_value | rhs.m_value;
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator<(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value < rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator>(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value > rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator<=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value <= rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator>=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value >= rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator==(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value == rhs.m_value);
+ }
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION friend constexpr mask_type
+ operator!=(simd const& lhs, simd const& rhs) noexcept {
+ return mask_type(lhs.m_value != rhs.m_value);
+ }
+};
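+
+// A minimal usage sketch of the scalar-ABI types above (illustration only,
+// not part of the upstream sources; `in`/`out` are hypothetical doubles):
+//
+//   using d_t = Kokkos::Experimental::simd<double,
+//                                          Kokkos::Experimental::simd_abi::scalar>;
+//   double in = 3.0, out = 0.0;
+//   d_t a;
+//   a.copy_from(&in, Kokkos::Experimental::element_aligned_tag());
+//   d_t b = a * d_t(2.0) + d_t(1.0);  // arithmetic acts on the single lane
+//   auto m = b > d_t(5.0);            // simd_mask<double, simd_abi::scalar>
+//   if (m[0]) b.copy_to(&out, Kokkos::Experimental::element_aligned_tag());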
+
+} // namespace Experimental
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Experimental::simd_abi::scalar>
+ abs(Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ if constexpr (std::is_signed_v<T>) {
+ return (a < 0 ? -a : a);
+ }
+ return a;
+}
+
+template <typename T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto floor(
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ using data_type = std::conditional_t<std::is_floating_point_v<T>, T, double>;
+ return Experimental::simd<data_type, Experimental::simd_abi::scalar>(
+ Kokkos::floor(static_cast<data_type>(a[0])));
+}
+
+template <typename T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto ceil(
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ using data_type = std::conditional_t<std::is_floating_point_v<T>, T, double>;
+ return Experimental::simd<data_type, Experimental::simd_abi::scalar>(
+ Kokkos::ceil(static_cast<data_type>(a[0])));
+}
+
+template <typename T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto round(
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ using data_type = std::conditional_t<std::is_floating_point_v<T>, T, double>;
+ return Experimental::simd<data_type, Experimental::simd_abi::scalar>(
+ Experimental::round_half_to_nearest_even(static_cast<data_type>(a[0])));
+}
+
+template <typename T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto trunc(
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ using data_type = std::conditional_t<std::is_floating_point_v<T>, T, double>;
+ return Experimental::simd<data_type, Experimental::simd_abi::scalar>(
+ Kokkos::trunc(static_cast<data_type>(a[0])));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Experimental::simd_abi::scalar>
+ sqrt(Experimental::simd<T, Experimental::simd_abi::scalar> const& a) {
+ return Experimental::simd<T, Experimental::simd_abi::scalar>(
+ std::sqrt(static_cast<T>(a)));
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Experimental::simd_abi::scalar>
+ fma(Experimental::simd<T, Experimental::simd_abi::scalar> const& x,
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& y,
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& z) {
+ return Experimental::simd<T, Experimental::simd_abi::scalar>(
+ (static_cast<T>(x) * static_cast<T>(y)) + static_cast<T>(z));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+ Experimental::simd<T, Experimental::simd_abi::scalar>
+ copysign(Experimental::simd<T, Experimental::simd_abi::scalar> const& a,
+ Experimental::simd<T, Experimental::simd_abi::scalar> const& b) {
+ return std::copysign(static_cast<T>(a), static_cast<T>(b));
+}
+
+namespace Experimental {
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> condition(
+ desul::Impl::dont_deduce_this_parameter_t<
+ simd_mask<T, simd_abi::scalar>> const& a,
+ simd<T, simd_abi::scalar> const& b, simd<T, simd_abi::scalar> const& c) {
+ return simd<T, simd_abi::scalar>(static_cast<bool>(a) ? static_cast<T>(b)
+ : static_cast<T>(c));
+}
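+
+// For the scalar ABI, condition(m, b, c) above reduces to the plain ternary
+// m ? b : c on the single lane; e.g. (sketch)
+// condition(simd_mask<double, simd_abi::scalar>(true), x, y) yields x's value.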
+
+template <class T>
+class const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>> {
+ public:
+ using abi_type = simd_abi::scalar;
+ using value_type = simd<T, abi_type>;
+ using mask_type = simd_mask<T, abi_type>;
+
+ protected:
+ value_type& m_value;
+ mask_type const& m_mask;
+
+ public:
+ KOKKOS_FORCEINLINE_FUNCTION
+ const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+ : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+
+ KOKKOS_FORCEINLINE_FUNCTION
+ void copy_to(T* mem, element_aligned_tag) const {
+ if (static_cast<bool>(m_mask)) *mem = static_cast<T>(m_value);
+ }
+ KOKKOS_FORCEINLINE_FUNCTION
+ void copy_to(T* mem, vector_aligned_tag) const {
+ if (static_cast<bool>(m_mask)) *mem = static_cast<T>(m_value);
+ }
+ template <class Integral>
+ KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
+ scatter_to(T* mem, simd<Integral, simd_abi::scalar> const& index) const {
+ if (static_cast<bool>(m_mask))
+ mem[static_cast<Integral>(index)] = static_cast<T>(m_value);
+ }
+
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION value_type const& impl_get_value()
+ const {
+ return m_value;
+ }
+
+ [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION mask_type const& impl_get_mask()
+ const {
+ return m_mask;
+ }
+};
+
+template <class T>
+class where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>>
+ : public const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>> {
+ using base_type = const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>>;
+
+ public:
+ using typename base_type::value_type;
+ KOKKOS_FORCEINLINE_FUNCTION
+ where_expression(simd_mask<T, simd_abi::scalar> const& mask_arg,
+ simd<T, simd_abi::scalar>& value_arg)
+ : base_type(mask_arg, value_arg) {}
+ KOKKOS_FORCEINLINE_FUNCTION
+ void copy_from(T const* mem, element_aligned_tag) {
+ if (static_cast<bool>(this->m_mask)) this->m_value = *mem;
+ }
+ KOKKOS_FORCEINLINE_FUNCTION
+ void copy_from(T const* mem, vector_aligned_tag) {
+ if (static_cast<bool>(this->m_mask)) this->m_value = *mem;
+ }
+ template <class Integral>
+ KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
+ gather_from(T const* mem, simd<Integral, simd_abi::scalar> const& index) {
+ if (static_cast<bool>(this->m_mask))
+ this->m_value = mem[static_cast<Integral>(index)];
+ }
+ template <class U, std::enable_if_t<
+ std::is_convertible_v<U, simd<T, simd_abi::scalar>>,
+ bool> = false>
+ KOKKOS_FORCEINLINE_FUNCTION void operator=(U&& x) {
+ if (static_cast<bool>(this->m_mask))
+ this->m_value =
+ static_cast<simd<T, simd_abi::scalar>>(std::forward<U>(x));
+ }
+};
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+ where_expression<simd_mask<T, Kokkos::Experimental::simd_abi::scalar>,
+ simd<T, Kokkos::Experimental::simd_abi::scalar>>
+ where(typename simd<
+ T, Kokkos::Experimental::simd_abi::scalar>::mask_type const& mask,
+ simd<T, Kokkos::Experimental::simd_abi::scalar>& value) {
+ return where_expression(mask, value);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+ const_where_expression<simd_mask<T, Kokkos::Experimental::simd_abi::scalar>,
+ simd<T, Kokkos::Experimental::simd_abi::scalar>>
+ where(typename simd<
+ T, Kokkos::Experimental::simd_abi::scalar>::mask_type const& mask,
+ simd<T, Kokkos::Experimental::simd_abi::scalar> const& value) {
+ return const_where_expression(mask, value);
+}
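+
+// A sketch of how where() is meant to be used (illustration only; `ptr` is a
+// hypothetical valid double*): the mask gates loads, stores, and assignment.
+//
+//   simd<double, simd_abi::scalar> v(0.0);
+//   simd_mask<double, simd_abi::scalar> m(true);
+//   where(m, v).copy_from(ptr, element_aligned_tag());  // load only if m set
+//   where(m, v) = simd<double, simd_abi::scalar>(42.0); // masked assignment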
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool all_of(
+ simd_mask<T, Kokkos::Experimental::simd_abi::scalar> const& a) {
+ return a == simd_mask<T, Kokkos::Experimental::simd_abi::scalar>(true);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool any_of(
+ simd_mask<T, Kokkos::Experimental::simd_abi::scalar> const& a) {
+ return a != simd_mask<T, Kokkos::Experimental::simd_abi::scalar>(false);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool none_of(
+ simd_mask<T, Kokkos::Experimental::simd_abi::scalar> const& a) {
+ return a == simd_mask<T, Kokkos::Experimental::simd_abi::scalar>(false);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+reduce(const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>> const& x,
+ T identity_element, std::plus<>) {
+ return static_cast<bool>(x.impl_get_mask())
+ ? static_cast<T>(x.impl_get_value())
+ : identity_element;
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+hmax(const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>> const& x) {
+ return static_cast<bool>(x.impl_get_mask())
+ ? static_cast<T>(x.impl_get_value())
+ : Kokkos::reduction_identity<T>::max();
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+hmin(const_where_expression<simd_mask<T, simd_abi::scalar>,
+ simd<T, simd_abi::scalar>> const& x) {
+ return static_cast<bool>(x.impl_get_mask())
+ ? static_cast<T>(x.impl_get_value())
+ : Kokkos::reduction_identity<T>::min();
+}
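+
+// The masked reductions above fall back to the identity element when the mask
+// is false; e.g. for a scalar-ABI value v and mask m (sketch):
+//
+//   reduce(where(m, v), 0.0, std::plus<>{});  // v[0] if m, else 0.0
+//   hmax(where(m, v));  // v[0] if m, else reduction_identity<double>::max()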
+
+} // namespace Experimental
+} // namespace Kokkos
+
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+// This file is needed in order to get the linker language
+// for the header-only submodule.
+// While we set the language properties in our normal CMake
+// path, it does not get set in the Trilinos environment.
+// Furthermore, setting LINKER_LANGUAGE is only supported
+// in CMake 3.19 and up.
+void KOKKOS_SIMD_SRC_DUMMY_PREVENT_LINK_ERROR() {}
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_CONFIG_HPP_
+#define DESUL_ATOMICS_CONFIG_HPP_
+
+#cmakedefine DESUL_ATOMICS_ENABLE_CUDA
+#cmakedefine DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
+#cmakedefine DESUL_ATOMICS_ENABLE_HIP
+#cmakedefine DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+#cmakedefine DESUL_ATOMICS_ENABLE_SYCL
+#cmakedefine DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+#cmakedefine DESUL_ATOMICS_ENABLE_OPENMP
+#cmakedefine DESUL_ATOMICS_ENABLE_OPENACC
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_ADAPT_CXX_HPP_
+#define DESUL_ATOMICS_ADAPT_CXX_HPP_
+
+#include <atomic>
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+template <class MemoryOrderDesul>
+struct CXXMemoryOrder;
+
+template <>
+struct CXXMemoryOrder<MemoryOrderRelaxed> {
+ static constexpr std::memory_order value = std::memory_order_relaxed;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderAcquire> {
+ static constexpr std::memory_order value = std::memory_order_acquire;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderRelease> {
+ static constexpr std::memory_order value = std::memory_order_release;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderAcqRel> {
+ static constexpr std::memory_order value = std::memory_order_acq_rel;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderSeqCst> {
+ static constexpr std::memory_order value = std::memory_order_seq_cst;
+};
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_ADAPT_GCC_HPP_
+#define DESUL_ATOMICS_ADAPT_GCC_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+template <class MemoryOrder>
+struct GCCMemoryOrder;
+
+template <>
+struct GCCMemoryOrder<MemoryOrderRelaxed> {
+ static constexpr int value = __ATOMIC_RELAXED;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderAcquire> {
+ static constexpr int value = __ATOMIC_ACQUIRE;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderRelease> {
+ static constexpr int value = __ATOMIC_RELEASE;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderAcqRel> {
+ static constexpr int value = __ATOMIC_ACQ_REL;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderSeqCst> {
+ static constexpr int value = __ATOMIC_SEQ_CST;
+};
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_ADAPT_HIP_HPP_
+#define DESUL_ATOMICS_ADAPT_HIP_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+// FIXME same code as GCCMemoryOrder
+template <class MemoryOrder>
+struct HIPMemoryOrder;
+
+template <>
+struct HIPMemoryOrder<MemoryOrderRelaxed> {
+ static constexpr int value = __ATOMIC_RELAXED;
+};
+
+template <>
+struct HIPMemoryOrder<MemoryOrderAcquire> {
+ static constexpr int value = __ATOMIC_ACQUIRE;
+};
+
+template <>
+struct HIPMemoryOrder<MemoryOrderRelease> {
+ static constexpr int value = __ATOMIC_RELEASE;
+};
+
+template <>
+struct HIPMemoryOrder<MemoryOrderAcqRel> {
+ static constexpr int value = __ATOMIC_ACQ_REL;
+};
+
+template <>
+struct HIPMemoryOrder<MemoryOrderSeqCst> {
+ static constexpr int value = __ATOMIC_SEQ_CST;
+};
+
+// __HIP_MEMORY_SCOPE_SYSTEM
+// __HIP_MEMORY_SCOPE_AGENT
+// __HIP_MEMORY_SCOPE_WORKGROUP
+// __HIP_MEMORY_SCOPE_WAVEFRONT
+// __HIP_MEMORY_SCOPE_SINGLETHREAD
+template <class MemoryScope>
+struct HIPMemoryScope;
+
+template <>
+struct HIPMemoryScope<MemoryScopeCore> {
+ static constexpr int value = __HIP_MEMORY_SCOPE_WORKGROUP;
+};
+
+template <>
+struct HIPMemoryScope<MemoryScopeDevice> {
+ static constexpr int value = __HIP_MEMORY_SCOPE_AGENT;
+};
+
+template <>
+struct HIPMemoryScope<MemoryScopeNode> {
+ static constexpr int value = __HIP_MEMORY_SCOPE_SYSTEM;
+};
+
+template <>
+struct HIPMemoryScope<MemoryScopeSystem> {
+ static constexpr int value = __HIP_MEMORY_SCOPE_SYSTEM;
+};
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_ADAPT_SYCL_HPP_
+#define DESUL_ATOMICS_ADAPT_SYCL_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+// FIXME_SYCL SYCL2020 dictates that <sycl/sycl.hpp> is the header to include
+// but icpx 2022.1.0 and earlier versions only provide <CL/sycl.hpp>
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+namespace desul {
+namespace Impl {
+
+//<editor-fold desc="SYCL memory order">
+// The default memory order for sycl::atomic_ref
+// can be seq_cst, acq_rel, or relaxed according to the
+// "SYCL 2020 Specification (revision 6)", see
+// https://registry.khronos.org/SYCL/specs/sycl-2020/html/sycl-2020.html#sec:atomic-references.
+// Thus, we map MemoryOrderAcquire and MemoryOrderRelease to acq_rel.
+template <class MemoryOrder>
+struct SYCLMemoryOrder;
+
+template <>
+struct SYCLMemoryOrder<MemoryOrderSeqCst> {
+ static constexpr sycl::memory_order value = sycl::memory_order::seq_cst;
+};
+template <>
+struct SYCLMemoryOrder<MemoryOrderAcquire> {
+ static constexpr sycl::memory_order value = sycl::memory_order::acq_rel;
+};
+template <>
+struct SYCLMemoryOrder<MemoryOrderRelease> {
+ static constexpr sycl::memory_order value = sycl::memory_order::acq_rel;
+};
+template <>
+struct SYCLMemoryOrder<MemoryOrderAcqRel> {
+ static constexpr sycl::memory_order value = sycl::memory_order::acq_rel;
+};
+template <>
+struct SYCLMemoryOrder<MemoryOrderRelaxed> {
+ static constexpr sycl::memory_order value = sycl::memory_order::relaxed;
+};
+//</editor-fold>
+
+//<editor-fold desc="SYCL memory scope">
+template <class MemoryScope>
+struct SYCLMemoryScope;
+
+template <>
+struct SYCLMemoryScope<MemoryScopeCore> {
+ static constexpr sycl::memory_scope value = sycl::memory_scope::work_group;
+};
+
+template <>
+struct SYCLMemoryScope<MemoryScopeDevice> {
+ static constexpr sycl::memory_scope value = sycl::memory_scope::device;
+};
+
+template <>
+struct SYCLMemoryScope<MemoryScopeSystem> {
+ static constexpr sycl::memory_scope value = sycl::memory_scope::system;
+};
+//</editor-fold>
+
+// FIXME_SYCL generic_space isn't available yet for CUDA.
+#ifdef __NVPTX__
+template <class T, class MemoryOrder, class MemoryScope>
+using sycl_atomic_ref = sycl::atomic_ref<T,
+ SYCLMemoryOrder<MemoryOrder>::value,
+ SYCLMemoryScope<MemoryScope>::value,
+ sycl::access::address_space::global_space>;
+#else
+template <class T, class MemoryOrder, class MemoryScope>
+using sycl_atomic_ref = sycl::atomic_ref<T,
+ SYCLMemoryOrder<MemoryOrder>::value,
+ SYCLMemoryScope<MemoryScope>::value,
+ sycl::access::address_space::generic_space>;
+#endif
+
+#ifdef DESUL_SYCL_DEVICE_GLOBAL_SUPPORTED
+#ifdef SYCL_EXT_ONEAPI_DEVICE_GLOBAL
+template <class T>
+using sycl_device_global = sycl::ext::oneapi::experimental::device_global<T>;
+#else
+template <class T>
+using sycl_device_global = sycl::ext::oneapi::experimental::device_global<
+ T,
+ decltype(sycl::ext::oneapi::experimental::properties(
+ sycl::ext::oneapi::experimental::device_image_scope))>;
+#endif
+#endif
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMIC_REF_HPP_
+#define DESUL_ATOMIC_REF_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Generic.hpp>
+#include <desul/atomics/Macros.hpp>
+
+namespace desul {
+
+template <typename T, typename MemoryOrder, typename MemoryScope>
+class AtomicRef {
+ T* ptr_;
+
+ public:
+ using value_type = T;
+ using memory_order = MemoryOrder;
+ using memory_scope = MemoryScope;
+
+ DESUL_FUNCTION explicit AtomicRef(T& obj) : ptr_(&obj) {}
+
+ DESUL_FUNCTION T operator=(T desired) const noexcept {
+ store(desired);
+ return desired;
+ }
+
+ DESUL_FUNCTION operator T() const noexcept { return load(); }
+
+ DESUL_FUNCTION T load() const noexcept {
+ return desul::atomic_load(ptr_, MemoryOrder(), MemoryScope());
+ }
+
+ DESUL_FUNCTION void store(T desired) const noexcept {
+ return desul::atomic_store(ptr_, desired, MemoryOrder(), MemoryScope());
+ }
+
+ DESUL_FUNCTION T exchange(T desired) const noexcept {
+ return desul::atomic_exchange(ptr_, desired, MemoryOrder(), MemoryScope());
+ }
+
+ // TODO compare_exchange_{weak,strong} and is_lock_free
+
+#define DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(FETCH_OP, OP_FETCH) \
+ DESUL_FUNCTION T FETCH_OP(T arg) const noexcept { \
+ return desul::atomic_##FETCH_OP(ptr_, arg, MemoryOrder(), MemoryScope()); \
+ } \
+ DESUL_FUNCTION T OP_FETCH(T arg) const noexcept { \
+ return desul::atomic_##OP_FETCH(ptr_, arg, MemoryOrder(), MemoryScope()); \
+ }
+
+#define DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(COMPD_ASGMT, OP_FETCH) \
+ DESUL_FUNCTION T operator COMPD_ASGMT(T arg) const noexcept { return OP_FETCH(arg); }
+
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_add, add_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(+=, add_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_sub, sub_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(-=, sub_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_min, min_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_max, max_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_mul, mul_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(*=, mul_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_div, div_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(/=, div_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_mod, mod_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(%=, mod_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_and, and_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(&=, and_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_or, or_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(|=, or_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_xor, xor_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP(^=, xor_fetch)
+ DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP(fetch_nand, nand_fetch)
+
+#undef DESUL_IMPL_DEFINE_ATOMIC_COMPOUND_ASSIGNMENT_OP
+#undef DESUL_IMPL_DEFINE_ATOMIC_FETCH_OP
+
+#define DESUL_IMPL_DEFINE_ATOMIC_INCREMENT_DECREMENT(OPER, NAME) \
+ DESUL_FUNCTION T fetch_##NAME() const noexcept { \
+ return desul::atomic_fetch_##NAME(ptr_, MemoryOrder(), MemoryScope()); \
+ } \
+ DESUL_FUNCTION T NAME##_fetch() const noexcept { \
+ return desul::atomic_##NAME##_fetch(ptr_, MemoryOrder(), MemoryScope()); \
+ } \
+ DESUL_FUNCTION T operator OPER() const noexcept { return NAME##_fetch(); } \
+ DESUL_FUNCTION T operator OPER(int) const noexcept { return fetch_##NAME(); }
+
+ DESUL_IMPL_DEFINE_ATOMIC_INCREMENT_DECREMENT(++, inc)
+ DESUL_IMPL_DEFINE_ATOMIC_INCREMENT_DECREMENT(--, dec)
+
+#undef DESUL_IMPL_DEFINE_ATOMIC_INCREMENT_DECREMENT
+
+#define DESUL_IMPL_DEFINE_ATOMIC_BITWISE_SHIFT(COMPD_ASGMT, SHFT) \
+ DESUL_FUNCTION T fetch_##SHFT(unsigned int arg) const noexcept { \
+ return desul::atomic_fetch_##SHFT(ptr_, arg, MemoryOrder(), MemoryScope()); \
+ } \
+ DESUL_FUNCTION T SHFT##_fetch(unsigned int arg) const noexcept { \
+ return desul::atomic_##SHFT##_fetch(ptr_, arg, MemoryOrder(), MemoryScope()); \
+ } \
+ DESUL_FUNCTION T operator COMPD_ASGMT(unsigned int arg) const noexcept { \
+ return SHFT##_fetch(arg); \
+ }
+
+ DESUL_IMPL_DEFINE_ATOMIC_BITWISE_SHIFT(<<=, lshift)
+ DESUL_IMPL_DEFINE_ATOMIC_BITWISE_SHIFT(>>=, rshift)
+
+#undef DESUL_IMPL_DEFINE_ATOMIC_BITWISE_SHIFT
+};
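+
+// A minimal usage sketch (illustration only, not part of the upstream
+// sources): AtomicRef mirrors std::atomic_ref, with the memory order and
+// scope carried in the type.
+//
+//   int counter = 0;
+//   desul::AtomicRef<int, desul::MemoryOrderRelaxed, desul::MemoryScopeDevice>
+//       ref(counter);
+//   ref += 5;                      // compound assignment maps to add_fetch
+//   int prev = ref.fetch_add(1);   // returns the value before the addition
+//   int now = ref.load();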
+
+} // namespace desul
+
+#endif
#ifndef DESUL_ATOMICS_COMMON_HPP_
#define DESUL_ATOMICS_COMMON_HPP_
-#include <atomic>
+
#include <cstdint>
+#include <desul/atomics/Macros.hpp>
#include <type_traits>
-#include "desul/atomics/Macros.hpp"
-
namespace desul {
struct alignas(16) Dummy16ByteValue {
   int64_t value1;
   int64_t value2;
 };
} // namespace desul
-// MemoryOrder Tags
-
+//<editor-fold desc="Memory Order Tags">
namespace desul {
// Memory order sequential consistent
 struct MemoryOrderSeqCst {};
 // Memory order acquire-release
 struct MemoryOrderAcqRel {};
 // Memory order acquire
 struct MemoryOrderAcquire {};
 // Memory order release
 struct MemoryOrderRelease {};
 // Memory order relaxed
 struct MemoryOrderRelaxed {};
} // namespace desul
+//</editor-fold>
-// Memory Scope Tags
-
+//<editor-fold desc="Memory Scope Tags">
namespace desul {
// Entire machine scope (e.g. for global arrays)
 struct MemoryScopeSystem {};
 // Node level scope (e.g. across GPUs within a node)
 struct MemoryScopeNode {};
 // Device or socket scope (i.e. a CPU socket or a single GPU)
 struct MemoryScopeDevice {};
 // Core scoped (i.e. a shared L1 cache)
 struct MemoryScopeCore {};
 // Caller scoped (i.e. NOT atomic!)
 struct MemoryScopeCaller {};
} // namespace desul
-
-#ifdef __ATOMIC_RELAXED
-#define KOKKOS_ATOMIC_RELAXED __ATOMIC_RELAXED
-#else
-#define KOKKOS_ATOMIC_RELAXED 0
-#endif
-#ifdef __ATOMIC_CONSUME
-#define KOKKOS_ATOMIC_CONSUME __ATOMIC_CONSUME
-#else
-#define KOKKOS_ATOMIC_CONSUME 1
-#endif
-#ifdef __ATOMIC_ACQUIRE
-#define KOKKOS_ATOMIC_ACQUIRE __ATOMIC_ACQUIRE
-#else
-#define KOKKOS_ATOMIC_ACQUIRE 2
-#endif
-#ifdef __ATOMIC_RELEASE
-#define KOKKOS_ATOMIC_RELEASE __ATOMIC_RELEASE
-#else
-#define KOKKOS_ATOMIC_RELEASE 3
-#endif
-#ifdef __ATOMIC_ACQ_REL
-#define KOKKOS_ATOMIC_ACQ_REL __ATOMIC_ACQ_REL
-#else
-#define KOKKOS_ATOMIC_ACQ_REL 4
-#endif
-#ifdef __ATOMIC_SEQ_CST
-#define KOKKOS_ATOMIC_SEQ_CST __ATOMIC_SEQ_CST
-#else
-#define KOKKOS_ATOMIC_SEQ_CST 5
-#endif
+//</editor-fold>
namespace desul {
-template <class MemoryOrderDesul>
-struct GCCMemoryOrder;
-
-template <>
-struct GCCMemoryOrder<MemoryOrderRelaxed> {
- static constexpr int value = KOKKOS_ATOMIC_RELAXED;
-};
-
-template <>
-struct GCCMemoryOrder<MemoryOrderAcquire> {
- static constexpr int value = KOKKOS_ATOMIC_ACQUIRE;
-};
-
-template <>
-struct GCCMemoryOrder<MemoryOrderRelease> {
- static constexpr int value = KOKKOS_ATOMIC_RELEASE;
-};
-
-template <>
-struct GCCMemoryOrder<MemoryOrderAcqRel> {
- static constexpr int value = KOKKOS_ATOMIC_ACQ_REL;
-};
-
-template <>
-struct GCCMemoryOrder<MemoryOrderSeqCst> {
- static constexpr int value = KOKKOS_ATOMIC_SEQ_CST;
-};
-
-template <class MemoryOrderDesul>
-struct CXXMemoryOrder;
-
-template <>
-struct CXXMemoryOrder<MemoryOrderRelaxed> {
- static constexpr std::memory_order value = std::memory_order_relaxed;
-};
-
-template <>
-struct CXXMemoryOrder<MemoryOrderAcquire> {
- static constexpr std::memory_order value = std::memory_order_acquire;
-};
-
-template <>
-struct CXXMemoryOrder<MemoryOrderRelease> {
- static constexpr std::memory_order value = std::memory_order_release;
-};
-
-template <>
-struct CXXMemoryOrder<MemoryOrderAcqRel> {
- static constexpr std::memory_order value = std::memory_order_acq_rel;
-};
-
-template <>
-struct CXXMemoryOrder<MemoryOrderSeqCst> {
- static constexpr std::memory_order value = std::memory_order_seq_cst;
-};
-
namespace Impl {
-template <typename MemoryOrder>
+template <class MemoryOrder>
struct CmpExchFailureOrder {
   using memory_order = std::conditional_t<
       std::is_same<MemoryOrder, MemoryOrderAcqRel>{},
       MemoryOrderAcquire,
       std::conditional_t<std::is_same<MemoryOrder, MemoryOrderRelease>{},
                          MemoryOrderRelaxed,
                          MemoryOrder>>;
};
-template <typename MemoryOrder>
+template <class MemoryOrder>
using cmpexch_failure_memory_order =
typename CmpExchFailureOrder<MemoryOrder>::memory_order;
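+// For example, cmpexch_failure_memory_order<MemoryOrderAcqRel> is
+// MemoryOrderAcquire and cmpexch_failure_memory_order<MemoryOrderRelease> is
+// MemoryOrderRelaxed: a failed compare-exchange performs no store, so it
+// cannot carry a release ordering.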
} // namespace Impl
-
} // namespace desul
// We should in principle use std::numeric_limits, but that requires constexpr function
-// support on device Currently that is still considered experimetal on CUDA and
+// support on device. Currently that is still considered experimental on CUDA and
// sometimes not reliable.
namespace desul {
namespace Impl {
 template <class T>
 struct numeric_limits_max;
 template <>
struct numeric_limits_max<uint32_t> {
- static constexpr uint32_t value = 0xffffffffu;
+ static constexpr auto value = static_cast<uint32_t>(-1);
};
template <>
struct numeric_limits_max<uint64_t> {
- static constexpr uint64_t value = 0xfffffffflu;
+ static constexpr auto value = static_cast<uint64_t>(-1);
};
 constexpr bool atomic_always_lock_free(std::size_t size) {
   return size == 4 || size == 8
 #if defined(DESUL_HAVE_16BYTE_COMPARE_AND_SWAP)
          || size == 16
 #endif
       ;
 }
-template <std::size_t N>
-struct atomic_compare_exchange_type;
+//<editor-fold desc="Underlying type for atomic compare exchange">
+template <std::size_t Bytes>
+struct atomic_compare_exchange_helper;
template <>
-struct atomic_compare_exchange_type<4> {
+struct atomic_compare_exchange_helper<4> {
using type = int32_t;
};
template <>
-struct atomic_compare_exchange_type<8> {
+struct atomic_compare_exchange_helper<8> {
using type = int64_t;
};
template <>
-struct atomic_compare_exchange_type<16> {
+struct atomic_compare_exchange_helper<16> {
using type = Dummy16ByteValue;
};
+template <class T>
+using atomic_compare_exchange_t =
+ typename atomic_compare_exchange_helper<sizeof(T)>::type;
+//</editor-fold>
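+// For example, atomic_compare_exchange_t<double> is int64_t: a same-size
+// integer type through which the value is compare-exchanged bitwise.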
+
template <class T>
struct dont_deduce_this_parameter {
   using type = T;
 };
 
 template <class T>
 using dont_deduce_this_parameter_t =
     typename dont_deduce_this_parameter<T>::type;
 } // namespace Impl
} // namespace desul
+
#endif
#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_HPP_
#define DESUL_ATOMICS_COMPARE_EXCHANGE_HPP_
-#include "desul/atomics/Compare_Exchange_ScopeCaller.hpp"
-#include "desul/atomics/Macros.hpp"
+#include <desul/atomics/Macros.hpp>
#ifdef DESUL_HAVE_GCC_ATOMICS
-#include "desul/atomics/Compare_Exchange_GCC.hpp"
+#include <desul/atomics/Compare_Exchange_GCC.hpp>
#endif
#ifdef DESUL_HAVE_MSVC_ATOMICS
-#include "desul/atomics/Compare_Exchange_MSVC.hpp"
-#endif
-#ifdef DESUL_HAVE_SERIAL_ATOMICS
-#include "desul/atomics/Compare_Exchange_Serial.hpp"
+#include <desul/atomics/Compare_Exchange_MSVC.hpp>
#endif
#ifdef DESUL_HAVE_CUDA_ATOMICS
-#include "desul/atomics/Compare_Exchange_CUDA.hpp"
+#include <desul/atomics/Compare_Exchange_CUDA.hpp>
#endif
#ifdef DESUL_HAVE_HIP_ATOMICS
-#include "desul/atomics/Compare_Exchange_HIP.hpp"
+#include <desul/atomics/Compare_Exchange_HIP.hpp>
#endif
#ifdef DESUL_HAVE_OPENMP_ATOMICS
-#include "desul/atomics/Compare_Exchange_OpenMP.hpp"
+#include <desul/atomics/Compare_Exchange_OpenMP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENACC_ATOMICS
+#include <desul/atomics/Compare_Exchange_OpenACC.hpp>
#endif
#ifdef DESUL_HAVE_SYCL_ATOMICS
-#include "desul/atomics/Compare_Exchange_SYCL.hpp"
+#include <desul/atomics/Compare_Exchange_SYCL.hpp>
#endif
+
+#include <desul/atomics/Compare_Exchange_ScopeCaller.hpp>
+
#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_CUDA.hpp>
+#include <desul/atomics/Thread_Fence_CUDA.hpp>
+#include <type_traits>
+
+// Include the CUDA PTX-based exchange atomics.
+// When building with clang we always need to include the device functions,
+// since clang must see a consistent overload set in both device and host
+// compilation; but that means we need to know on the host what to make
+// visible, i.e. we need host-side compile-time knowledge of the architecture.
+// We can simply say that DESUL proper doesn't support clang CUDA builds
+// pre Volta; Kokkos has that knowledge and uses it here, allowing Kokkos
+// to use clang as a CUDA compiler for pre-Volta architectures.
+#ifndef DESUL_CUDA_ARCH_IS_PRE_VOLTA
+
+#include <desul/atomics/cuda/CUDA_asm_exchange.hpp>
+
+#else
+
+namespace desul {
+namespace Impl {
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+ static_assert(sizeof(unsigned int) == 4,
+ "this function assumes an unsigned int is 32-bit");
+ unsigned int return_val = atomicCAS(reinterpret_cast<unsigned int*>(dest),
+ reinterpret_cast<unsigned int&>(compare),
+ reinterpret_cast<unsigned int&>(value));
+ return reinterpret_cast<T&>(return_val);
+}
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 8, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+ static_assert(sizeof(unsigned long long int) == 8,
+ "this function assumes an unsigned long long is 64-bit");
+ unsigned long long int return_val =
+ atomicCAS(reinterpret_cast<unsigned long long int*>(dest),
+ reinterpret_cast<unsigned long long int&>(compare),
+ reinterpret_cast<unsigned long long int&>(value));
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
+ T return_val = device_atomic_compare_exchange(
+ dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderAcquire, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val = device_atomic_compare_exchange(
+ dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+ return return_val;
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val = device_atomic_compare_exchange(
+ dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4, T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+ static_assert(sizeof(unsigned int) == 4,
+ "this function assumes an unsigned int is 32-bit");
+ unsigned int return_val = atomicExch(reinterpret_cast<unsigned int*>(dest),
+ reinterpret_cast<unsigned int&>(value));
+ return reinterpret_cast<T&>(return_val);
+}
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 8, T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+ static_assert(sizeof(unsigned long long int) == 8,
+ "this function assumes an unsigned long long is 64-bit");
+ unsigned long long int return_val =
+ atomicExch(reinterpret_cast<unsigned long long int*>(dest),
+ reinterpret_cast<unsigned long long int&>(value));
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrderRelease, MemoryScope) {
+ T return_val =
+ device_atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrderAcquire, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val =
+ device_atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4 || sizeof(T) == 8, T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrderAcqRel, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val =
+ device_atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return reinterpret_cast<T&>(return_val);
+}
+} // namespace Impl
+} // namespace desul
+
+#endif
+
+// SeqCst is not directly supported by PTX; emulate it with acquire/release
+// fences around the relaxed operation:
+
+namespace desul {
+namespace Impl {
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4, T> device_atomic_exchange(T* const dest,
+ T value,
+ MemoryOrderSeqCst,
+ MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val =
+ device_atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 8, T> device_atomic_exchange(T* const dest,
+ T value,
+ MemoryOrderSeqCst,
+ MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val =
+ device_atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 4, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val = device_atomic_compare_exchange(
+ dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+template <class T, class MemoryScope>
+__device__ std::enable_if_t<sizeof(T) == 8, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ T return_val = device_atomic_compare_exchange(
+ dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+ device_atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<(sizeof(T) != 8) && (sizeof(T) != 4), T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
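+  // (A naive "spin until the lock is acquired" loop could deadlock here:
+  // threads in a warp execute in lockstep, so the lock holder cannot reach
+  // the unlock while its siblings keep spinning. The __activemask()/
+  // __ballot_sync() pattern below instead retires each thread from the loop
+  // as soon as its own update is done.)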
+ T return_val;
+ int done = 0;
+ unsigned int mask = __activemask();
+ unsigned int active = __ballot_sync(mask, 1);
+ unsigned int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_cuda((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ if (return_val == compare) {
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ unlock_address_cuda((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot_sync(mask, done);
+ }
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<(sizeof(T) != 8) && (sizeof(T) != 4), T>
+device_atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned int mask = __activemask();
+ unsigned int active = __ballot_sync(mask, 1);
+ unsigned int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_cuda((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_cuda((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot_sync(mask, done);
+ }
+ return return_val;
+}
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array.hpp>
+#include <desul/atomics/Thread_Fence_GCC.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class T>
+struct host_atomic_exchange_available_gcc {
+ constexpr static bool value =
+#ifndef DESUL_HAVE_LIBATOMIC
+ ((sizeof(T) == 4 && alignof(T) == 4) ||
+#ifdef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+ (sizeof(T) == 16 && alignof(T) == 16) ||
+#endif
+ (sizeof(T) == 8 && alignof(T) == 8)) &&
+#endif
+ std::is_trivially_copyable<T>::value;
+};
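+
+// Types that fail this predicate (e.g. a hypothetical 12-byte struct) take
+// the lock-based host_atomic_* fallbacks defined further down in this header.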
+
+// clang-format off
+// Disable warning for large atomics on clang 7 and up (checked with godbolt)
+// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
+// https://godbolt.org/z/G7YhqhbG6
+// clang-format on
+#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Watomic-alignment"
+#endif
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<host_atomic_exchange_available_gcc<T>::value, T> host_atomic_exchange(
+ T* dest, T value, MemoryOrder, MemoryScope) {
+ T return_val;
+ __atomic_exchange(dest, &value, &return_val, GCCMemoryOrder<MemoryOrder>::value);
+ return return_val;
+}
+
+// The failure mode for host_atomic_compare_exchange_n cannot be RELEASE nor
+// ACQREL, so those two get handled separately.
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<host_atomic_exchange_available_gcc<T>::value, T>
+host_atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+ (void)__atomic_compare_exchange(dest,
+ &compare,
+ &value,
+ false,
+ GCCMemoryOrder<MemoryOrder>::value,
+ GCCMemoryOrder<MemoryOrder>::value);
+ return compare;
+}
+
+template <class T, class MemoryScope>
+std::enable_if_t<host_atomic_exchange_available_gcc<T>::value, T>
+host_atomic_compare_exchange(
+ T* dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
+ (void)__atomic_compare_exchange(
+ dest, &compare, &value, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return compare;
+}
+
+template <class T, class MemoryScope>
+std::enable_if_t<host_atomic_exchange_available_gcc<T>::value, T>
+host_atomic_compare_exchange(
+ T* dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
+ (void)__atomic_compare_exchange(
+ dest, &compare, &value, false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+ return compare;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!host_atomic_exchange_available_gcc<T>::value, T> host_atomic_exchange(
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // Acquire a lock for the address
+ // clang-format off
+ while (!lock_address((void*)dest, scope)) {}
+ // clang-format on
+
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = val;
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!host_atomic_exchange_available_gcc<T>::value, T>
+host_atomic_compare_exchange(T* const dest,
+ dont_deduce_this_parameter_t<const T> compare,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // Acquire a lock for the address
+ // clang-format off
+ while (!lock_address((void*)dest, scope)) {}
+ // clang-format on
+
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ if (return_val == compare) {
+ *dest = val;
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
+#pragma GCC diagnostic pop
+#endif
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
+
+#include <desul/atomics/Adapt_HIP.hpp>
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_HIP.hpp>
+#include <desul/atomics/Thread_Fence_HIP.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class T>
+struct atomic_exchange_available_hip {
+ constexpr static bool value =
+ ((sizeof(T) == 1 && alignof(T) == 1) || (sizeof(T) == 4 && alignof(T) == 4) ||
+ (sizeof(T) == 8 && alignof(T) == 8)) &&
+ std::is_trivially_copyable<T>::value;
+};
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<atomic_exchange_available_hip<T>::value, T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
+ (void)__hip_atomic_compare_exchange_strong(
+ dest,
+ &compare,
+ value,
+ HIPMemoryOrder<MemoryOrder>::value,
+ HIPMemoryOrder<cmpexch_failure_memory_order<MemoryOrder>>::value,
+ HIPMemoryScope<MemoryScope>::value);
+ return compare;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<atomic_exchange_available_hip<T>::value, T>
+device_atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope) {
+ T return_val = __hip_atomic_exchange(dest,
+ value,
+ HIPMemoryOrder<MemoryOrder>::value,
+ HIPMemoryScope<MemoryScope>::value);
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<!atomic_exchange_available_hip<T>::value, T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned long long int active = __ballot(1);
+ unsigned long long int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_hip((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          device_atomic_thread_fence(MemoryOrderRelease(), scope);
+        device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ if (return_val == compare) {
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ unlock_address_hip((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot(done);
+ }
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+__device__ std::enable_if_t<!atomic_exchange_available_hip<T>::value, T>
+device_atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned long long int active = __ballot(1);
+ unsigned long long int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_hip((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_hip((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot(done);
+ }
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Thread_Fence_MSVC.hpp>
+#include <type_traits>
+
+#ifndef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+#define DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+#endif
+
+namespace desul {
+namespace Impl {
+
+// Forward declare these functions. They use compare_exchange themselves
+// so the actual header file with them comes after this file is included.
+template <class MemoryScope>
+bool lock_address(void* ptr, MemoryScope ms);
+
+template <class MemoryScope>
+void unlock_address(void* ptr, MemoryScope ms);
+
+} // namespace Impl
+} // namespace desul
+
+namespace desul {
+namespace Impl {
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 1, T> host_atomic_exchange(T* const dest,
+ T val,
+ MemoryOrder,
+ MemoryScope) {
+ char return_val = _InterlockedExchange8((char*)dest, *((char*)&val));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 2, T> host_atomic_exchange(T* const dest,
+ T val,
+ MemoryOrder,
+ MemoryScope) {
+ short return_val = _InterlockedExchange16((short*)dest, *((short*)&val));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 4, T> host_atomic_exchange(T* const dest,
+ T val,
+ MemoryOrder,
+ MemoryScope) {
+ long return_val = _InterlockedExchange((long*)dest, *((long*)&val));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 8, T> host_atomic_exchange(T* const dest,
+ T val,
+ MemoryOrder,
+ MemoryScope) {
+ __int64 return_val = _InterlockedExchange64((__int64*)dest, *((__int64*)&val));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 && sizeof(T) != 8),
+ T>
+host_atomic_exchange(T* const dest, T val, MemoryOrder, MemoryScope scope) {
+ while (!lock_address((void*)dest, scope)) {
+ }
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = val;
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 1, T> host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope) {
+ char return_val =
+ _InterlockedCompareExchange8((char*)dest, *((char*)&val), *((char*)&compare));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 2, T> host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope) {
+ short return_val =
+ _InterlockedCompareExchange16((short*)dest, *((short*)&val), *((short*)&compare));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 4, T> host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope) {
+ long return_val =
+ _InterlockedCompareExchange((long*)dest, *((long*)&val), *((long*)&compare));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 8, T> host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope) {
+ __int64 return_val = _InterlockedCompareExchange64(
+ (__int64*)dest, *((__int64*)&val), *((__int64*)&compare));
+ return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 16, T> host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope) {
+ Dummy16ByteValue* val16 = reinterpret_cast<Dummy16ByteValue*>(&val);
+ (void)_InterlockedCompareExchange128(reinterpret_cast<__int64*>(dest),
+ val16->value2,
+ val16->value1,
+ (reinterpret_cast<__int64*>(&compare)));
+ return compare;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 &&
+ sizeof(T) != 8 && sizeof(T) != 16),
+ T>
+host_atomic_compare_exchange(
+ T* const dest, T compare, T val, MemoryOrder, MemoryScope scope) {
+ while (!lock_address((void*)dest, scope)) {
+ }
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ if (return_val == compare) {
+ *dest = val;
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_OPENACC_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_OPENACC_HPP_
+
+#include <openacc.h>
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Thread_Fence_OpenACC.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+#ifdef __NVCOMPILER
+
+#pragma acc routine seq
+template <class T, class MemoryOrder, class MemoryScope>
+T device_atomic_exchange(T* dest, T value, MemoryOrder, MemoryScope /*scope*/) {
+ if constexpr (std::is_arithmetic_v<T> && ((sizeof(T) == 4) || (sizeof(T) == 8))) {
+ T return_val;
+#pragma acc atomic capture
+ {
+ return_val = *dest;
+ *dest = value;
+ }
+ return return_val;
+ } else {
+ // FIXME_OPENACC
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+ "DESUL error in device_atomic_exchange(): Not supported atomic operation in "
+ "the OpenACC backend\n");
+ }
+ // Acquire a lock for the address
+ // while (!lock_address_openacc((void*)dest, scope)) {
+ // }
+ // device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = value;
+ // device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ // unlock_address_openacc((void*)dest, scope);
+ return return_val;
+ }
+}
+
+#pragma acc routine seq
+template <class T, class MemoryOrder, class MemoryScope>
+T device_atomic_compare_exchange(
+ T* dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+  // Floating point types are treated separately to work around compiler errors
+  // "parse invalid cast opcode for cast from 'i32' to 'float'".
+  // We also do not simply "forward" the arguments to atomicCAS, because it
+  // does not have an overload that takes int64_t.
+ if constexpr (std::is_integral_v<T> && ((sizeof(T) == 4) || (sizeof(T) == 8))) {
+ static_assert(sizeof(unsigned int) == 4);
+ static_assert(sizeof(unsigned long long int) == 8);
+ using cas_t =
+ std::conditional_t<(sizeof(T) == 4), unsigned int, unsigned long long int>;
+ cas_t return_val = atomicCAS(reinterpret_cast<cas_t*>(dest),
+ reinterpret_cast<cas_t&>(compare),
+ reinterpret_cast<cas_t&>(value));
+ return reinterpret_cast<T&>(return_val);
+#ifdef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+ } else if constexpr (std::is_same_v<T, float>) {
+#else
+ } else if constexpr (std::is_same_v<T, float> || std::is_same_v<T, double>) {
+#endif
+ return atomicCAS(dest, compare, value);
+ } else {
+ // FIXME_OPENACC
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+ "DESUL error in device_atomic_compare_exchange(): Not supported atomic "
+ "operation in the OpenACC backend\n");
+ }
+ T current_val = *dest;
+ // Acquire a lock for the address
+ // while (!lock_address_openacc((void*)dest, scope)) {
+ //}
+ // device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ if (current_val == compare) {
+ *dest = value;
+ // device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ // unlock_address_openacc((void*)dest, scope);
+ return current_val;
+ }
+}
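+
+// Illustrative sketch (not part of the build): for a 64-bit integral type the
+// branch above type-puns through CUDA's unsigned long long atomicCAS, roughly
+//
+//   int64_t* dest; int64_t compare, value;
+//   using cas_t = unsigned long long int;
+//   cas_t old = atomicCAS(reinterpret_cast<cas_t*>(dest),
+//                         reinterpret_cast<cas_t&>(compare),
+//                         reinterpret_cast<cas_t&>(value));
+//   int64_t observed = reinterpret_cast<int64_t&>(old);  // value seen before the CAS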
+
+#else // not NVHPC
+
+#pragma acc routine seq
+template <class T, class MemoryOrder, class MemoryScope>
+T device_atomic_exchange(T* dest, T value, MemoryOrder, MemoryScope) {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T return_val;
+#pragma acc atomic capture
+ {
+ return_val = *dest;
+ *dest = value;
+ }
+ return return_val;
+ } else {
+ // FIXME_OPENACC
+ printf(
+      "DESUL error in device_atomic_exchange(): Unsupported atomic operation in "
+      "the OpenACC backend\n");
+ // Acquire a lock for the address
+ // while (!lock_address_openacc((void*)dest, scope)) {
+ // }
+ // device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = value;
+ // device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ // unlock_address_openacc((void*)dest, scope);
+ return return_val;
+ }
+}
+
+#pragma acc routine seq
+template <class T, class MemoryOrder, class MemoryScope>
+T device_atomic_compare_exchange(
+ T* dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+ // FIXME_OPENACC
+ printf(
+      "DESUL error in device_atomic_compare_exchange(): Unsupported atomic operation "
+      "in the OpenACC backend\n");
+ T current_val = *dest;
+ // Acquire a lock for the address
+ // while (!lock_address_openacc((void*)dest, scope)) {
+ //}
+ // device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ if (current_val == compare) {
+ *dest = value;
+ // device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ // unlock_address_openacc((void*)dest, scope);
+ return current_val;
+}
+
+#endif
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
+
+#include <omp.h>
+
+#include <desul/atomics/Adapt_GCC.hpp>
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Thread_Fence_OpenMP.hpp>
+
+namespace desul {
+namespace Impl {
+
+template <class T, class MemoryOrder, class MemoryScope>
+T host_atomic_exchange(T* dest, T value, MemoryOrder, MemoryScope) {
+ T return_val;
+ if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value) {
+ atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+ }
+ T& x = *dest;
+#pragma omp atomic capture
+ {
+ return_val = x;
+ x = value;
+ }
+ if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value) {
+ atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+ }
+ return return_val;
+}
+
+// OpenMP doesn't have compare exchange, so we use compiler built-ins and rely on
+// testing that this works. Note that this means we test this in OpenMPTarget
+// offload regions!
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<atomic_always_lock_free(sizeof(T)), T> host_atomic_compare_exchange(
+ T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+ using cas_t = atomic_compare_exchange_t<T>;
+ cas_t retval = __sync_val_compare_and_swap(reinterpret_cast<volatile cas_t*>(dest),
+ reinterpret_cast<cas_t&>(compare),
+ reinterpret_cast<cas_t&>(value));
+ return reinterpret_cast<T&>(retval);
+}
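+
+// Illustrative sketch (not part of the build): for T = float, sizeof(T) == 4, so
+// atomic_compare_exchange_t<float> is (judging from the casts above) a same-size
+// integer type; the call reduces to __sync_val_compare_and_swap on 32-bit integers
+// and the resulting bit pattern is reinterpreted back to float.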
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!atomic_always_lock_free(sizeof(T)), T> // FIXME_OPENMP
+host_atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+#if 0
+ (void)__atomic_compare_exchange(dest,
+ &compare,
+ &value,
+ false,
+ GCCMemoryOrder<MemoryOrder>::value,
+ GCCMemoryOrder<MemoryOrder>::value);
+#else
+ (void)dest;
+ (void)value;
+#endif
+ return compare;
+}
+
+#if 0 // FIXME_OPENMP
+
+// Disable warning for large atomics on clang 7 and up (checked with godbolt)
+// clang-format off
+// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
+// clang-format on
+#if defined(__clang__) && (__clang_major__ >= 7)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Watomic-alignment"
+#endif
+
+// Make 16-byte CAS work at least on the host
+#pragma omp begin declare variant match(device = {kind(host)})
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
+host_atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+ (void)__atomic_compare_exchange(dest,
+ &compare,
+ &value,
+ false,
+ GCCMemoryOrder<MemoryOrder>::value,
+ GCCMemoryOrder<MemoryOrder>::value);
+ return compare;
+}
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(device = {kind(nohost)})
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
+device_atomic_compare_exchange(
+ T* /*dest*/, T /*compare*/, T value, MemoryOrder, MemoryScope) {
+ // FIXME_OPENMP make sure this never gets called
+ return value;
+}
+#pragma omp end declare variant
+
+#if defined(__clang__) && (__clang_major__ >= 7)
+#pragma GCC diagnostic pop
+#endif
+
+#endif
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
+
+#include <desul/atomics/Adapt_SYCL.hpp>
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_SYCL.hpp>
+#include <desul/atomics/Thread_Fence_SYCL.hpp>
+
+// FIXME_SYCL SYCL 2020 dictates that <sycl/sycl.hpp> is the header to include,
+// but icpx 2022.1.0 and earlier versions only provide <CL/sycl.hpp>
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+namespace desul {
+namespace Impl {
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 4, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
+ static_assert(sizeof(unsigned int) == 4,
+ "this function assumes an unsigned int is 32-bit");
+ sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
+ *reinterpret_cast<unsigned int*>(dest));
+ dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned int*>(&compare),
+ *reinterpret_cast<unsigned int*>(&value));
+ return compare;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 8, T> device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
+ static_assert(sizeof(unsigned long long int) == 8,
+ "this function assumes an unsigned long long is 64-bit");
+ sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
+ *reinterpret_cast<unsigned long long int*>(dest));
+ dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned long long int*>(&compare),
+ *reinterpret_cast<unsigned long long int*>(&value));
+ return compare;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 4, T> device_atomic_exchange(T* const dest,
+ T value,
+ MemoryOrder,
+ MemoryScope) {
+ static_assert(sizeof(unsigned int) == 4,
+ "this function assumes an unsigned int is 32-bit");
+ sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
+ *reinterpret_cast<unsigned int*>(dest));
+ unsigned int return_val = dest_ref.exchange(*reinterpret_cast<unsigned int*>(&value));
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<sizeof(T) == 8, T> device_atomic_exchange(T* const dest,
+ T value,
+ MemoryOrder,
+ MemoryScope) {
+ static_assert(sizeof(unsigned long long int) == 8,
+ "this function assumes an unsigned long long is 64-bit");
+ sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
+ *reinterpret_cast<unsigned long long int*>(dest));
+ unsigned long long int return_val =
+ dest_ref.exchange(reinterpret_cast<unsigned long long int&>(value));
+ return reinterpret_cast<T&>(return_val);
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<(sizeof(T) != 8) && (sizeof(T) != 4), T>
+device_atomic_compare_exchange(
+ T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a subgroup
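+  // (Work-items of a sub-group can execute in lockstep on SIMD hardware, so a
+  // plain spin on the lock could deadlock when the lock holder and a waiter share
+  // a sub-group; instead each work-item retries until the ballot below shows that
+  // all of them have finished.)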
+ T return_val;
+ int done = 0;
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20250000
+ auto sg = sycl::ext::oneapi::this_work_item::get_sub_group();
+#else
+ auto sg = sycl::ext::oneapi::experimental::this_sub_group();
+#endif
+ using sycl::ext::oneapi::group_ballot;
+ using sycl::ext::oneapi::sub_group_mask;
+ sub_group_mask active = group_ballot(sg, 1);
+ sub_group_mask done_active = group_ballot(sg, 0);
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_sycl((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          device_atomic_thread_fence(MemoryOrderRelease(), scope);
+        device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ if (return_val == compare) {
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ }
+ unlock_address_sycl((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = group_ballot(sg, done);
+ }
+ return return_val;
+}
+
+template <class T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<(sizeof(T) != 8) && (sizeof(T) != 4), T> device_atomic_exchange(
+ T* const dest, T value, MemoryOrder, MemoryScope scope) {
+ // This is a way to avoid deadlock in a subgroup
+ T return_val;
+ int done = 0;
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20250000
+ auto sg = sycl::ext::oneapi::this_work_item::get_sub_group();
+#else
+ auto sg = sycl::ext::oneapi::experimental::this_sub_group();
+#endif
+ using sycl::ext::oneapi::group_ballot;
+ using sycl::ext::oneapi::sub_group_mask;
+ sub_group_mask active = group_ballot(sg, 1);
+ sub_group_mask done_active = group_ballot(sg, 0);
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_sycl((void*)dest, scope)) {
+ if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = value;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_sycl((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = group_ballot(sg, done);
+ }
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SCOPECALLER_HPP_
#define DESUL_ATOMICS_COMPARE_EXCHANGE_SCOPECALLER_HPP_
-#include "desul/atomics/Common.hpp"
-namespace desul {
+#include <desul/atomics/Common.hpp>
-template <class MemoryOrder>
-DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrder, MemoryScopeCaller) {}
+namespace desul {
#define DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MEMORY_ORDER) \
- template <typename T> \
+ template <class T> \
DESUL_INLINE_FUNCTION T atomic_exchange( \
T* dest, T value, MEMORY_ORDER, MemoryScopeCaller) { \
T return_val = *dest; \
return return_val; \
} \
\
- template <typename T> \
+ template <class T> \
DESUL_INLINE_FUNCTION T atomic_compare_exchange( \
T* dest, T compare, T value, MEMORY_ORDER, MemoryScopeCaller) { \
T current_val = *dest; \
DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderRelaxed)
#undef DESUL_ATOMIC_EXCHANGE_SCOPECALLER
+
} // namespace desul
+
#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_HPP_
+#define DESUL_ATOMICS_FETCH_OP_HPP_
+
+#include <desul/atomics/Macros.hpp>
+
+#ifdef DESUL_HAVE_GCC_ATOMICS
+#include <desul/atomics/Fetch_Op_GCC.hpp>
+#endif
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#include <desul/atomics/Fetch_Op_CUDA.hpp>
+#endif
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#include <desul/atomics/Fetch_Op_HIP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENMP_ATOMICS
+#include <desul/atomics/Fetch_Op_OpenMP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENACC_ATOMICS
+#include <desul/atomics/Fetch_Op_OpenACC.hpp>
+#endif
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+#include <desul/atomics/Fetch_Op_SYCL.hpp>
+#endif
+
+#include <desul/atomics/Fetch_Op_ScopeCaller.hpp>
+
+// Must come last so that the generic fallbacks are declared after the
+// backend-specific overloads above, which they are meant to defer to
+#include <desul/atomics/Fetch_Op_Generic.hpp>
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_CUDA_HPP_
+#define DESUL_ATOMICS_FETCH_OP_CUDA_HPP_
+
+#ifndef DESUL_CUDA_ARCH_IS_PRE_VOLTA
+
+#define DESUL_HAVE_CUDA_ATOMICS_ASM
+
+#include <desul/atomics/cuda/CUDA_asm.hpp>
+
+#else
+
+namespace desul {
+namespace Impl {
+
+// clang-format off
+inline __device__ int device_atomic_fetch_add( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_add( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_add(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__ float device_atomic_fetch_add( float* ptr, float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+#ifndef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+inline __device__ double device_atomic_fetch_add( double* ptr, double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+#endif
+
+inline __device__ int device_atomic_fetch_sub( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_sub( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_sub(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+inline __device__ float device_atomic_fetch_sub( float* ptr, float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+#ifndef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+inline __device__ double device_atomic_fetch_sub( double* ptr, double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+#endif
+
+inline __device__ int device_atomic_fetch_min( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_min( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_min(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+
+inline __device__ int device_atomic_fetch_max( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_max( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_max(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+
+inline __device__ int device_atomic_fetch_and( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_and( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_and(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+
+inline __device__ int device_atomic_fetch_or ( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_or ( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_or (unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+
+inline __device__ int device_atomic_fetch_xor( int* ptr, int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_xor( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+inline __device__ unsigned long long device_atomic_fetch_xor(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+
+inline __device__ int device_atomic_fetch_inc( int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1 ); }
+inline __device__ unsigned int device_atomic_fetch_inc( unsigned int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1u ); }
+inline __device__ unsigned long long device_atomic_fetch_inc(unsigned long long* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1ull); }
+
+inline __device__ int device_atomic_fetch_dec( int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1 ); }
+inline __device__ unsigned int device_atomic_fetch_dec( unsigned int* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1u ); }
+inline __device__ unsigned long long device_atomic_fetch_dec(unsigned long long* ptr, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -1ull);}
+
+inline __device__ unsigned int device_atomic_fetch_inc_mod( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicInc(ptr, val); }
+inline __device__ unsigned int device_atomic_fetch_dec_mod( unsigned int* ptr, unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicDec(ptr, val); }
+// clang-format on
+
+#define DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, TYPE) \
+ template <class MemoryOrder> \
+ __device__ TYPE device_atomic_##FETCH_OP( \
+ TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeDevice) { \
+ __threadfence(); \
+ TYPE return_val = \
+ device_atomic_##FETCH_OP(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice()); \
+ __threadfence(); \
+ return return_val; \
+ } \
+ template <class MemoryOrder> \
+ __device__ TYPE device_atomic_##FETCH_OP( \
+ TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeCore) { \
+ return device_atomic_##FETCH_OP(ptr, val, MemoryOrder(), MemoryScopeDevice()); \
+ }
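+
+// Illustrative expansion (not part of the build):
+// DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(fetch_min, int) generates, roughly,
+//
+//   template <class MemoryOrder>
+//   __device__ int device_atomic_fetch_min(int* ptr, int val, MemoryOrder, MemoryScopeDevice) {
+//     __threadfence();  // upgrade the relaxed hardware atomic to the requested order
+//     int return_val = device_atomic_fetch_min(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice());
+//     __threadfence();
+//     return return_val;
+//   }
+//
+// plus a MemoryScopeCore overload forwarding to the MemoryScopeDevice one; the
+// concrete relaxed overloads above are preferred by overload resolution when they
+// match.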
+
+#define DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(FETCH_OP) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, int) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, unsigned int) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, unsigned long long)
+
+#ifdef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+
+#define DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(FETCH_OP) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, float)
+
+#else
+
+#define DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(FETCH_OP) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, float) \
+ DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(FETCH_OP, double)
+
+#endif
+
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_min)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_max)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_and)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_or)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_xor)
+
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(fetch_add)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_add)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(fetch_sub)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_sub)
+
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_inc)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(fetch_dec)
+
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(fetch_inc_mod, unsigned int)
+DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP(fetch_dec_mod, unsigned int)
+
+#undef DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT
+#undef DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP_INTEGRAL
+#undef DESUL_IMPL_CUDA_DEVICE_ATOMIC_FETCH_OP
+
+} // namespace Impl
+} // namespace desul
+
+#endif
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_GCC_HPP_
+#define DESUL_ATOMICS_FETCH_OP_GCC_HPP_
+
+#include <desul/atomics/Adapt_GCC.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+// clang-format off
+#define DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MEMORY_ORDER, MEMORY_SCOPE) \
+ template <class T> \
+ std::enable_if_t<std::is_integral<T>::value, T> host_atomic_fetch_##OP (T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
+ return __atomic_fetch_##OP (dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
+ } \
+ template <class T> \
+ std::enable_if_t<std::is_integral<T>::value, T> host_atomic_##OP##_fetch(T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) { \
+ return __atomic_##OP##_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
+ }
+
+#define DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(OP) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderRelaxed, MemoryScopeNode ) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderRelaxed, MemoryScopeDevice) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderRelaxed, MemoryScopeCore ) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderSeqCst , MemoryScopeNode ) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderSeqCst , MemoryScopeDevice) \
+ DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE(OP, MemoryOrderSeqCst , MemoryScopeCore )
+// clang-format on
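+
+// Illustrative expansion (not part of the build): each
+// DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(OP) below emits six fetch_OP/OP_fetch
+// pairs (relaxed and seq_cst, for node/device/core scope); the
+// (add, MemoryOrderRelaxed, MemoryScopeDevice) instance is, roughly,
+//
+//   template <class T>
+//   std::enable_if_t<std::is_integral<T>::value, T>
+//   host_atomic_fetch_add(T* const dest, T value, MemoryOrderRelaxed, MemoryScopeDevice) {
+//     return __atomic_fetch_add(dest, value, GCCMemoryOrder<MemoryOrderRelaxed>::value);
+//   }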
+
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(add)
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(sub)
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(and)
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(xor)
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(or)
+DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL(nand)
+
+#undef DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL
+#undef DESUL_IMPL_GCC_HOST_ATOMIC_FETCH_OP_INTEGRAL_ORDER_SCOPE
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_GENERIC_HPP_
+#define DESUL_ATOMICS_FETCH_OP_GENERIC_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Based_Fetch_Op.hpp>
+#include <desul/atomics/Lock_Free_Fetch_Op.hpp>
+#include <desul/atomics/Operator_Function_Objects.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+#define DESUL_IMPL_ATOMIC_FETCH_OP(ANNOTATION, HOST_OR_DEVICE, FETCH_OP, OP_FETCH) \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_##FETCH_OP( \
+ T* const dest, const T val, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_fetch_oper( \
+ OP_FETCH##_operator<T, const T>(), dest, val, order, scope); \
+ } \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_##OP_FETCH( \
+ T* const dest, const T val, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_oper_fetch( \
+ OP_FETCH##_operator<T, const T>(), dest, val, order, scope); \
+ }
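+
+// Illustrative expansion (not part of the build):
+// DESUL_IMPL_ATOMIC_FETCH_OP(DESUL_IMPL_HOST_FUNCTION, host, fetch_add, add_fetch)
+// emits, roughly,
+//
+//   template <class T, class MemoryOrder, class MemoryScope>
+//   T host_atomic_fetch_add(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+//     return host_atomic_fetch_oper(add_fetch_operator<T, const T>(), dest, val, order, scope);
+//   }
+//
+// so both the fetch_op and op_fetch forms funnel into the lock-free or lock-based
+// *_oper helpers with a function object encoding the operation.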
+
+#define DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(FETCH_OP, OP_FETCH) \
+ DESUL_IMPL_ATOMIC_FETCH_OP(DESUL_IMPL_HOST_FUNCTION, host, FETCH_OP, OP_FETCH) \
+ DESUL_IMPL_ATOMIC_FETCH_OP(DESUL_IMPL_DEVICE_FUNCTION, device, FETCH_OP, OP_FETCH)
+
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_add, add_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_sub, sub_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_max, max_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_min, min_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_mul, mul_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_div, div_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_mod, mod_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_and, and_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_or, or_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_xor, xor_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_nand, nand_fetch)
+
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_inc_mod, inc_mod_fetch)
+DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE(fetch_dec_mod, dec_mod_fetch)
+
+#undef DESUL_IMPL_ATOMIC_FETCH_OP_HOST_AND_DEVICE
+#undef DESUL_IMPL_ATOMIC_FETCH_OP
+
+#define DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT(ANNOTATION, HOST_OR_DEVICE, OP) \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_fetch_##OP( \
+ T* const dest, const unsigned int val, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_fetch_oper( \
+ OP##_fetch_operator<T, const unsigned int>(), dest, val, order, scope); \
+ } \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_##OP##_fetch( \
+ T* const dest, const unsigned int val, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_oper_fetch( \
+ OP##_fetch_operator<T, const unsigned int>(), dest, val, order, scope); \
+ }
+
+#define DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT_HOST_AND_DEVICE(OP) \
+ DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT(DESUL_IMPL_HOST_FUNCTION, host, OP) \
+ DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT(DESUL_IMPL_DEVICE_FUNCTION, device, OP)
+
+DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT_HOST_AND_DEVICE(lshift)
+DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT_HOST_AND_DEVICE(rshift)
+
+#undef DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT_HOST_AND_DEVICE
+#undef DESUL_IMPL_ATOMIC_FETCH_OP_SHIFT
+
+// NOTE: we use atomic_oper_fetch in the fallback implementation of atomic_store to
+// avoid reading potentially uninitialized values, which would yield undefined
+// behavior.
+#define DESUL_IMPL_ATOMIC_LOAD_AND_STORE(ANNOTATION, HOST_OR_DEVICE) \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_load( \
+ const T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_fetch_oper( \
+ load_fetch_operator<T, const T>(), const_cast<T*>(dest), T(), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION void HOST_OR_DEVICE##_atomic_store( \
+ T* const dest, const T val, MemoryOrder order, MemoryScope scope) { \
+ (void)HOST_OR_DEVICE##_atomic_oper_fetch( \
+ store_fetch_operator<T, const T>(), dest, val, order, scope); \
+ }
+
+DESUL_IMPL_ATOMIC_LOAD_AND_STORE(DESUL_IMPL_HOST_FUNCTION, host)
+DESUL_IMPL_ATOMIC_LOAD_AND_STORE(DESUL_IMPL_DEVICE_FUNCTION, device)
+
+#undef DESUL_IMPL_ATOMIC_LOAD_AND_STORE
+
+#define DESUL_IMPL_ATOMIC_OP(ANNOTATION, HOST_OR_DEVICE, OP) \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION void HOST_OR_DEVICE##_atomic_##OP( \
+ T* const dest, const T val, MemoryOrder order, MemoryScope scope) { \
+ (void)HOST_OR_DEVICE##_atomic_fetch_##OP(dest, val, order, scope); \
+ }
+
+#define DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(OP) \
+ DESUL_IMPL_ATOMIC_OP(DESUL_IMPL_HOST_FUNCTION, host, OP) \
+ DESUL_IMPL_ATOMIC_OP(DESUL_IMPL_DEVICE_FUNCTION, device, OP)
+
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(add)
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(sub)
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(mul)
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(div)
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(min)
+DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE(max)
+
+#undef DESUL_IMPL_ATOMIC_OP_HOST_AND_DEVICE
+#undef DESUL_IMPL_ATOMIC_OP
+
+#define DESUL_IMPL_ATOMIC_INCREMENT_DECREMENT(ANNOTATION, HOST_OR_DEVICE) \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_inc_fetch( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_add_fetch(dest, T(1), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_dec_fetch( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_sub_fetch(dest, T(1), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_fetch_inc( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_fetch_add(dest, T(1), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_fetch_dec( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_fetch_sub(dest, T(1), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION void HOST_OR_DEVICE##_atomic_inc( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_add(dest, T(1), order, scope); \
+ } \
+ \
+ template <class T, class MemoryOrder, class MemoryScope> \
+ ANNOTATION void HOST_OR_DEVICE##_atomic_dec( \
+ T* const dest, MemoryOrder order, MemoryScope scope) { \
+ return HOST_OR_DEVICE##_atomic_sub(dest, T(1), order, scope); \
+ }
+
+DESUL_IMPL_ATOMIC_INCREMENT_DECREMENT(DESUL_IMPL_HOST_FUNCTION, host)
+DESUL_IMPL_ATOMIC_INCREMENT_DECREMENT(DESUL_IMPL_DEVICE_FUNCTION, device)
+
+#undef DESUL_IMPL_ATOMIC_INCREMENT_DECREMENT
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_HIP_HPP_
+#define DESUL_ATOMICS_FETCH_OP_HIP_HPP_
+
+#include <desul/atomics/Adapt_HIP.hpp>
+
+namespace desul {
+namespace Impl {
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, T) \
+ template <class MemoryOrder, class MemoryScope> \
+ __device__ inline T device_atomic_fetch_##OP( \
+ T* ptr, T val, MemoryOrder, MemoryScope) { \
+ return __hip_atomic_fetch_##OP(ptr, \
+ val, \
+ HIPMemoryOrder<MemoryOrder>::value, \
+ HIPMemoryScope<MemoryScope>::value); \
+ }
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(OP) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, int) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, long long) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, unsigned int) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, unsigned long long)
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_OP_FLOATING_POINT(OP) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, float) \
+ DESUL_IMPL_HIP_ATOMIC_FETCH_OP(OP, double)
+
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(add)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(min)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(max)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(and)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(or)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL(xor)
+DESUL_IMPL_HIP_ATOMIC_FETCH_OP_FLOATING_POINT(add)
+// atomic min/max on floating-point types give wrong results (tested with ROCm 6.0
+// on Frontier)
+// DESUL_IMPL_HIP_ATOMIC_FETCH_OP_FLOATING_POINT(min)
+// DESUL_IMPL_HIP_ATOMIC_FETCH_OP_FLOATING_POINT(max)
+
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_OP_FLOATING_POINT
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_OP_INTEGRAL
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_OP
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(T) \
+ template <class MemoryOrder, class MemoryScope> \
+ __device__ inline T device_atomic_fetch_sub( \
+ T* ptr, T val, MemoryOrder, MemoryScope) { \
+ return __hip_atomic_fetch_add(ptr, \
+ -val, \
+ HIPMemoryOrder<MemoryOrder>::value, \
+ HIPMemoryScope<MemoryScope>::value); \
+ }
+
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(int)
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(long long)
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(unsigned int)
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(unsigned long long)
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(float)
+DESUL_IMPL_HIP_ATOMIC_FETCH_SUB(double)
+
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_SUB
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_INC(T) \
+ template <class MemoryOrder, class MemoryScope> \
+ __device__ inline T device_atomic_fetch_inc(T* ptr, MemoryOrder, MemoryScope) { \
+ return __hip_atomic_fetch_add(ptr, \
+ 1, \
+ HIPMemoryOrder<MemoryOrder>::value, \
+ HIPMemoryScope<MemoryScope>::value); \
+ } \
+ template <class MemoryOrder, class MemoryScope> \
+ __device__ inline T device_atomic_fetch_dec(T* ptr, MemoryOrder, MemoryScope) { \
+ return __hip_atomic_fetch_add(ptr, \
+ -1, \
+ HIPMemoryOrder<MemoryOrder>::value, \
+ HIPMemoryScope<MemoryScope>::value); \
+ }
+
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC(int)
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC(long long)
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC(unsigned int)
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC(unsigned long long)
+
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_INC
+
+#define DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD(MEMORY_SCOPE, MEMORY_SCOPE_STRING_LITERAL) \
+ template <class MemoryOrder> \
+ __device__ inline unsigned int device_atomic_fetch_inc_mod( \
+ unsigned int* ptr, unsigned int val, MemoryOrder, MEMORY_SCOPE) { \
+ return __builtin_amdgcn_atomic_inc32( \
+ ptr, val, HIPMemoryOrder<MemoryOrder>::value, MEMORY_SCOPE_STRING_LITERAL); \
+ } \
+ template <class MemoryOrder> \
+ __device__ inline unsigned int device_atomic_fetch_dec_mod( \
+ unsigned int* ptr, unsigned int val, MemoryOrder, MEMORY_SCOPE) { \
+ return __builtin_amdgcn_atomic_dec32( \
+ ptr, val, HIPMemoryOrder<MemoryOrder>::value, MEMORY_SCOPE_STRING_LITERAL); \
+ }
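+
+// Semantics sketch (illustration only, mirroring CUDA's atomicInc/atomicDec):
+//   fetch_inc_mod: old = *ptr; *ptr = (old >= val) ? 0u : old + 1u;              return old;
+//   fetch_dec_mod: old = *ptr; *ptr = (old == 0u || old > val) ? val : old - 1u; return old;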
+
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD(MemoryScopeCore, "workgroup")
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD(MemoryScopeDevice, "agent")
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD(MemoryScopeNode, "")
+DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD(MemoryScopeSystem, "")
+
+#undef DESUL_IMPL_HIP_ATOMIC_FETCH_INC_MOD
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_FETCH_OP_OPENACC_HPP_
+#define DESUL_ATOMICS_FETCH_OP_OPENACC_HPP_
+
+#include <algorithm> // min, max
+#include <desul/atomics/Common.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+#ifdef __NVCOMPILER
+
+template <class T>
+inline constexpr bool is_openacc_integral_type_v =
+ std::is_same_v<T, int> || std::is_same_v<T, unsigned int> ||
+ std::is_same_v<T, unsigned long long>;
+
+template <class T>
+inline constexpr bool is_openacc_arithmetic_type_v = std::is_same_v<T, float> ||
+#ifndef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+ std::is_same_v<T, double> ||
+#endif
+ is_openacc_integral_type_v<T>;
+
+#else
+
+template <class T>
+inline constexpr bool is_openacc_integral_type_v = std::is_integral_v<T>;
+
+template <class T>
+inline constexpr bool is_openacc_arithmetic_type_v = std::is_arithmetic_v<T>;
+
+#endif
+
+//<editor-fold desc="device_atomic_fetch_{add,sub,mul,div,lshift,rshift,mod,max,min,and,or,xor}">
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_add(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr += val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_inc(
+ T* ptr, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr += T(1);
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_sub(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr -= val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_dec(
+ T* ptr, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr -= T(1);
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_mul(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr *= val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_div(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr /= val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_fetch_lshift(
+ T* ptr, const unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr = *ptr << val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_fetch_rshift(
+ T* ptr, const unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr = *ptr >> val;
+ }
+ return old;
+}
+
+#ifdef __NVCOMPILER
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_max(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+ old = atomicMax(ptr, val);
+ return old;
+}
+#endif
+
+#ifdef __NVCOMPILER
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_fetch_min(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  T old;
+ old = atomicMin(ptr, val);
+ return old;
+}
+#endif
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_fetch_and(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr &= val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_fetch_or(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr |= val;
+ }
+ return old;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_fetch_xor(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T old;
+#pragma acc atomic capture
+ {
+ old = *ptr;
+ *ptr ^= val;
+ }
+ return old;
+}
+//</editor-fold>
+
+//<editor-fold desc="device_atomic_{add,sub,mul,div,lshift,rshift,mod,max,min,and,or,xor}_fetch">
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_add_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr += val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_inc_fetch(
+ T* ptr, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr += T(1);
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_sub_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr -= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_dec_fetch(
+ T* ptr, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr -= T(1);
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_mul_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr *= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_div_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr /= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_lshift_fetch(
+ T* ptr, const unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr = *ptr << val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_rshift_fetch(
+ T* ptr, const unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr = *ptr >> val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#ifdef __NVCOMPILER
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_max_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
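+  // atomicMax returns the previous value; applying max once more reconstructs the
+  // post-update value that *_fetch must return (min_fetch below works the same way)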
+ tmp = atomicMax(ptr, val);
+ tmp = std::max(tmp, val);
+ return tmp;
+}
+#endif
+
+#ifdef __NVCOMPILER
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_min_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+ tmp = atomicMin(ptr, val);
+ tmp = std::min(tmp, val);
+ return tmp;
+}
+#endif
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_and_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr &= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_or_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr |= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_integral_type_v<T>, T> device_atomic_xor_fetch(
+ T* ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma acc atomic capture
+ {
+ *ptr ^= val;
+ tmp = *ptr;
+ }
+ return tmp;
+}
+//</editor-fold>
+
+//<editor-fold desc="device_atomic_{store,load}">
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, void> device_atomic_store(
+ T* const ptr, const T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+#pragma acc atomic write
+ *ptr = val;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, void> device_atomic_store(
+ T* const ptr, const T val, MemoryOrderRelease, MemoryScopeDevice) {
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+        "DESUL error in device_atomic_store(MemoryOrderRelease): Unsupported atomic "
+        "operation in the OpenACC backend\n");
+ }
+#pragma acc atomic write
+ *ptr = val;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_load(
+ const T* const ptr, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T retval;
+#pragma acc atomic read
+ retval = *ptr;
+ return retval;
+}
+
+#pragma acc routine seq
+template <class T>
+std::enable_if_t<is_openacc_arithmetic_type_v<T>, T> device_atomic_load(
+ const T* const ptr, MemoryOrderAcquire, MemoryScopeDevice) {
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+        "DESUL error in device_atomic_load(MemoryOrderAcquire): Unsupported atomic "
+        "operation in the OpenACC backend\n");
+ }
+ T retval;
+#pragma acc atomic read
+ retval = *ptr;
+ return retval;
+}
+//</editor-fold>
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_FETCH_OP_OPENMP_HPP_
+#define DESUL_ATOMICS_FETCH_OP_OPENMP_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/openmp/OpenMP_40.hpp>
+
+#if 0 // FIXME_OPENMP
+namespace desul {
+namespace Impl {
+
+// clang-format off
+//<editor-fold desc="atomic_fetch_{add,sub,and,or,xor}">
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_add(
+T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { tmp = *ptr; *ptr += val; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_sub(
+T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { tmp = *ptr; *ptr -= val; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_and(
+T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { tmp = *ptr; *ptr &= val; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_or(
+T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { tmp = *ptr; *ptr |= val; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_xor(
+T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { tmp = *ptr; *ptr ^= val; }
+ return tmp;
+}
+//</editor-fold>
+
+//<editor-fold desc="atomic_{add,sub,and,or,xor}_fetch">
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_add_fetch(
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { *ptr += val; tmp = *ptr; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_sub_fetch(
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { *ptr -= val; tmp = *ptr; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_and_fetch(
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { *ptr &= val; tmp = *ptr; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_or_fetch(
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { *ptr |= val; tmp = *ptr; }
+ return tmp;
+}
+
+template <class T>
+std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_xor_fetch(
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+ T tmp;
+#pragma omp atomic capture
+ { *ptr ^= val; tmp = *ptr; }
+ return tmp;
+}
+//</editor-fold>
+// clang-format on
+
+#define DESUL_IMPL_OPENMP_HOST_ATOMIC_FETCH_OP_ARITHMETIC(OP, MEMORY_ORDER) \
+ template <class T> \
+ std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_fetch_##OP( \
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeCore) { \
+ return host_atomic_fetch_##OP( \
+ ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice()); \
+ } \
+ template <class T> \
+ std::enable_if_t<std::is_arithmetic<T>::value, T> host_atomic_##OP##_fetch( \
+ T* ptr, T val, MemoryOrderRelaxed, MemoryScopeCore) { \
+ return host_atomic_##OP##_fetch( \
+ ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice()); \
+ }
+
+} // namespace Impl
+} // namespace desul
+#endif
+
+#endif
SPDX-License-Identifier: (BSD-3-Clause)
*/
-#ifndef DESUL_ATOMICS_SYCL_HPP_
-#define DESUL_ATOMICS_SYCL_HPP_
-#ifdef DESUL_HAVE_SYCL_ATOMICS
+#ifndef DESUL_ATOMICS_FETCH_OP_SYCL_HPP_
+#define DESUL_ATOMICS_FETCH_OP_SYCL_HPP_
-// clang-format off
-#include "desul/atomics/SYCLConversions.hpp"
-#include "desul/atomics/Common.hpp"
-// clang-format on
+#include <desul/atomics/Adapt_SYCL.hpp>
+#include <desul/atomics/Common.hpp>
namespace desul {
+namespace Impl {
-#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, TYPE) \
- template <class MemoryOrder> \
- TYPE atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeDevice) { \
- Impl::sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeDevice> dest_ref(*dest); \
- return dest_ref.fetch_##OPER(val); \
- } \
- template <class MemoryOrder> \
- TYPE atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeCore) { \
- Impl::sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeCore> dest_ref(*dest); \
- return dest_ref.fetch_##OPER(val); \
+// clang-format off
+#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, TYPE) \
+ template <class MemoryOrder> \
+ TYPE device_atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeDevice) { \
+ sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeDevice> dest_ref(*dest); \
+ return dest_ref.fetch_##OPER(val); \
+ } \
+ template <class MemoryOrder> \
+ TYPE device_atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeCore ) { \
+ sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeCore> dest_ref(*dest); \
+ return dest_ref.fetch_##OPER(val); \
}
+// clang-format on
#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(OPER) \
DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, int) \
#undef DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL
#undef DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER
+} // namespace Impl
} // namespace desul
-#endif // DESUL_HAVE_SYCL_ATOMICS
-#endif // DESUL_ATOMICS_SYCL_HPP_
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_FETCH_OP_SCOPECALLER_HPP_
+#define DESUL_ATOMICS_FETCH_OP_SCOPECALLER_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Macros.hpp>
+
+namespace desul {
+namespace Impl {
+
+#define DESUL_IMPL_ATOMIC_FETCH_OPER(ANNOTATION, HOST_OR_DEVICE) \
+ template <class Oper, class T, class MemoryOrder> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_fetch_oper( \
+ const Oper& op, \
+ T* const dest, \
+ dont_deduce_this_parameter_t<const T> val, \
+ MemoryOrder /*order*/, \
+ MemoryScopeCaller /*scope*/) { \
+ T oldval = *dest; \
+ *dest = op.apply(oldval, val); \
+ return oldval; \
+ } \
+ \
+ template <class Oper, class T, class MemoryOrder> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_oper_fetch( \
+ const Oper& op, \
+ T* const dest, \
+ dont_deduce_this_parameter_t<const T> val, \
+ MemoryOrder /*order*/, \
+ MemoryScopeCaller /*scope*/) { \
+ T oldval = *dest; \
+ T newval = op.apply(oldval, val); \
+ *dest = newval; \
+ return newval; \
+ }
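+
+// (MemoryScopeCaller promises atomicity and visibility only with respect to the
+// calling thread, which is why plain, non-atomic reads and writes suffice here.)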
+
+DESUL_IMPL_ATOMIC_FETCH_OPER(DESUL_IMPL_HOST_FUNCTION, host)
+DESUL_IMPL_ATOMIC_FETCH_OPER(DESUL_IMPL_DEVICE_FUNCTION, device)
+
+#undef DESUL_IMPL_ATOMIC_FETCH_OPER
+
+} // namespace Impl
+} // namespace desul
+
+// FIXME: consider implementing atomic_fetch_##OP and atomic_##OP##_fetch directly,
+// or dropping this placeholder
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_GENERIC_HPP_
+#define DESUL_ATOMICS_GENERIC_HPP_
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Compare_Exchange.hpp>
+#include <desul/atomics/Fetch_Op.hpp>
+#include <desul/atomics/Lock_Array.hpp>
+#include <desul/atomics/Macros.hpp>
+#include <desul/atomics/Thread_Fence.hpp>
+#include <type_traits>
+
+namespace desul {
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_thread_fence(order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_thread_fence(order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_exchange(T* dest, T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_exchange(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_exchange(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_compare_exchange(T* dest, T cmp, T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(
+ return Impl::device_atomic_compare_exchange(dest, cmp, val, order, scope);)
+ DESUL_IF_ON_HOST(
+ return Impl::host_atomic_compare_exchange(dest, cmp, val, order, scope);)
+}
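+
+// Minimal usage sketch (illustration only):
+//
+//   int x = 0;
+//   int old = desul::atomic_compare_exchange(
+//       &x, 0, 42, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+//   // old == 0 indicates the exchange succeeded; otherwise old is the observed value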
+
+// Fetch_Oper atomics: return value before operation
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_add(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_add(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_add(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_sub(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_sub(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_sub(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_max(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_max(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_max(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_min(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_min(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_min(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_mul(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_mul(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_mul(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_div(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_div(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_div(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_mod(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_mod(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_mod(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_and(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_and(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_and(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_or(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_or(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_or(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_xor(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_xor(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_xor(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_nand(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_nand(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_nand(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_lshift(T* const dest,
+ const unsigned int val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_lshift(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_lshift(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_rshift(T* const dest,
+ const unsigned int val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_rshift(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_rshift(dest, val, order, scope);)
+}
+
+// Oper Fetch atomics: return value after operation
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_add_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_add_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_add_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_sub_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_sub_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_sub_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_max_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_max_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_max_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_min_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_min_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_min_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_mul_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_mul_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_mul_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_div_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_div_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_div_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_mod_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_mod_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_mod_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_and_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_and_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_and_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_or_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_or_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_or_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_xor_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_xor_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_xor_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_nand_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_nand_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_nand_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_lshift_fetch(T* const dest,
+ const unsigned int val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_lshift_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_lshift_fetch(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_rshift_fetch(T* const dest,
+ const unsigned int val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_rshift_fetch(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_rshift_fetch(dest, val, order, scope);)
+}
+
+// Other atomics
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_load(const T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_load(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_load(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_store(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_store(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_store(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_add(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_add(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_add(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_sub(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_sub(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_sub(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_mul(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_mul(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_mul(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_div(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_div(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_div(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_min(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_min(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_min(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_max(T* const dest,
+ const T val,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_max(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_max(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_inc_fetch(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_inc_fetch(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_inc_fetch(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_dec_fetch(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_dec_fetch(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_dec_fetch(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_inc(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_inc(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_inc(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_inc_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_inc_mod(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_inc_mod(dest, val, order, scope);)
+}
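+
+// Editorial note, inferred from Impl::inc_mod_fetch_operator further below:
+// the *_inc_mod variant increments *dest but wraps to zero once the old value
+// reaches val. For example, with *dest == 2 and val == 2, it stores 0 and
+// returns the old value 2. atomic_fetch_dec_mod decrements analogously.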
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_dec(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_dec(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_dec(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_dec_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_fetch_dec_mod(dest, val, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_fetch_dec_mod(dest, val, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_inc(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_inc(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_inc(dest, order, scope);)
+}
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_dec(T* const dest,
+ MemoryOrder order,
+ MemoryScope scope) {
+ DESUL_IF_ON_DEVICE(return Impl::device_atomic_dec(dest, order, scope);)
+ DESUL_IF_ON_HOST(return Impl::host_atomic_dec(dest, order, scope);)
+}
+
+// FIXME
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T,
+ class SuccessMemoryOrder,
+ class FailureMemoryOrder,
+ class MemoryScope>
+DESUL_INLINE_FUNCTION bool atomic_compare_exchange_strong(
+ T* const dest,
+ T& expected,
+ T desired,
+ SuccessMemoryOrder success,
+ FailureMemoryOrder /*failure*/,
+ MemoryScope scope) {
+ T const old = atomic_compare_exchange(dest, expected, desired, success, scope);
+ if (old != expected) {
+ expected = old;
+ return false;
+ } else {
+ return true;
+ }
+}
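+
+// Usage sketch (illustrative only): like std::atomic, the strong
+// compare-exchange updates `expected` on failure, so it can drive a classic
+// CAS retry loop; `transform` below is a hypothetical update function.
+//
+//   T expected = *dest;
+//   T desired;
+//   do {
+//     desired = transform(expected);
+//   } while (!desul::atomic_compare_exchange_strong(
+//       dest, expected, desired,
+//       desul::MemoryOrderSeqCst(), desul::MemoryOrderSeqCst(),
+//       desul::MemoryScopeDevice()));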
+
+DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+template <class T,
+ class SuccessMemoryOrder,
+ class FailureMemoryOrder,
+ class MemoryScope>
+DESUL_INLINE_FUNCTION bool atomic_compare_exchange_weak(T* const dest,
+ T& expected,
+ T desired,
+ SuccessMemoryOrder success,
+ FailureMemoryOrder failure,
+ MemoryScope scope) {
+ return atomic_compare_exchange_strong(
+ dest, expected, desired, success, failure, scope);
+}
+
+} // namespace desul
+
+#endif
#ifndef DESUL_ATOMICS_LOCK_ARRAY_HPP_
#define DESUL_ATOMICS_LOCK_ARRAY_HPP_
-#include "desul/atomics/Compare_Exchange.hpp"
-#include "desul/atomics/Lock_Array_Cuda.hpp"
-#include "desul/atomics/Lock_Array_HIP.hpp"
-#include "desul/atomics/Macros.hpp"
+#include <desul/atomics/Compare_Exchange.hpp>
+#include <desul/atomics/Macros.hpp>
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#include <desul/atomics/Lock_Array_CUDA.hpp>
+#endif
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#include <desul/atomics/Lock_Array_HIP.hpp>
+#endif
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+#include <desul/atomics/Lock_Array_SYCL.hpp>
+#endif
namespace desul {
namespace Impl {
-struct host_locks__ {
+
+struct HostLocks {
static constexpr uint32_t HOST_SPACE_ATOMIC_MASK = 0xFFFF;
static constexpr uint32_t HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
- template <typename is_always_void = void>
+ template <class is_always_void = void>
static int32_t* get_host_locks_() {
- static int32_t HOST_SPACE_ATOMIC_LOCKS_DEVICE[HOST_SPACE_ATOMIC_MASK + 1] = {0};
+ static int32_t HOST_SPACE_ATOMIC_LOCKS_DEVICE[HOST_SPACE_ATOMIC_MASK + 1] = {};
return HOST_SPACE_ATOMIC_LOCKS_DEVICE;
}
static inline int32_t* get_host_lock_(void* ptr) {
inline void init_lock_arrays() {
static bool is_initialized = false;
if (!is_initialized) {
- host_locks__::get_host_locks_();
+ HostLocks::get_host_locks_();
is_initialized = true;
}
finalize_lock_arrays_hip();
#endif
}
-template <typename MemoryScope>
-inline bool lock_address(void* ptr, MemoryScope ms) {
- return 0 ==
- atomic_exchange(
- host_locks__::get_host_lock_(ptr), int32_t(1), MemoryOrderSeqCst(), ms);
+
+inline void ensure_lock_arrays_on_device() {
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+ ensure_cuda_lock_arrays_on_device();
+#endif
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+ ensure_hip_lock_arrays_on_device();
+#endif
+}
+
+template <class MemoryScope>
+bool lock_address(void* ptr, MemoryScope ms) {
+ return 0 == atomic_exchange(
+ HostLocks::get_host_lock_(ptr), int32_t(1), MemoryOrderSeqCst(), ms);
}
-template <typename MemoryScope>
+
+template <class MemoryScope>
void unlock_address(void* ptr, MemoryScope ms) {
(void)atomic_exchange(
- host_locks__::get_host_lock_(ptr), int32_t(0), MemoryOrderSeqCst(), ms);
+ HostLocks::get_host_lock_(ptr), int32_t(0), MemoryOrderSeqCst(), ms);
}
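+
+// Usage sketch (illustrative only): lock_address/unlock_address form a
+// spinlock keyed on a hash of the address; the lock-based fetch-op fallbacks
+// for non-lock-free types rely on exactly this pattern:
+//
+//   while (!desul::Impl::lock_address(ptr, desul::MemoryScopeDevice())) {
+//   }
+//   // ... critical section on *ptr ...
+//   desul::Impl::unlock_address(ptr, desul::MemoryScopeDevice());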
+
} // namespace Impl
} // namespace desul
#ifndef DESUL_ATOMICS_LOCK_ARRAY_CUDA_HPP_
#define DESUL_ATOMICS_LOCK_ARRAY_CUDA_HPP_
+#include <cstdint>
+
#include "desul/atomics/Common.hpp"
#include "desul/atomics/Macros.hpp"
-#ifdef DESUL_HAVE_CUDA_ATOMICS
-
-#include <cstdint>
-
namespace desul {
namespace Impl {
-#ifdef __CUDA_ARCH__
-#define DESUL_IMPL_BALLOT_MASK(m, x) __ballot_sync(m, x)
-#define DESUL_IMPL_ACTIVEMASK __activemask()
-#else
-#define DESUL_IMPL_BALLOT_MASK(m, x) m == 0 ? 0 : 1
-#define DESUL_IMPL_ACTIVEMASK 0
-#endif
-
/// \brief This global variable in Host space is the central definition
/// of these arrays.
extern int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h;
template <typename /*AlwaysInt*/ = int>
void finalize_lock_arrays_cuda();
-} // namespace Impl
-} // namespace desul
-
-#if defined(__CUDACC__)
-
-namespace desul {
-namespace Impl {
-
/// \brief This global variable in CUDA space is what kernels use
/// to get access to the lock arrays.
///
/// variable based on the Host global variable prior to running any kernels
/// that will use it.
/// That is the purpose of the ensure_cuda_lock_arrays_on_device function.
-__device__
-#ifdef __CUDACC_RDC__
- __constant__ extern
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
+extern
#endif
- int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE;
+ __device__ __constant__ int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE;
-__device__
-#ifdef __CUDACC_RDC__
- __constant__ extern
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
+extern
#endif
- int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE;
+ __device__ __constant__ int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE;
#define CUDA_SPACE_ATOMIC_MASK 0x1FFFF
atomicExch(&desul::Impl::CUDA_SPACE_ATOMIC_LOCKS_NODE[offset], 0);
}
-} // namespace Impl
-} // namespace desul
-
-// Make lock_array_copied an explicit translation unit scope thingy
-namespace desul {
-namespace Impl {
-namespace {
-static int lock_array_copied = 0;
-inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
-} // namespace
-
-#ifdef __CUDACC_RDC__
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
inline
#else
-static
+inline static
#endif
void
copy_cuda_lock_arrays_to_device() {
- if (lock_array_copied == 0) {
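+ // C++11 "magic static": the lambda body runs exactly once, with thread-safe
+ // initialization, replacing the old lock_array_copied flag.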
+ static bool once = []() {
cudaMemcpyToSymbol(CUDA_SPACE_ATOMIC_LOCKS_DEVICE,
&CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h,
sizeof(int32_t*));
cudaMemcpyToSymbol(CUDA_SPACE_ATOMIC_LOCKS_NODE,
&CUDA_SPACE_ATOMIC_LOCKS_NODE_h,
sizeof(int32_t*));
- }
- lock_array_copied = 1;
+ return true;
+ }();
+ (void)once;
}
} // namespace Impl
} // namespace desul
-#endif /* defined( __CUDACC__ ) */
-
-#endif /* defined( DESUL_HAVE_CUDA_ATOMICS ) */
-
namespace desul {
-#if defined(__CUDACC_RDC__) || (!defined(__CUDACC__))
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
inline void ensure_cuda_lock_arrays_on_device() {}
#else
static inline void ensure_cuda_lock_arrays_on_device() {
#ifndef DESUL_ATOMICS_LOCK_ARRAY_HIP_HPP_
#define DESUL_ATOMICS_LOCK_ARRAY_HIP_HPP_
-#include "desul/atomics/Common.hpp"
-#include "desul/atomics/Macros.hpp"
-
-#ifdef DESUL_HAVE_HIP_ATOMICS
-
#include <hip/hip_runtime.h>
#include <cstdint>
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Macros.hpp"
+
namespace desul {
namespace Impl {
-#ifdef __HIP_DEVICE_COMPILE__
-#define DESUL_IMPL_BALLOT_MASK(x) __ballot(x)
-#else
-#define DESUL_IMPL_BALLOT_MASK(x) 0
-#endif
-
/**
* \brief This global variable in Host space is the central definition of these
* arrays.
template <typename /*AlwaysInt*/ = int>
void init_lock_arrays_hip();
-/// \brief After this call, the g_host_cuda_lock_arrays variable has
+/// \brief After this call, the g_host_hip_lock_arrays variable has
/// all null pointers, and all array memory has been freed.
///
/// This call is idempotent.
/// snapshotted version while also linking against pure Desul
template <typename /*AlwaysInt*/ = int>
void finalize_lock_arrays_hip();
-} // namespace Impl
-} // namespace desul
-
-#ifdef __HIPCC__
-namespace desul {
-namespace Impl {
/**
* \brief This global variable in HIP space is what kernels use to get access
* be created in every translation unit that sees this header file (we make this
* clear by marking it static, meaning no other translation unit can link to
* it). Since the Kokkos_HIP_Locks.cpp translation unit cannot initialize the
- * instances in other translation units, we must update this CUDA global
+ * instances in other translation units, we must update this HIP global
* variable based on the Host global variable prior to running any kernels that
* will use it. That is the purpose of the
- * KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE macro.
+ * ensure_hip_lock_arrays_on_device function.
*/
-__device__
-#ifdef DESUL_HIP_RDC
- __constant__ extern
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+extern
#endif
- int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE;
+ __device__ __constant__ int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE;
-__device__
-#ifdef DESUL_HIP_RDC
- __constant__ extern
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+extern
#endif
- int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE;
+ __device__ __constant__ int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE;
#define HIP_SPACE_ATOMIC_MASK 0x1FFFF
offset = offset & HIP_SPACE_ATOMIC_MASK;
atomicExch(&desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE[offset], 0);
}
-#endif
-} // namespace Impl
-} // namespace desul
-
-// Make lock_array_copied an explicit translation unit scope thing
-namespace desul {
-namespace Impl {
-namespace {
-static int lock_array_copied = 0;
-inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
-} // namespace
-} // namespace Impl
-} // namespace desul
-
-/* It is critical that this code be a macro, so that it will
- capture the right address for g_device_hip_lock_arrays!
- putting this in an inline function will NOT do the right thing! */
-#define DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE() \
- { \
- if (::desul::Impl::lock_array_copied == 0) { \
- (void)hipMemcpyToSymbol( \
- HIP_SYMBOL(::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE), \
- &::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE_h, \
- sizeof(int32_t*)); \
- (void)hipMemcpyToSymbol(HIP_SYMBOL(::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE), \
- &::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE_h, \
- sizeof(int32_t*)); \
- } \
- ::desul::Impl::lock_array_copied = 1; \
- }
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+inline
+#else
+inline static
#endif
+ void
+ copy_hip_lock_arrays_to_device() {
+ static bool once = []() {
+ (void)hipMemcpyToSymbol(HIP_SYMBOL(HIP_SPACE_ATOMIC_LOCKS_DEVICE),
+ &HIP_SPACE_ATOMIC_LOCKS_DEVICE_h,
+ sizeof(int32_t*));
+ (void)hipMemcpyToSymbol(HIP_SYMBOL(HIP_SPACE_ATOMIC_LOCKS_NODE),
+ &HIP_SPACE_ATOMIC_LOCKS_NODE_h,
+ sizeof(int32_t*));
+ return true;
+ }();
+ (void)once;
+}
+} // namespace Impl
-#if defined(DESUL_HIP_RDC) || (!defined(__HIPCC__))
-#define DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+inline void ensure_hip_lock_arrays_on_device() {}
#else
-#define DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
- DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()
+static inline void ensure_hip_lock_arrays_on_device() {
+ Impl::copy_hip_lock_arrays_to_device();
+}
#endif
+} // namespace desul
+
#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_ARRAY_SYCL_HPP_
+#define DESUL_ATOMICS_LOCK_ARRAY_SYCL_HPP_
+
+#include <cstdint>
+
+#include "desul/atomics/Adapt_SYCL.hpp"
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Macros.hpp"
+
+// FIXME_SYCL
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+namespace desul {
+namespace Impl {
+
+// FIXME_SYCL Use SYCL_EXT_ONEAPI_DEVICE_GLOBAL when available instead
+#ifdef DESUL_SYCL_DEVICE_GLOBAL_SUPPORTED
+
+/**
+ * \brief This global variable in Host space is the central definition of these
+ * arrays.
+ */
+extern int32_t* SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h;
+extern int32_t* SYCL_SPACE_ATOMIC_LOCKS_NODE_h;
+
+/// \brief After this call, the lock arrays used in [un]lock_address_sycl
+/// are initialized and ready to be used.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a Kokkos/RAJA
+/// snapshotted version can coexist with linking against pure Desul
+template <typename /*AlwaysInt*/ = int>
+void init_lock_arrays_sycl(sycl::queue q);
+
+/// \brief After this call, the lock arrays used in [un]lock_address_sycl
+/// are freed and can't be used anymore.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a Kokkos/RAJA
+/// snapshotted version can coexist with linking against pure Desul
+template <typename /*AlwaysInt*/ = int>
+void finalize_lock_arrays_sycl(sycl::queue q);
+
+/**
+ * \brief This global variable in SYCL space is what kernels use to get access
+ * to the lock arrays.
+ *
+ * When relocatable device code is enabled, there is only one single instance of this
+ * global variable for the entire executable, whose definition will be in
+ * Kokkos_SYCL_Locks.cpp (and whose declaration here must then be extern). This one
+ * instance will be initialized by initialize_host_sycl_lock_arrays and need not be
+ * modified afterwards.
+ *
+ * When relocatable device code is disabled, an instance of this variable will be
+ * created in every translation unit that sees this header file (we make this clear by
+ * marking it static, meaning no other translation unit can link to it). Since the
+ * Kokkos_SYCL_Locks.cpp translation unit cannot initialize the instances in other
+ * translation units, we must update this SYCL global variable based on the Host global
+ * variable prior to running any kernels that will use it. That is the purpose of the
+ * ensure_sycl_lock_arrays_on_device function.
+ */
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+SYCL_EXTERNAL extern
+#else
+static
+#endif
+ sycl_device_global<int32_t*>
+ SYCL_SPACE_ATOMIC_LOCKS_DEVICE;
+
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+SYCL_EXTERNAL extern
+#else
+static
+#endif
+ sycl_device_global<int32_t*>
+ SYCL_SPACE_ATOMIC_LOCKS_NODE;
+
+#define SYCL_SPACE_ATOMIC_MASK 0x1FFFF
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+inline bool lock_address_sycl(void* ptr, MemoryScopeDevice) {
+ size_t offset = size_t(ptr);
+ offset = offset >> 2;
+ offset = offset & SYCL_SPACE_ATOMIC_MASK;
+ sycl::atomic_ref<int32_t,
+ sycl::memory_order::relaxed,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ lock_device_ref(SYCL_SPACE_ATOMIC_LOCKS_DEVICE[offset]);
+ return (0 == lock_device_ref.exchange(1));
+}
+
+inline bool lock_address_sycl(void* ptr, MemoryScopeNode) {
+ size_t offset = size_t(ptr);
+ offset = offset >> 2;
+ offset = offset & SYCL_SPACE_ATOMIC_MASK;
+ sycl::atomic_ref<int32_t,
+ sycl::memory_order::relaxed,
+ sycl::memory_scope::system,
+ sycl::access::address_space::global_space>
+ lock_node_ref(SYCL_SPACE_ATOMIC_LOCKS_NODE[offset]);
+ return (0 == lock_node_ref.exchange(1));
+}
+
+/**
+ * \brief Release lock for the address
+ *
+ * This function releases the lock for the hash value derived from the provided
+ * ptr. This function should only be called after previously successfully
+ * acquiring a lock with lock_address.
+ */
+inline void unlock_address_sycl(void* ptr, MemoryScopeDevice) {
+ size_t offset = size_t(ptr);
+ offset = offset >> 2;
+ offset = offset & SYCL_SPACE_ATOMIC_MASK;
+ sycl::atomic_ref<int32_t,
+ sycl::memory_order::relaxed,
+ sycl::memory_scope::device,
+ sycl::access::address_space::global_space>
+ lock_device_ref(SYCL_SPACE_ATOMIC_LOCKS_DEVICE[offset]);
+ lock_device_ref.exchange(0);
+}
+
+inline void unlock_address_sycl(void* ptr, MemoryScopeNode) {
+ size_t offset = size_t(ptr);
+ offset = offset >> 2;
+ offset = offset & SYCL_SPACE_ATOMIC_MASK;
+ sycl::atomic_ref<int32_t,
+ sycl::memory_order::relaxed,
+ sycl::memory_scope::system,
+ sycl::access::address_space::global_space>
+ lock_node_ref(SYCL_SPACE_ATOMIC_LOCKS_NODE[offset]);
+ lock_node_ref.exchange(0);
+}
+
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+inline
+#else
+inline static
+#endif
+ void
+ copy_sycl_lock_arrays_to_device(sycl::queue q) {
+ static bool once = [&q]() {
+#ifdef SYCL_EXT_ONEAPI_DEVICE_GLOBAL
+ q.memcpy(SYCL_SPACE_ATOMIC_LOCKS_DEVICE,
+ &SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h,
+ sizeof(int32_t*));
+ q.memcpy(SYCL_SPACE_ATOMIC_LOCKS_NODE,
+ &SYCL_SPACE_ATOMIC_LOCKS_NODE_h,
+ sizeof(int32_t*));
+#else
+ auto device_ptr = SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h;
+ auto node_ptr = SYCL_SPACE_ATOMIC_LOCKS_NODE_h;
+ q.single_task([=] {
+ SYCL_SPACE_ATOMIC_LOCKS_DEVICE.get() = device_ptr;
+ SYCL_SPACE_ATOMIC_LOCKS_NODE.get() = node_ptr;
+ });
+#endif
+ return true;
+ }();
+ (void)once;
+}
+
+#else // not supported
+
+template <typename /*AlwaysInt*/ = int>
+void init_lock_arrays_sycl(sycl::queue) {
+ assert(false);
+}
+
+template <typename /*AlwaysInt*/ = int>
+void finalize_lock_arrays_sycl(sycl::queue) {
+ assert(false);
+}
+
+inline bool lock_address_sycl(void*, MemoryScopeDevice) {
+ assert(false);
+ // return true so that the CAS loops don't hang.
+ return true;
+}
+
+inline bool lock_address_sycl(void*, MemoryScopeNode) {
+ assert(false);
+ // return true so that the CAS loops don't hang.
+ return true;
+}
+
+inline void unlock_address_sycl(void*, MemoryScopeDevice) { assert(false); }
+
+inline void unlock_address_sycl(void*, MemoryScopeNode) { assert(false); }
+
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+inline
+#else
+inline static
+#endif
+ void
+ copy_sycl_lock_arrays_to_device(sycl::queue) {
+}
+
+#endif
+} // namespace Impl
+
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+inline void ensure_sycl_lock_arrays_on_device(sycl::queue) {}
+#else
+static inline void ensure_sycl_lock_arrays_on_device(sycl::queue q) {
+ Impl::copy_sycl_lock_arrays_to_device(q);
+}
+#endif
+
+} // namespace desul
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HPP_
+
+#include <desul/atomics/Macros.hpp>
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#include <desul/atomics/Lock_Based_Fetch_Op_CUDA.hpp>
+#endif
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#include <desul/atomics/Lock_Based_Fetch_Op_HIP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENACC_ATOMICS
+#include <desul/atomics/Lock_Based_Fetch_Op_OpenACC.hpp>
+#endif
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+#include <desul/atomics/Lock_Based_Fetch_Op_SYCL.hpp>
+#endif
+
+#include <desul/atomics/Lock_Based_Fetch_Op_Host.hpp>
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_CUDA_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_CUDA_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_CUDA.hpp>
+#include <desul/atomics/Thread_Fence_CUDA.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+__device__ T device_atomic_fetch_oper(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned int mask = __activemask();
+ unsigned int active = __ballot_sync(mask, 1);
+ unsigned int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_cuda((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = op.apply(return_val, val);
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_cuda((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot_sync(mask, done);
+ }
+ return return_val;
+}
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+__device__ T device_atomic_oper_fetch(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned int mask = __activemask();
+ unsigned int active = __ballot_sync(mask, 1);
+ unsigned int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_cuda((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = op.apply(*dest, val);
+ *dest = return_val;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_cuda((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot_sync(mask, done);
+ }
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HIP_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HIP_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_HIP.hpp>
+#include <desul/atomics/Thread_Fence_HIP.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+__device__ T device_atomic_fetch_oper(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned long long int active = __ballot(1);
+ unsigned long long int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_hip((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = op.apply(return_val, val);
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_hip((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot(done);
+ }
+ return return_val;
+}
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+__device__ T device_atomic_oper_fetch(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a warp or wave front
+ T return_val;
+ int done = 0;
+ unsigned long long int active = __ballot(1);
+ unsigned long long int done_active = 0;
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_hip((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = op.apply(*dest, val);
+ *dest = return_val;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_hip((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = __ballot(done);
+ }
+ return return_val;
+}
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HOST_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_HOST_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array.hpp>
+#include <desul/atomics/Thread_Fence.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+inline T host_atomic_fetch_oper(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // Acquire a lock for the address
+ while (!lock_address((void*)dest, scope)) {
+ }
+
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = op.apply(return_val, val);
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+inline T host_atomic_oper_fetch(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // Acquire a lock for the address
+ while (!lock_address((void*)dest, scope)) {
+ }
+
+ host_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = op.apply(*dest, val);
+ *dest = return_val;
+ host_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_OPENACC_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_OPENACC_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array.hpp>
+#include <desul/atomics/Thread_Fence.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+inline T device_atomic_fetch_oper(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+ "DESUL error in device_atomic_fetch_oper(): unsupported atomic operation in "
+ "the OpenACC backend\n");
+ }
+ // Acquire a lock for the address
+ while (!lock_address((void*)dest, scope)) {
+ }
+
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = *dest;
+ *dest = op.apply(return_val, val);
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+inline T device_atomic_oper_fetch(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ if (acc_on_device(acc_device_not_host)) {
+ printf(
+ "DESUL error in device_atomic_oper_fetch(): unsupported atomic operation in "
+ "the OpenACC backend\n");
+ }
+ // Acquire a lock for the address
+ while (!lock_address((void*)dest, scope)) {
+ }
+
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ T return_val = op.apply(*dest, val);
+ *dest = return_val;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address((void*)dest, scope);
+ return return_val;
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_BASED_FETCH_OP_SYCL_HPP_
+#define DESUL_ATOMICS_LOCK_BASED_FETCH_OP_SYCL_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Lock_Array_SYCL.hpp>
+#include <desul/atomics/Thread_Fence_SYCL.hpp>
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+T device_atomic_fetch_oper(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a subgroup
+ T return_val;
+ int done = 0;
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20250000
+ auto sg = sycl::ext::oneapi::this_work_item::get_sub_group();
+#else
+ auto sg = sycl::ext::oneapi::experimental::this_sub_group();
+#endif
+ using sycl::ext::oneapi::group_ballot;
+ using sycl::ext::oneapi::sub_group_mask;
+ sub_group_mask active = group_ballot(sg, 1);
+ sub_group_mask done_active = group_ballot(sg, 0);
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_sycl((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = *dest;
+ *dest = op.apply(return_val, val);
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_sycl((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = group_ballot(sg, done);
+ }
+ return return_val;
+}
+
+template <class Oper,
+ class T,
+ class MemoryOrder,
+ class MemoryScope,
+ // equivalent to:
+ // requires !atomic_always_lock_free(sizeof(T))
+ std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+T device_atomic_oper_fetch(const Oper& op,
+ T* const dest,
+ dont_deduce_this_parameter_t<const T> val,
+ MemoryOrder /*order*/,
+ MemoryScope scope) {
+ // This is a way to avoid deadlock in a subgroup
+ T return_val;
+ int done = 0;
+#if defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20250000
+ auto sg = sycl::ext::oneapi::this_work_item::get_sub_group();
+#else
+ auto sg = sycl::ext::oneapi::experimental::this_sub_group();
+#endif
+ using sycl::ext::oneapi::group_ballot;
+ using sycl::ext::oneapi::sub_group_mask;
+ sub_group_mask active = group_ballot(sg, 1);
+ sub_group_mask done_active = group_ballot(sg, 0);
+ while (active != done_active) {
+ if (!done) {
+ if (lock_address_sycl((void*)dest, scope)) {
+ device_atomic_thread_fence(MemoryOrderAcquire(), scope);
+ return_val = op.apply(*dest, val);
+ *dest = return_val;
+ device_atomic_thread_fence(MemoryOrderRelease(), scope);
+ unlock_address_sycl((void*)dest, scope);
+ done = 1;
+ }
+ }
+ done_active = group_ballot(sg, done);
+ }
+ return return_val;
+}
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_FREE_FETCH_OP_HPP_
+#define DESUL_ATOMICS_LOCK_FREE_FETCH_OP_HPP_
+
+#include <desul/atomics/Common.hpp>
+#include <desul/atomics/Compare_Exchange.hpp>
+#include <type_traits>
+
+#if defined(__GNUC__) && (!defined(__clang__))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+
+namespace desul {
+namespace Impl {
+
+#define DESUL_IMPL_ATOMIC_FETCH_OPER(ANNOTATION, HOST_OR_DEVICE) \
+ template <class Oper, \
+ class T, \
+ class MemoryOrder, \
+ class MemoryScope, \
+ std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_fetch_oper( \
+ const Oper& op, \
+ T* const dest, \
+ dont_deduce_this_parameter_t<const T> val, \
+ MemoryOrder order, \
+ MemoryScope scope) { \
+ using cas_t = atomic_compare_exchange_t<T>; \
+ cas_t oldval = reinterpret_cast<cas_t&>(*dest); \
+ cas_t assume = oldval; \
+ \
+ do { \
+ if (check_early_exit(op, reinterpret_cast<T&>(oldval), val)) \
+ return reinterpret_cast<T&>(oldval); \
+ assume = oldval; \
+ T newval = op.apply(reinterpret_cast<T&>(assume), val); \
+ oldval = \
+ HOST_OR_DEVICE##_atomic_compare_exchange(reinterpret_cast<cas_t*>(dest), \
+ assume, \
+ reinterpret_cast<cas_t&>(newval), \
+ order, \
+ scope); \
+ } while (assume != oldval); \
+ \
+ return reinterpret_cast<T&>(oldval); \
+ } \
+ \
+ template <class Oper, \
+ class T, \
+ class MemoryOrder, \
+ class MemoryScope, \
+ std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0> \
+ ANNOTATION T HOST_OR_DEVICE##_atomic_oper_fetch( \
+ const Oper& op, \
+ T* const dest, \
+ dont_deduce_this_parameter_t<const T> val, \
+ MemoryOrder order, \
+ MemoryScope scope) { \
+ using cas_t = atomic_compare_exchange_t<T>; \
+ cas_t oldval = reinterpret_cast<cas_t&>(*dest); \
+ T newval = val; \
+ cas_t assume = oldval; \
+ do { \
+ if (check_early_exit(op, reinterpret_cast<T&>(oldval), val)) \
+ return reinterpret_cast<T&>(oldval); \
+ assume = oldval; \
+ newval = op.apply(reinterpret_cast<T&>(assume), val); \
+ oldval = \
+ HOST_OR_DEVICE##_atomic_compare_exchange(reinterpret_cast<cas_t*>(dest), \
+ assume, \
+ reinterpret_cast<cas_t&>(newval), \
+ order, \
+ scope); \
+ } while (assume != oldval); \
+ \
+ return newval; \
+ }
+
+DESUL_IMPL_ATOMIC_FETCH_OPER(DESUL_IMPL_HOST_FUNCTION, host)
+DESUL_IMPL_ATOMIC_FETCH_OPER(DESUL_IMPL_DEVICE_FUNCTION, device)
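+
+// The two expansions above generate host_atomic_fetch_oper and
+// host_atomic_oper_fetch (annotated DESUL_IMPL_HOST_FUNCTION) as well as the
+// device_atomic_* counterparts (annotated DESUL_IMPL_DEVICE_FUNCTION).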
+
+#undef DESUL_IMPL_ATOMIC_FETCH_OPER
+
+} // namespace Impl
+} // namespace desul
+
+#if defined(__GNUC__) && (!defined(__clang__))
+#pragma GCC diagnostic pop
+#endif
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_MACROS_HPP_
+#define DESUL_ATOMICS_MACROS_HPP_
+
+#include <desul/atomics/Config.hpp>
+
+// Intercept incompatible relocatable device code mode which leads to ODR violations
+#ifdef DESUL_ATOMICS_ENABLE_CUDA
+#if (defined(__clang__) && defined(__CUDA__) && defined(__CLANG_RDC__)) || \
+ defined(__CUDACC_RDC__)
+#define DESUL_IMPL_CUDA_RDC
+#endif
+
+#if (defined(DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION) && \
+ !defined(DESUL_IMPL_CUDA_RDC)) || \
+ (!defined(DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION) && \
+ defined(DESUL_IMPL_CUDA_RDC))
+#error Relocatable device code mode incompatible with desul atomics configuration
+#endif
+
+#ifdef DESUL_IMPL_CUDA_RDC
+#undef DESUL_IMPL_CUDA_RDC
+#endif
+#endif
+
+#ifdef DESUL_ATOMICS_ENABLE_HIP
+#if (defined(DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION) && \
+ !defined(__CLANG_RDC__)) || \
+ (!defined(DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION) && \
+ defined(__CLANG_RDC__))
+#error Relocatable device code mode incompatible with desul atomics configuration
+#endif
+#endif
+
+// Macros
+
+#if defined(DESUL_ATOMICS_ENABLE_CUDA) && defined(__CUDACC__)
+#define DESUL_HAVE_CUDA_ATOMICS
+#endif
+
+#if defined(DESUL_ATOMICS_ENABLE_HIP) && defined(__HIPCC__)
+#define DESUL_HAVE_HIP_ATOMICS
+#endif
+
+#if defined(DESUL_ATOMICS_ENABLE_SYCL) && defined(SYCL_LANGUAGE_VERSION)
+#define DESUL_HAVE_SYCL_ATOMICS
+#endif
+
+#if defined(DESUL_ATOMICS_ENABLE_OPENMP)
+#define DESUL_HAVE_OPENMP_ATOMICS
+#endif
+
+#if defined(DESUL_ATOMICS_ENABLE_OPENACC)
+#define DESUL_HAVE_OPENACC_ATOMICS
+#endif
+
+// ONLY use GNUC atomics if not explicitly told to use OpenMP atomics
+#if !defined(DESUL_HAVE_OPENMP_ATOMICS) && defined(__GNUC__)
+#define DESUL_HAVE_GCC_ATOMICS
+#endif
+
+// Equivalent to above for MSVC atomics
+#if !defined(DESUL_HAVE_OPENMP_ATOMICS) && defined(_MSC_VER)
+#define DESUL_HAVE_MSVC_ATOMICS
+#endif
+
+#if defined(DESUL_HAVE_CUDA_ATOMICS) || defined(DESUL_HAVE_HIP_ATOMICS)
+#define DESUL_FORCEINLINE_FUNCTION inline __host__ __device__
+#define DESUL_INLINE_FUNCTION inline __host__ __device__
+#define DESUL_FUNCTION __host__ __device__
+#define DESUL_IMPL_HOST_FUNCTION __host__
+#define DESUL_IMPL_DEVICE_FUNCTION __device__
+#else
+#define DESUL_FORCEINLINE_FUNCTION inline
+#define DESUL_INLINE_FUNCTION inline
+#define DESUL_FUNCTION
+#define DESUL_IMPL_HOST_FUNCTION
+#define DESUL_IMPL_DEVICE_FUNCTION
+#endif
+
+#define DESUL_IMPL_STRIP_PARENS(X) DESUL_IMPL_ESC(DESUL_IMPL_ISH X)
+#define DESUL_IMPL_ISH(...) DESUL_IMPL_ISH __VA_ARGS__
+#define DESUL_IMPL_ESC(...) DESUL_IMPL_ESC_(__VA_ARGS__)
+#define DESUL_IMPL_ESC_(...) DESUL_IMPL_VAN_##__VA_ARGS__
+#define DESUL_IMPL_VAN_DESUL_IMPL_ISH
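+
+// Expansion sketch (illustrative only): the three helpers above strip one
+// layer of parentheses, which lets callers pass code containing commas:
+//
+//   DESUL_IMPL_STRIP_PARENS((f(a, b); return c;))
+//     -> DESUL_IMPL_ESC(DESUL_IMPL_ISH (f(a, b); return c;))
+//     -> DESUL_IMPL_VAN_DESUL_IMPL_ISH f(a, b); return c;
+//     -> f(a, b); return c;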
+
+#if (defined(DESUL_ATOMICS_ENABLE_CUDA) && defined(__CUDACC__)) && defined(__NVCOMPILER)
+#include <nv/target>
+#define DESUL_IF_ON_DEVICE(CODE) NV_IF_TARGET(NV_IS_DEVICE, CODE)
+#define DESUL_IF_ON_HOST(CODE) NV_IF_TARGET(NV_IS_HOST, CODE)
+#endif
+
+// FIXME OpenMP Offload should differentiate between device and host, but do we need this?
+#if defined(DESUL_HAVE_OPENMP_ATOMICS)
+#if 0
+// Base function.
+static constexpr bool desul_impl_omp_on_host() { return true; }
+
+#pragma omp begin declare variant match(device = {kind(host)})
+static constexpr bool desul_impl_omp_on_host() { return true; }
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(device = {kind(nohost)})
+static constexpr bool desul_impl_omp_on_host() { return false; }
+#pragma omp end declare variant
+
+#define DESUL_IF_ON_DEVICE(CODE) \
+ if constexpr (!desul_impl_omp_on_host()) { \
+ DESUL_IMPL_STRIP_PARENS(CODE) \
+ }
+#define DESUL_IF_ON_HOST(CODE) \
+ if constexpr (desul_impl_omp_on_host()) { \
+ DESUL_IMPL_STRIP_PARENS(CODE) \
+ }
+#else
+#define DESUL_IF_ON_DEVICE(CODE) \
+ {}
+#define DESUL_IF_ON_HOST(CODE) \
+ { DESUL_IMPL_STRIP_PARENS(CODE) }
+#endif
+#endif
+
+#if defined(DESUL_HAVE_OPENACC_ATOMICS)
+#include <openacc.h>
+#ifdef __NVCOMPILER
+// FIXME_OPENACC We cannot determine in a constant expression whether we are on host or
+// on device with NVHPC. We use the device implementation on both sides.
+#define DESUL_IF_ON_DEVICE(CODE) \
+ { DESUL_IMPL_STRIP_PARENS(CODE) }
+#define DESUL_IF_ON_HOST(CODE) \
+ {}
+#else
+#define DESUL_IF_ON_DEVICE(CODE) \
+ if constexpr (acc_on_device(acc_device_not_host)) { \
+ DESUL_IMPL_STRIP_PARENS(CODE) \
+ }
+#define DESUL_IF_ON_HOST(CODE) \
+ if constexpr (acc_on_device(acc_device_host)) { \
+ DESUL_IMPL_STRIP_PARENS(CODE) \
+ }
+#endif
+#define DESUL_IMPL_ACC_ROUTINE_DIRECTIVE _Pragma("acc routine seq")
+#else
+#define DESUL_IMPL_ACC_ROUTINE_DIRECTIVE
+#endif
+
+#if !defined(DESUL_IF_ON_HOST) && !defined(DESUL_IF_ON_DEVICE)
+#if (defined(DESUL_ATOMICS_ENABLE_CUDA) && defined(__CUDA_ARCH__)) || \
+ (defined(DESUL_ATOMICS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)) || \
+ (defined(DESUL_ATOMICS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__))
+#define DESUL_IF_ON_DEVICE(CODE) \
+ { DESUL_IMPL_STRIP_PARENS(CODE) }
+#define DESUL_IF_ON_HOST(CODE) \
+ {}
+#else
+#define DESUL_IF_ON_DEVICE(CODE) \
+ {}
+#define DESUL_IF_ON_HOST(CODE) \
+ { DESUL_IMPL_STRIP_PARENS(CODE) }
+#endif
+#endif
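+
+// Usage sketch (illustrative only): callers wrap CODE in parentheses so that
+// commas survive macro expansion, e.g.
+//
+//   DESUL_IF_ON_HOST((return Impl::host_atomic_load(dest, order, scope);))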
+
+#endif // DESUL_ATOMICS_MACROS_HPP_
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_OPERATOR_FUNCTION_OBJECTS_HPP_
+#define DESUL_ATOMICS_OPERATOR_FUNCTION_OBJECTS_HPP_
+
+#include <desul/atomics/Macros.hpp>
+#include <type_traits>
+
+// Function objects that represent common arithmetic and logical combination
+// operations, to be used in compare-and-exchange based atomic operations
+namespace desul {
+namespace Impl {
+
+template <class Scalar1, class Scalar2>
+struct max_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return (val1 > val2 ? val1 : val2);
+ }
+ DESUL_FORCEINLINE_FUNCTION
+ static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
+ return val1 > val2;
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct min_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return (val1 < val2 ? val1 : val2);
+ }
+ DESUL_FORCEINLINE_FUNCTION
+ static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
+ return val1 < val2;
+ }
+};
+
+template <class Op, class Scalar1, class Scalar2, class = bool>
+struct may_exit_early : std::false_type {};
+
+// This exit early optimization causes weird compiler errors with MSVC 2019
+#ifndef DESUL_HAVE_MSVC_ATOMICS
+template <class Op, class Scalar1, class Scalar2>
+struct may_exit_early<Op,
+ Scalar1,
+ Scalar2,
+ decltype(Op::check_early_exit(std::declval<Scalar1 const&>(),
+ std::declval<Scalar2 const&>()))>
+ : std::true_type {};
+#endif
+
+template <class Op, class Scalar1, class Scalar2>
+constexpr DESUL_FUNCTION
+ std::enable_if_t<may_exit_early<Op, Scalar1, Scalar2>::value, bool>
+ check_early_exit(Op const&, Scalar1 const& val1, Scalar2 const& val2) {
+ return Op::check_early_exit(val1, val2);
+}
+
+template <class Op, class Scalar1, class Scalar2>
+constexpr DESUL_FUNCTION
+ std::enable_if_t<!may_exit_early<Op, Scalar1, Scalar2>::value, bool>
+ check_early_exit(Op const&, Scalar1 const&, Scalar2 const&) {
+ return false;
+}
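+
+// Illustrative example (not part of the upstream source): for
+// max_fetch_operator, check_early_exit(val1, val2) is true when val1 > val2,
+// i.e. when the stored value already dominates the operand; the CAS loops in
+// DESUL_IMPL_ATOMIC_FETCH_OPER then return the current value without
+// attempting a compare-exchange.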
+
+template <class Scalar1, class Scalar2>
+struct add_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 + val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct sub_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 - val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct mul_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 * val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct div_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 / val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct mod_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 % val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct and_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 & val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct or_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 | val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct xor_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 ^ val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct nand_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return ~(val1 & val2);
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct lshift_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return val1 << val2;
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct rshift_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return val1 >> val2;
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct inc_mod_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return ((val1 >= val2) ? Scalar1(0) : val1 + Scalar1(1));
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct dec_mod_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+ return (((val1 == Scalar1(0)) | (val1 > val2)) ? val2 : (val1 - Scalar1(1)));
+ }
+};
+
+template <class Scalar1, class Scalar2>
+struct store_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1&, const Scalar2& val2) { return val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct load_fetch_operator {
+ DESUL_FORCEINLINE_FUNCTION
+ static Scalar1 apply(const Scalar1& val1, const Scalar2&) { return val1; }
+};
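+
+// Illustrative sketch (hypothetical): a generic lock-based fetch-op would apply
+// one of these functors to the current value while holding a lock, e.g.
+//
+//   template <class Op, class T, class U>
+//   T fetch_oper_locked_sketch(T* const dest, U value) {
+//     T return_val = *dest;                  // read the old value
+//     *dest = Op::apply(return_val, value);  // compute and store the new value
+//     return return_val;                     // fetch_* returns the previous value
+//   }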
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_HPP_
+
+#include <desul/atomics/Macros.hpp>
+
+#ifdef DESUL_HAVE_GCC_ATOMICS
+#include <desul/atomics/Thread_Fence_GCC.hpp>
+#endif
+#ifdef DESUL_HAVE_MSVC_ATOMICS
+#include <desul/atomics/Thread_Fence_MSVC.hpp>
+#endif
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#include <desul/atomics/Thread_Fence_CUDA.hpp>
+#endif
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#include <desul/atomics/Thread_Fence_HIP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENMP_ATOMICS
+#include <desul/atomics/Thread_Fence_OpenMP.hpp>
+#endif
+#ifdef DESUL_HAVE_OPENACC_ATOMICS
+#include <desul/atomics/Thread_Fence_OpenACC.hpp>
+#endif
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+#include <desul/atomics/Thread_Fence_SYCL.hpp>
+#endif
+
+#include <desul/atomics/Thread_Fence_ScopeCaller.hpp>
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_CUDA_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_CUDA_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+// clang-format off
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeCore ) { __threadfence_block(); }
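+// Node-scope fences map to __threadfence_system(); they are omitted on
+// pre-Pascal architectures, which lack system-scope atomic coherence.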
+#ifndef DESUL_CUDA_ARCH_IS_PRE_PASCAL
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeNode ) { __threadfence_system(); }
+#endif
+// clang-format on
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_GCC_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_GCC_HPP_
+
+#include <desul/atomics/Adapt_GCC.hpp>
+
+namespace desul {
+namespace Impl {
+
+template <class MemoryOrder, class MemoryScope>
+void host_atomic_thread_fence(MemoryOrder, MemoryScope) {
+ __atomic_thread_fence(GCCMemoryOrder<MemoryOrder>::value);
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_HIP_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_HIP_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+// clang-format off
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeDevice) { __threadfence(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeCore ) { __threadfence_block(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeNode ) { __threadfence_system(); }
+inline __device__ void device_atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeNode ) { __threadfence_system(); }
+// clang-format on
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_MSVC_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_MSVC_HPP_
+
+#include <atomic>
+#include <desul/atomics/Adapt_CXX.hpp>
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+template <class MemoryOrder, class MemoryScope>
+void host_atomic_thread_fence(MemoryOrder, MemoryScope) {
+ std::atomic_thread_fence(Impl::CXXMemoryOrder<MemoryOrder>::value);
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_OPENACC_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_OPENACC_HPP_
+
+namespace desul {
+namespace Impl {
+
+#pragma acc routine seq
+template <class MemoryOrder, class MemoryScope>
+void device_atomic_thread_fence(MemoryOrder, MemoryScope) {
+ // FIXME_OPENACC: The current OpenACC standard does not support explicit thread fence
+ // operations.
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_OPENMP_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_OPENMP_HPP_
+
+#include <omp.h>
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+namespace Impl {
+
+// The NVHPC compiler only supports the basic flush construct without a
+// memory-order clause.
+#if _OPENMP > 201800 && !defined(__NVCOMPILER)
+
+// There is no seq_cst flush in OpenMP; acq_rel is the strongest flush
+// available and should be equivalent for a fence.
+inline void host_atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+#pragma omp flush acq_rel
+}
+inline void host_atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+#pragma omp flush acq_rel
+}
+inline void host_atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+#pragma omp flush release
+}
+inline void host_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+#pragma omp flush acquire
+}
+inline void host_atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+#pragma omp flush acq_rel
+}
+inline void host_atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+#pragma omp flush acq_rel
+}
+inline void host_atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+#pragma omp flush release
+}
+inline void host_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+#pragma omp flush acquire
+}
+
+#else
+
+inline void host_atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void host_atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+#pragma omp flush
+}
+
+#endif
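+
+// Illustrative usage (hypothetical caller): publish data with a release fence,
+// e.g. before setting a flag that another host thread checks:
+//
+//   desul::Impl::host_atomic_thread_fence(desul::MemoryOrderRelease(),
+//                                         desul::MemoryScopeDevice());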
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_SYCL_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_SYCL_HPP_
+
+#include <desul/atomics/Adapt_SYCL.hpp>
+#include <desul/atomics/Common.hpp>
+
+// FIXME_SYCL SYCL 2020 dictates that <sycl/sycl.hpp> is the header to include,
+// but icpx 2022.1.0 and earlier versions only provide <CL/sycl.hpp>
+#if __has_include(<sycl/sycl.hpp>)
+#include <sycl/sycl.hpp>
+#else
+#include <CL/sycl.hpp>
+#endif
+
+namespace desul {
+namespace Impl {
+
+template <class MemoryOrder, class MemoryScope>
+void device_atomic_thread_fence(MemoryOrder, MemoryScope) {
+ sycl::atomic_fence(SYCLMemoryOrder<MemoryOrder>::value,
+ SYCLMemoryScope<MemoryScope>::value);
+}
+
+} // namespace Impl
+} // namespace desul
+
+#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_THREAD_FENCE_SCOPECALLER_HPP_
+#define DESUL_ATOMICS_THREAD_FENCE_SCOPECALLER_HPP_
+
+#include <desul/atomics/Common.hpp>
+
+namespace desul {
+
+// clang-format off
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrderSeqCst , MemoryScopeCaller) {}
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrderAcqRel , MemoryScopeCaller) {}
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCaller) {}
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCaller) {}
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrderRelaxed, MemoryScopeCaller) {}
+// clang-format on
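+
+// MemoryScopeCaller restricts visibility to the calling thread of execution
+// alone, so every fence at this scope is intentionally a no-op.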
+
+} // namespace desul
+
+#endif
--- /dev/null
+#include <limits>
+namespace desul {
+namespace Impl {
+// Choose the variant of atomics to use further below.
+// The __isGlobal intrinsic was only introduced in CUDA 11.2.
+// It also stopped working in NVC++ 23.1 (it still works in 22.11);
+// this is a bug in NVHPC, which does not treat CUDA intrinsics correctly.
+// FIXME_NVHPC
+#if !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE) && \
+ !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL)
+#if ((__CUDACC_VER_MAJOR__ > 11) || \
+ ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ > 1))) && \
+ (!defined(__NVCOMPILER) || __NVCOMPILER_MAJOR__ < 23)
+#define DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#else
+#define DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#endif
+#endif
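+// Sketch of the two code paths selected above (illustration only):
+//  - ISGLOBAL variant: branch at runtime on the address space of dest,
+//      if (__isGlobal(dest)) { /* PTX atom/red on global memory */ }
+//      else                  { /* fall back to the generic path  */ }
+//  - PREDICATE variant: test the address space with a PTX predicate
+//    inside the asm block itself (".reg .pred p;" plus an isspacep.global
+//    test), avoiding the __isGlobal intrinsic entirely.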
+#include <desul/atomics/cuda/cuda_cc7_asm.inc>
+
+} // namespace Impl
+} // namespace desul
--- /dev/null
+#include <limits>
+namespace desul {
+namespace Impl {
+#include <desul/atomics/cuda/cuda_cc7_asm_exchange.inc>
+}
+} // namespace desul
--- /dev/null
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#include "cuda_cc7_asm_atomic_fetch_op.inc_isglobal"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#include "cuda_cc7_asm_atomic_fetch_op.inc_predicate"
+#endif
+
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
if(__isGlobal(dest)) { \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
if(__isGlobal(dest)) { \
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
if(__isGlobal(dest)) { \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
if(__isGlobal(dest)) { \
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
if(__isGlobal(dest)) { \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
if(__isGlobal(dest)) { \
// Fetch atomics
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
if(__isGlobal(dest)) { \
asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
ctype neg_value = -value; \
if(__isGlobal(dest)) { \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
if(__isGlobal(dest)) { \
asm volatile("atom.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
if(__isGlobal(dest)) { \
asm volatile("atom.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
if(__isGlobal(dest)) { \
} \
return result; \
} \
-inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
if(__isGlobal(dest)) { \
asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
if(__isGlobal(dest)) { \
} \
return result; \
} \
-inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
if(__isGlobal(dest)) { \
asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
return result; \
}
-// Group ops for integer ctypes
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
-
// Instantiate Functions
+
+// General comments:
+// - float/double only support add (sub is implemented as add of the negated value)
+// - inc/dec are only supported with uint32_t
+// - int64_t does not support add (the unsigned .u64 variant is used instead)
+
+// floating point types
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
-
+// uint32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(uint32_t,".u32","r","=r")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+// uint64_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(uint64_t,".u64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint64_t,".u64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint64_t,".u64","l","=l")
+
+// int32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(int32_t,".s32","r","=r")
+
+// int64_t note: add/sub use the unsigned register variant
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(int64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(int64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(int64_t,".s64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(int64_t,".s64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(int64_t,".s64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(int64_t,".s64","l","=l")
+
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
-
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
asm volatile( \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
asm volatile( \
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
asm volatile( \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
asm volatile( \
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
asm volatile( \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
asm volatile( \
// Fetch atomics
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
ctype neg_value = -value; \
asm volatile( \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result=0; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
asm volatile( \
: reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
return result; \
} \
-inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
asm volatile( \
: reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
return result; \
} \
-inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ ctype device_atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
ctype result = 0; \
asm volatile( \
"{\n\t" \
return result; \
}
-// Group ops for integer ctypes
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
-
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
// Instantiate Functions
+
+// General comments:
+// - float/double only support add (sub is implemented as add of the negated value)
+// - inc/dec are only supported with uint32_t
+// - int64_t does not support add (the unsigned .u64 variant is used instead)
+
+// floating point types
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
-
+// uint32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(uint32_t,".u32","r","=r")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+// uint64_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(uint64_t,".u64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint64_t,".u64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint64_t,".u64","l","=l")
+
+// int32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(int32_t,".s32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(int32_t,".s32","r","=r")
+
+// int64_t note: add/sub use the unsigned register variant
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(int64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(int64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(int64_t,".s64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(int64_t,".s64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(int64_t,".s64","l","=l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(int64_t,".s64","l","=l")
+
__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
--- /dev/null
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#include "cuda_cc7_asm_atomic_op.inc_isglobal"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#include "cuda_cc7_asm_atomic_op.inc_predicate"
+#endif
+
// Non Returning Atomic Operations
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
if(__isGlobal(dest)) { \
asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
} else { \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type neg_value = -value; \
if(__isGlobal(dest)) { \
asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
if(__isGlobal(dest)) { \
asm volatile("red.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
} else { \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
if(__isGlobal(dest)) { \
asm volatile("red.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
} else { \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type limit = desul::Impl::numeric_limits_max<type>::value; \
if(__isGlobal(dest)) { \
asm volatile("red.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
-inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type limit = desul::Impl::numeric_limits_max<type>::value; \
if(__isGlobal(dest)) { \
asm volatile("red.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
} \
}
-// Group ops for integer types
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+// Instantiate Functions
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+// General comments:
+// - float/double only support add (sub is implemented as add of the negated value)
+// - inc/dec are only supported with uint32_t
+// - int64_t does not support add (the unsigned .u64 variant is used instead)
-// Instantiate Functions
+// floating point types
__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+// uint32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(uint32_t,".u32","r")
+
+// uint64_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(uint64_t,".u64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(uint64_t,".u64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(uint64_t,".u64","l")
+
+// int32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(int32_t,".s32","r")
+
+// int64_t note: add/sub use the unsigned register variant
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(int64_t,".s64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(int64_t,".s64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(int64_t,".s64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(int64_t,".s64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
// Non Returning Atomic Operations
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
asm volatile( \
"{\n\t" \
".reg .pred p;\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type neg_value = -value; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
asm volatile( \
"{\n\t" \
".reg .pred p;\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
asm volatile( \
"{\n\t" \
".reg .pred p;\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type limit = desul::Impl::numeric_limits_max<type>::value; \
asm volatile( \
"{\n\t" \
}
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
-inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ void device_atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
type limit = desul::Impl::numeric_limits_max<type>::value; \
asm volatile( \
"{\n\t" \
:: "l"(dest),reg_type(limit) : "memory"); \
}
-// Group ops for integer types
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+// Instantiate Functions
-#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
-__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+// General comments:
+// - float/double only support add (sub is implemented as add of the negated value)
+// - inc/dec are only supported with uint32_t
+// - int64_t does not support add (the unsigned .u64 variant is used instead)
-// Instantiate Functions
+// floating point types
__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+// uint32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(uint32_t,".u32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(uint32_t,".u32","r")
+
+// uint64_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(uint64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(uint64_t,".u64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(uint64_t,".u64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(uint64_t,".u64","l")
+
+// int32_t
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(int32_t,".s32","r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(int32_t,".s32","r")
+
+// int64_t note: add/sub use the unsigned register variant
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(int64_t,".s64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(int64_t,".s64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(int64_t,".s64","l")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(int64_t,".s64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
-__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
-//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_EXCHANGE() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename ::std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_result = 0u; \
asm volatile("atom.exch" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename ::std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_result = 0u; \
asm volatile("atom.exch" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
#define __DESUL_IMPL_CUDA_ASM_ATOMIC_COMPARE_EXCHANGE() \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename ::std::enable_if<sizeof(ctype)==4, ctype>::type device_atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
uint32_t asm_compare = reinterpret_cast<uint32_t&>(compare); \
uint32_t asm_result = 0u; \
return reinterpret_cast<ctype&>(asm_result); \
} \
template<class ctype> \
-inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+inline __device__ typename ::std::enable_if<sizeof(ctype)==8, ctype>::type device_atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
uint64_t asm_compare = reinterpret_cast<uint64_t&>(compare); \
uint64_t asm_result = 0u; \
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_OPENMP40_HPP_
+#define DESUL_ATOMICS_OPENMP40_HPP_
+
+#include <type_traits>
+
+namespace desul {
+namespace Impl {
+template <class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_pre_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_pre_capture_flush(MemoryOrderAcquire, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderAcquire(), MEMORY_SCOPE_TMP());
+}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_pre_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
+}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_pre_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
+}
+
+template <class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_post_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_post_capture_flush(MemoryOrderRelease, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderRelease(), MEMORY_SCOPE_TMP());
+}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_post_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
+}
+template <class MEMORY_SCOPE_TMP>
+void openmp_maybe_call_post_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
+ atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
+}
+
+template <class T>
+constexpr bool is_openmp_atomic_type_v = std::is_arithmetic<T>::value;
+} // namespace Impl
+} // namespace desul
+
+namespace desul {
+namespace impl {
+// We can't use a macro approach to get all definitions since the ops include
+// #pragma omp directives, so we resort to multiple inclusion of the same code
+// snippet instead.
+
+// Node-level atomics can't be done this way with OpenMP Target, but we could
+// add a define that states whether device level IS node level (e.g., for a
+// pure CPU node).
+
+#define MEMORY_ORDER MemoryOrderRelaxed
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
+
+#define MEMORY_ORDER MemoryOrderAcqRel
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
+
+#define MEMORY_ORDER MemoryOrderSeqCst
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include <desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
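+
+// Each inclusion above stamps out one family of host_atomic_* overloads for the
+// given (MEMORY_ORDER, MEMORY_SCOPE) pair. Roughly (simplified sketch, assuming
+// the capture form used in OpenMP_40_op.inc):
+//
+//   template <typename T>
+//   std::enable_if_t<Impl::is_openmp_atomic_type_v<T>, T> host_atomic_fetch_add(
+//       T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
+//     T tmp;
+//     Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
+//     #pragma omp atomic capture
+//     { tmp = *dest; *dest += value; }
+//     Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
+//     return tmp;
+//   }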
+} // namespace impl
+} // namespace desul
+#endif
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_add(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_fetch_add(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_sub(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_fetch_sub(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_and(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_fetch_and(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_or(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_fetch_or(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_xor(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_fetch_xor(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_add_fetch(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_add_fetch(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_sub_fetch(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_sub_fetch(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_and_fetch(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_and_fetch(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_or_fetch(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_or_fetch(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
return tmp;
}
template <typename T>
- std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_xor_fetch(
+ std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> host_atomic_xor_fetch(
T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {
T tmp;
Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
#include <sstream>
#include <string>
-#ifdef DESUL_HAVE_CUDA_ATOMICS
-#ifdef __CUDACC_RDC__
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
namespace desul {
namespace Impl {
__device__ __constant__ int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE = nullptr;
cudaFreeHost(CUDA_SPACE_ATOMIC_LOCKS_NODE_h);
CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
CUDA_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
-#ifdef __CUDACC_RDC__
+#ifdef DESUL_ATOMICS_ENABLE_CUDA_SEPARABLE_COMPILATION
copy_cuda_lock_arrays_to_device();
#endif
}
} // namespace Impl
} // namespace desul
-#endif
#include <sstream>
#include <string>
-#ifdef DESUL_HAVE_HIP_ATOMICS
-#ifdef DESUL_HIP_RDC
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
namespace desul {
namespace Impl {
__device__ __constant__ int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE = nullptr;
"init_lock_arrays_hip: hipMallocHost host locks");
auto error_sync1 = hipDeviceSynchronize();
- DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+ copy_hip_lock_arrays_to_device();
check_error_and_throw_hip(error_sync1, "init_lock_arrays_hip: post malloc");
init_lock_arrays_hip_kernel<<<(HIP_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256>>>();
check_error_and_throw_hip(error_free2, "finalize_lock_arrays_hip: free host locks");
HIP_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
HIP_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
-#ifdef DESUL_HIP_RDC
- DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+#ifdef DESUL_ATOMICS_ENABLE_HIP_SEPARABLE_COMPILATION
+ copy_hip_lock_arrays_to_device();
#endif
}
} // namespace Impl
} // namespace desul
-#endif
--- /dev/null
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+// FIXME_SYCL Use SYCL_EXT_ONEAPI_DEVICE_GLOBAL when available instead
+#ifdef DESUL_SYCL_DEVICE_GLOBAL_SUPPORTED
+
+#include <cinttypes>
+#include <desul/atomics/Lock_Array_SYCL.hpp>
+
+namespace desul::Impl {
+
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+SYCL_EXTERNAL
+sycl_device_global<int32_t*> SYCL_SPACE_ATOMIC_LOCKS_DEVICE;
+SYCL_EXTERNAL
+sycl_device_global<int32_t*> SYCL_SPACE_ATOMIC_LOCKS_NODE;
+#endif
+
+int32_t* SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+int32_t* SYCL_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+
+template <>
+void init_lock_arrays_sycl<int>(sycl::queue q) {
+ if (SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h != nullptr) return;
+
+ SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h =
+ sycl::malloc_device<int32_t>(SYCL_SPACE_ATOMIC_MASK + 1, q);
+ SYCL_SPACE_ATOMIC_LOCKS_NODE_h =
+ sycl::malloc_host<int32_t>(SYCL_SPACE_ATOMIC_MASK + 1, q);
+
+ copy_sycl_lock_arrays_to_device(q);
+
+ q.memset(SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h,
+ 0,
+ sizeof(int32_t) * (SYCL_SPACE_ATOMIC_MASK + 1));
+ q.memset(SYCL_SPACE_ATOMIC_LOCKS_NODE_h,
+ 0,
+ sizeof(int32_t) * (SYCL_SPACE_ATOMIC_MASK + 1));
+
+ q.wait_and_throw();
+}
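+
+// A minimal call-pattern sketch (illustrative, not part of the upstream
+// file): a host program initializes the lock arrays once before issuing any
+// lock-based atomics and releases them at shutdown, e.g.
+//
+//   sycl::queue q;
+//   desul::Impl::init_lock_arrays_sycl<int>(q);
+//   /* ... launch kernels that use lock-based atomics ... */
+//   desul::Impl::finalize_lock_arrays_sycl<int>(q);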
+
+template <>
+void finalize_lock_arrays_sycl<int>(sycl::queue q) {
+ if (SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h == nullptr) return;
+
+ sycl::free(SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h, q);
+ sycl::free(SYCL_SPACE_ATOMIC_LOCKS_NODE_h, q);
+ SYCL_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+ SYCL_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+#ifdef DESUL_ATOMICS_ENABLE_SYCL_SEPARABLE_COMPILATION
+ copy_sycl_lock_arrays_to_device(q);
+#endif
+}
+
+} // namespace desul::Impl
+#endif
--- /dev/null
+Copyright 2008, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+This is a fused source version of gtest (v1.11.0). All that should be necessary
+to start using gtest in your package is to declare the dependency and include
+gtest/gtest.h.
+
+However, because some of the packages that are developed in Sierra do not use a
+fused source version of gtest, we need to make it possible for them to build
+with this version as well as with their native build. To facilitate this we
+have created symlinks for the other gtest headers that they use to the fused
+source gtest.h. This makes it possible for them to find the headers while
+still using the fused source version. This should not have any ill effects,
+since the header is include-guarded and allows only the non-gtest.h headers to
+be used in their files.
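+
+A minimal sketch of what a consuming package then writes (illustrative, using
+hypothetical test names):
+
+  #include "gtest/gtest.h"
+
+  TEST(BasicTest, Addition) { EXPECT_EQ(4, 2 + 2); }
+
+  int main(int argc, char** argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+  }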
+
--- /dev/null
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Google C++ Testing and Mocking Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+// GOOGLETEST_CM0004 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument of the two-argument constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ // The two possible mocking modes of this object.
+ enum InterceptMode {
+ INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
+ INTERCEPT_ALL_THREADS // Intercepts all failures.
+ };
+
+ // The c'tor sets this object as the test part result reporter used
+ // by Google Test. The 'result' parameter specifies where to report the
+ // results. This reporter will only catch failures generated in the current
+ // thread. DEPRECATED
+ explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+ // Same as above, but you can choose the interception scope of this object.
+ ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+ TestPartResultArray* result);
+
+ // The d'tor restores the previous test part result reporter.
+ ~ScopedFakeTestPartResultReporter() override;
+
+ // Appends the TestPartResult object to the TestPartResultArray
+ // received in the constructor.
+ //
+ // This method is from the TestPartResultReporterInterface
+ // interface.
+ void ReportTestPartResult(const TestPartResult& result) override;
+
+ private:
+ void Init();
+
+ const InterceptMode intercept_mode_;
+ TestPartResultReporterInterface* old_reporter_;
+ TestPartResultArray* const result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
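+
+// A minimal usage sketch (illustrative, not upstream gtest code): failures
+// raised while the reporter is in scope land in the given array instead of
+// failing the enclosing test.
+//
+//   ::testing::TestPartResultArray results;
+//   {
+//     ::testing::ScopedFakeTestPartResultReporter reporter(
+//         ::testing::ScopedFakeTestPartResultReporter::
+//             INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "captured, not reported";
+//   }
+//   // results now holds exactly one non-fatal failure.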
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+ // The constructor remembers the arguments.
+ SingleFailureChecker(const TestPartResultArray* results,
+ TestPartResult::Type type, const std::string& substr);
+ ~SingleFailureChecker();
+ private:
+ const TestPartResultArray* const results_;
+ const TestPartResult::Type type_;
+ const std::string substr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures. It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - 'statement' cannot reference local non-static variables or
+// non-static members of the current object.
+// - 'statement' cannot return a value.
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+      &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+            INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+      &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+            INTERCEPT_ALL_THREADS, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
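+
+// A minimal usage sketch (illustrative, not upstream gtest code): the tested
+// statement is wrapped in a static helper, so it must not touch local
+// variables or members (see the restrictions above).
+//
+//   void Fails() { FAIL() << "Expected fatal failure."; }
+//
+//   TEST(FailureTest, CatchesFatalFailure) {
+//     EXPECT_FATAL_FAILURE(Fails(), "Expected fatal failure.");
+//   }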
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures. It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+// if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+            INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
+        &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
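+
+// A minimal usage sketch (illustrative, not upstream gtest code): unlike the
+// fatal variants, the statement may reference locals.
+//
+//   TEST(FailureTest, CatchesNonfatalFailure) {
+//     int value = 42;
+//     EXPECT_NONFATAL_FAILURE(EXPECT_EQ(0, value), "Expected equality");
+//   }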
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <chrono> // NOLINT
+#include <cmath>
+#include <cstdint>
+#include <iomanip>
+#include <limits>
+#include <list>
+#include <map>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+# include <fcntl.h> // NOLINT
+# include <limits.h> // NOLINT
+# include <sched.h> // NOLINT
+// Declares vsnprintf(). This header is not available on Windows.
+# include <strings.h> // NOLINT
+# include <sys/mman.h> // NOLINT
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+# include <string>
+
+#elif GTEST_OS_ZOS
+# include <sys/time.h> // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE.
+
+# include <windows.h> // NOLINT
+# undef min
+
+#elif GTEST_OS_WINDOWS // We are on Windows proper.
+
+# include <windows.h> // NOLINT
+# undef min
+
+#ifdef _MSC_VER
+# include <crtdbg.h> // NOLINT
+#endif
+
+# include <io.h> // NOLINT
+# include <sys/timeb.h> // NOLINT
+# include <sys/types.h> // NOLINT
+# include <sys/stat.h> // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+# include <sys/time.h> // NOLINT
+# endif // GTEST_OS_WINDOWS_MINGW
+
+#else
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h> // NOLINT
+# include <netdb.h> // NOLINT
+# include <sys/socket.h> // NOLINT
+# include <sys/types.h> // NOLINT
+#endif
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// This file contains purely Google Test's internal implementation. Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_
+#define GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h> // For strtoll/_strtoul64/malloc/free.
+#include <string.h> // For memmove.
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h> // NOLINT
+# include <netdb.h> // NOLINT
+#endif
+
+#if GTEST_OS_WINDOWS
+# include <windows.h> // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFailFast[] = "fail_fast";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kBriefFlag[] = "brief";
+const char kPrintTimeFlag[] = "print_time";
+const char kPrintUTF8Flag[] = "print_utf8";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+const char kFlagfileFlag[] = "flagfile";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true if and only if the --help flag or an equivalent form
+// is specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true if and only if Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Converts the given time in milliseconds to a date string in the ISO 8601
+// format, without the timezone information. N.B.: due to the use of the
+// non-reentrant localtime() function, this function is not thread safe. Do
+// not use it in any code that can be called from multiple threads.
+GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+ const char* str, const char* flag, int32_t* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(int32_t random_seed_flag) {
+ const unsigned int raw_seed = (random_seed_flag == 0) ?
+ static_cast<unsigned int>(GetTimeInMillis()) :
+ static_cast<unsigned int>(random_seed_flag);
+
+ // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+ // it's easy to type.
+ const int normalized_seed =
+ static_cast<int>((raw_seed - 1U) %
+ static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+ return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+ GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+ << "Invalid random seed " << seed << " - must be in [1, "
+ << kMaxRandomSeed << "].";
+ const int next_seed = seed + 1;
+ return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
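+
+// Worked examples (illustrative, not upstream code): with kMaxRandomSeed ==
+// 99999,
+//   GetRandomSeedFromFlag(0)      // derives the seed from the current time
+//   GetRandomSeedFromFlag(100000) // normalizes to 1
+//   GetNextRandomSeed(99999)      // wraps around to 1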
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+ // The c'tor.
+ GTestFlagSaver() {
+ also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+ break_on_failure_ = GTEST_FLAG(break_on_failure);
+ catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+ color_ = GTEST_FLAG(color);
+ death_test_style_ = GTEST_FLAG(death_test_style);
+ death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+ fail_fast_ = GTEST_FLAG(fail_fast);
+ filter_ = GTEST_FLAG(filter);
+ internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+ list_tests_ = GTEST_FLAG(list_tests);
+ output_ = GTEST_FLAG(output);
+ brief_ = GTEST_FLAG(brief);
+ print_time_ = GTEST_FLAG(print_time);
+ print_utf8_ = GTEST_FLAG(print_utf8);
+ random_seed_ = GTEST_FLAG(random_seed);
+ repeat_ = GTEST_FLAG(repeat);
+ shuffle_ = GTEST_FLAG(shuffle);
+ stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+ stream_result_to_ = GTEST_FLAG(stream_result_to);
+ throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+ }
+
+ // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
+ ~GTestFlagSaver() {
+ GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+ GTEST_FLAG(break_on_failure) = break_on_failure_;
+ GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+ GTEST_FLAG(color) = color_;
+ GTEST_FLAG(death_test_style) = death_test_style_;
+ GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+ GTEST_FLAG(filter) = filter_;
+ GTEST_FLAG(fail_fast) = fail_fast_;
+ GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+ GTEST_FLAG(list_tests) = list_tests_;
+ GTEST_FLAG(output) = output_;
+ GTEST_FLAG(brief) = brief_;
+ GTEST_FLAG(print_time) = print_time_;
+ GTEST_FLAG(print_utf8) = print_utf8_;
+ GTEST_FLAG(random_seed) = random_seed_;
+ GTEST_FLAG(repeat) = repeat_;
+ GTEST_FLAG(shuffle) = shuffle_;
+ GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+ GTEST_FLAG(stream_result_to) = stream_result_to_;
+ GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+ }
+
+ private:
+ // Fields for saving the original values of flags.
+ bool also_run_disabled_tests_;
+ bool break_on_failure_;
+ bool catch_exceptions_;
+ std::string color_;
+ std::string death_test_style_;
+ bool death_test_use_fork_;
+ bool fail_fast_;
+ std::string filter_;
+ std::string internal_run_death_test_;
+ bool list_tests_;
+ std::string output_;
+ bool brief_;
+ bool print_time_;
+ bool print_utf8_;
+ int32_t random_seed_;
+ int32_t repeat_;
+ bool shuffle_;
+ int32_t stack_trace_depth_;
+ std::string stream_result_to_;
+ bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
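+
+// A minimal usage sketch (illustrative, not upstream code): flags mutated
+// while the saver is alive are rolled back when it goes out of scope.
+//
+//   {
+//     GTestFlagSaver saver;
+//     GTEST_FLAG(filter) = "MySuite.*";
+//     // ... run something with the temporary filter ...
+//   }  // original --gtest_filter value is restored here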
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+GTEST_API_ std::string CodePointToUtf8(uint32_t code_point);
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+ const char* shard_index_str,
+ bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as a 32-bit integer. If it is unset,
+// returns default_val. If it is not a 32-bit integer, prints an error and
+// aborts.
+GTEST_API_ int32_t Int32FromEnvOrDie(const char* env_var, int32_t default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true if and only if the test should be run on this shard. The test id
+// is some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+ int total_shards, int shard_index, int test_id);
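+
+// For instance (illustrative sketch): with GTEST_TOTAL_SHARDS=3 and
+// GTEST_SHARD_INDEX=1, a shard runs exactly the tests whose sequential id
+// satisfies test_id % 3 == 1, so every test runs on exactly one shard.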
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+ // Implemented as an explicit loop since std::count_if() in libCstd on
+ // Solaris has a non-standard signature.
+ int count = 0;
+ for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+ if (predicate(*it))
+ ++count;
+ }
+ return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+ std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+ return (i < 0 || i >= static_cast<int>(v.size())) ? default_value
+ : v[static_cast<size_t>(i)];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+ std::vector<E>* v) {
+ const int size = static_cast<int>(v->size());
+ GTEST_CHECK_(0 <= begin && begin <= size)
+ << "Invalid shuffle range start " << begin << ": must be in range [0, "
+ << size << "].";
+ GTEST_CHECK_(begin <= end && end <= size)
+ << "Invalid shuffle range finish " << end << ": must be in range ["
+ << begin << ", " << size << "].";
+
+ // Fisher-Yates shuffle, from
+ // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ for (int range_width = end - begin; range_width >= 2; range_width--) {
+ const int last_in_range = begin + range_width - 1;
+ const int selected =
+ begin +
+ static_cast<int>(random->Generate(static_cast<uint32_t>(range_width)));
+ std::swap((*v)[static_cast<size_t>(selected)],
+ (*v)[static_cast<size_t>(last_in_range)]);
+ }
+}
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+ ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object. Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+ delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+ // Constructor.
+ //
+ // TestPropertyKeyIs has NO default constructor.
+ explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+  // Returns true if and only if the key of the test property matches key_.
+ bool operator()(const TestProperty& test_property) const {
+ return test_property.key() == key_;
+ }
+
+ private:
+ std::string key_;
+};
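+
+// A minimal usage sketch (illustrative, not upstream code): the predicate
+// composes with CountIf above, e.g.
+//
+//   std::vector<TestProperty> props = ...;
+//   int n = CountIf(props, TestPropertyKeyIs("owner"));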
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests. It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag. E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter. If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+ // Functions for processing the gtest_output flag.
+
+ // Returns the output format, or "" for normal printed output.
+ static std::string GetOutputFormat();
+
+ // Returns the absolute path of the requested output file, or the
+ // default (test_detail.xml in the original working directory) if
+ // none was explicitly specified.
+ static std::string GetAbsolutePathToOutputFile();
+
+ // Functions for processing the gtest_filter flag.
+
+ // Returns true if and only if the user-specified filter matches the test
+ // suite name and the test name.
+ static bool FilterMatchesTest(const std::string& test_suite_name,
+ const std::string& test_name);
+
+#if GTEST_OS_WINDOWS
+ // Function for supporting the gtest_catch_exception flag.
+
+ // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+ // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+ // This function is useful as an __except condition.
+ static int GTestShouldProcessSEH(DWORD exception_code);
+#endif // GTEST_OS_WINDOWS
+
+ // Returns true if "name" matches the ':' separated list of glob-style
+ // filters in "filter".
+ static bool MatchesFilter(const std::string& name, const char* filter);
+};
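+
+// For example (illustrative sketch): MatchesFilter("FooTest.Bar",
+// "FooTest.*:BazTest.*") returns true because the name matches the first of
+// the ':'-separated glob patterns.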
+
+// Returns the current application's name, removing directory path if that
+// is present. Used by UnitTestOptions::GetOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetterInterface() {}
+ virtual ~OsStackTraceGetterInterface() {}
+
+ // Returns the current OS stack trace as an std::string. Parameters:
+ //
+ // max_depth - the maximum number of stack frames to be included
+ // in the trace.
+ // skip_count - the number of top frames to be skipped; doesn't count
+ // against max_depth.
+ virtual std::string CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+ // UponLeavingGTest() should be called immediately before Google Test calls
+ // user code. It saves some information about the current stack that
+ // CurrentStackTrace() will use to find and hide Google Test stack frames.
+ virtual void UponLeavingGTest() = 0;
+
+ // This string is inserted in place of stack frames that are part of
+ // Google Test's implementation.
+ static const char* const kElidedFramesMarker;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetter() {}
+
+ std::string CurrentStackTrace(int max_depth, int skip_count) override;
+ void UponLeavingGTest() override;
+
+ private:
+#if GTEST_HAS_ABSL
+ Mutex mutex_; // Protects all internal state.
+
+ // We save the stack frame below the frame that calls user code.
+ // We do this because the address of the frame immediately below
+ // the user code changes between the call to UponLeavingGTest()
+ // and any calls to the stack trace code from within the user code.
+ void* caller_frame_ = nullptr;
+#endif // GTEST_HAS_ABSL
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+ const char* file;
+ int line;
+ std::string message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. Reports the test part
+ // result in the current test.
+ void ReportTestPartResult(const TestPartResult& result) override;
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. The implementation just
+ // delegates to the current global test part result reporter of *unit_test_.
+ void ReportTestPartResult(const TestPartResult& result) override;
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class. We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+ explicit UnitTestImpl(UnitTest* parent);
+ virtual ~UnitTestImpl();
+
+ // There are two different ways to register your own TestPartResultReporter.
+  // You can register your own reporter to listen either only for test results
+  // from the current thread or for results from all threads.
+  // By default, each per-thread test result reporter just passes a new
+ // TestPartResult to the global test result reporter, which registers the
+ // test part result for the currently running test.
+
+ // Returns the global test part result reporter.
+ TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+ // Sets the global test part result reporter.
+ void SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter);
+
+ // Returns the test part result reporter for the current thread.
+ TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+ // Sets the test part result reporter for the current thread.
+ void SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter);
+
+ // Gets the number of successful test suites.
+ int successful_test_suite_count() const;
+
+ // Gets the number of failed test suites.
+ int failed_test_suite_count() const;
+
+ // Gets the number of all test suites.
+ int total_test_suite_count() const;
+
+ // Gets the number of all test suites that contain at least one test
+ // that should run.
+ int test_suite_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of skipped tests.
+ int skipped_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns true if and only if the unit test passed (i.e. all test suites
+ // passed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true if and only if the unit test failed (i.e. some test suite
+ // failed or something outside of all tests failed).
+ bool Failed() const {
+ return failed_test_suite_count() > 0 || ad_hoc_test_result()->Failed();
+ }
+
+ // Gets the i-th test suite among all the test suites. i can range from 0 to
+ // total_test_suite_count() - 1. If i is not in that range, returns NULL.
+ const TestSuite* GetTestSuite(int i) const {
+ const int index = GetElementOr(test_suite_indices_, i, -1);
+    return index < 0 ? nullptr : test_suites_[static_cast<size_t>(index)];
+ }
+
+ // Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ const TestCase* GetTestCase(int i) const { return GetTestSuite(i); }
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Gets the i-th test suite among all the test suites. i can range from 0 to
+ // total_test_suite_count() - 1. If i is not in that range, returns NULL.
+ TestSuite* GetMutableSuiteCase(int i) {
+ const int index = GetElementOr(test_suite_indices_, i, -1);
+ return index < 0 ? nullptr : test_suites_[static_cast<size_t>(index)];
+ }
+
+ // Provides access to the event listener list.
+ TestEventListeners* listeners() { return &listeners_; }
+
+ // Returns the TestResult for the test that's currently running, or
+ // the TestResult for the ad hoc test if no test is running.
+ TestResult* current_test_result();
+
+ // Returns the TestResult for the ad hoc test.
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+ // Sets the OS stack trace getter.
+ //
+ // Does nothing if the input and the current OS stack trace getter
+ // are the same; otherwise, deletes the old getter and makes the
+ // input the current getter.
+ void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+ // Returns the current OS stack trace getter if it is not NULL;
+ // otherwise, creates an OsStackTraceGetter, makes it the current
+ // getter, and returns it.
+ OsStackTraceGetterInterface* os_stack_trace_getter();
+
+ // Returns the current OS stack trace as an std::string.
+ //
+ // The maximum number of stack frames to be included is specified by
+ // the gtest_stack_trace_depth flag. The skip_count parameter
+ // specifies the number of top frames to be skipped, which doesn't
+ // count against the number of frames to be included.
+ //
+ // For example, if Foo() calls Bar(), which in turn calls
+ // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+ // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+ std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+
+ // Finds and returns a TestSuite with the given name. If one doesn't
+ // exist, creates one and returns it.
+ //
+ // Arguments:
+ //
+ // test_suite_name: name of the test suite
+ // type_param: the name of the test's type parameter, or NULL if
+ // this is not a typed or a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test suite
+ // tear_down_tc: pointer to the function that tears down the test suite
+ TestSuite* GetTestSuite(const char* test_suite_name, const char* type_param,
+ internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc);
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ TestCase* GetTestCase(const char* test_case_name, const char* type_param,
+ internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc) {
+ return GetTestSuite(test_case_name, type_param, set_up_tc, tear_down_tc);
+ }
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Adds a TestInfo to the unit test.
+ //
+ // Arguments:
+ //
+ // set_up_tc: pointer to the function that sets up the test suite
+ // tear_down_tc: pointer to the function that tears down the test suite
+ // test_info: the TestInfo object
+ void AddTestInfo(internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc,
+ TestInfo* test_info) {
+#if GTEST_HAS_DEATH_TEST
+ // In order to support thread-safe death tests, we need to
+ // remember the original working directory when the test program
+ // was first invoked. We cannot do this in RUN_ALL_TESTS(), as
+ // the user may have changed the current directory before calling
+ // RUN_ALL_TESTS(). Therefore we capture the current directory in
+ // AddTestInfo(), which is called to register a TEST or TEST_F
+ // before main() is reached.
+ if (original_working_dir_.IsEmpty()) {
+ original_working_dir_.Set(FilePath::GetCurrentDir());
+ GTEST_CHECK_(!original_working_dir_.IsEmpty())
+ << "Failed to get the current working directory.";
+ }
+#endif // GTEST_HAS_DEATH_TEST
+
+ GetTestSuite(test_info->test_suite_name(), test_info->type_param(),
+ set_up_tc, tear_down_tc)
+ ->AddTestInfo(test_info);
+ }
+
+ // Returns ParameterizedTestSuiteRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ internal::ParameterizedTestSuiteRegistry& parameterized_test_registry() {
+ return parameterized_test_registry_;
+ }
+
+ std::set<std::string>* ignored_parameterized_test_suites() {
+ return &ignored_parameterized_test_suites_;
+ }
+
+ // Returns TypeParameterizedTestSuiteRegistry object used to keep track of
+ // type-parameterized tests and instantiations of them.
+ internal::TypeParameterizedTestSuiteRegistry&
+ type_parameterized_test_registry() {
+ return type_parameterized_test_registry_;
+ }
+
+ // Sets the TestSuite object for the test that's currently running.
+ void set_current_test_suite(TestSuite* a_current_test_suite) {
+ current_test_suite_ = a_current_test_suite;
+ }
+
+ // Sets the TestInfo object for the test that's currently running. If
+ // current_test_info is NULL, the assertion results will be stored in
+ // ad_hoc_test_result_.
+ void set_current_test_info(TestInfo* a_current_test_info) {
+ current_test_info_ = a_current_test_info;
+ }
+
+ // Registers all parameterized tests defined using TEST_P and
+ // INSTANTIATE_TEST_SUITE_P, creating regular tests for each test/parameter
+  // combination. This method can be called more than once; it has guards
+  // that protect against registering the tests more than once. If
+ // value-parameterized tests are disabled, RegisterParameterizedTests is
+ // present but does nothing.
+ void RegisterParameterizedTests();
+
+ // Runs all tests in this UnitTest object, prints the result, and
+ // returns true if all tests are successful. If any exception is
+ // thrown during a test, this test is considered to be failed, but
+ // the rest of the tests will still be run.
+ bool RunAllTests();
+
+ // Clears the results of all tests, except the ad hoc tests.
+ void ClearNonAdHocTestResult() {
+ ForEach(test_suites_, TestSuite::ClearTestSuiteResult);
+ }
+
+ // Clears the results of ad-hoc test assertions.
+ void ClearAdHocTestResult() {
+ ad_hoc_test_result_.Clear();
+ }
+
+ // Adds a TestProperty to the current TestResult object when invoked in a
+ // context of a test or a test suite, or to the global property set. If the
+ // result already contains a property with the same key, the value will be
+ // updated.
+ void RecordProperty(const TestProperty& test_property);
+
+ enum ReactionToSharding {
+ HONOR_SHARDING_PROTOCOL,
+ IGNORE_SHARDING_PROTOCOL
+ };
+
+ // Matches the full name of each test against the user-specified
+ // filter to decide whether the test should run, then records the
+ // result in each TestSuite and TestInfo object.
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+ // based on sharding variables in the environment.
+ // Returns the number of tests that should run.
+ int FilterTests(ReactionToSharding shard_tests);
+
+ // Prints the names of the tests matching the user-specified filter flag.
+ void ListTestsMatchingFilter();
+
+ const TestSuite* current_test_suite() const { return current_test_suite_; }
+ TestInfo* current_test_info() { return current_test_info_; }
+ const TestInfo* current_test_info() const { return current_test_info_; }
+
+ // Returns the vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*>& environments() { return environments_; }
+
+ // Getters for the per-thread Google Test trace stack.
+ std::vector<TraceInfo>& gtest_trace_stack() {
+ return *(gtest_trace_stack_.pointer());
+ }
+ const std::vector<TraceInfo>& gtest_trace_stack() const {
+ return gtest_trace_stack_.get();
+ }
+
+#if GTEST_HAS_DEATH_TEST
+ void InitDeathTestSubprocessControlInfo() {
+ internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+ }
+ // Returns a pointer to the parsed --gtest_internal_run_death_test
+ // flag, or NULL if that flag was not specified.
+ // This information is useful only in a death test child process.
+ // Must not be called before a call to InitGoogleTest.
+ const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+ return internal_run_death_test_flag_.get();
+ }
+
+ // Returns a pointer to the current death test factory.
+ internal::DeathTestFactory* death_test_factory() {
+ return death_test_factory_.get();
+ }
+
+ void SuppressTestEventsIfInSubprocess();
+
+ friend class ReplaceDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Initializes the event listener performing XML output as specified by
+ // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Initializes the event listener for streaming test results to a socket.
+ // Must not be called before InitGoogleTest.
+ void ConfigureStreamingOutput();
+#endif
+
+ // Performs initialization dependent upon flag values obtained in
+ // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+ // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+ // this function is also called from RunAllTests. Since this function can be
+ // called more than once, it has to be idempotent.
+ void PostFlagParsingInit();
+
+ // Gets the random seed used at the start of the current test iteration.
+ int random_seed() const { return random_seed_; }
+
+ // Gets the random number generator.
+ internal::Random* random() { return &random_; }
+
+ // Shuffles all test suites, and the tests within each test suite,
+ // making sure that death tests are still run first.
+ void ShuffleTests();
+
+ // Restores the test suites and tests to their order before the first shuffle.
+ void UnshuffleTests();
+
+ // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
+ // UnitTest::Run() starts.
+ bool catch_exceptions() const { return catch_exceptions_; }
+
+ private:
+ friend class ::testing::UnitTest;
+
+ // Used by UnitTest::Run() to capture the state of
+ // GTEST_FLAG(catch_exceptions) at the moment it starts.
+ void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
+
+ // The UnitTest object that owns this implementation object.
+ UnitTest* const parent_;
+
+ // The working directory when the first TEST() or TEST_F() was
+ // executed.
+ internal::FilePath original_working_dir_;
+
+ // The default test part result reporters.
+ DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+ DefaultPerThreadTestPartResultReporter
+ default_per_thread_test_part_result_reporter_;
+
+ // Points to (but doesn't own) the global test part result reporter.
+ TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+ // Protects read and write access to global_test_part_result_reporter_.
+ internal::Mutex global_test_part_result_reporter_mutex_;
+
+ // Points to (but doesn't own) the per-thread test part result reporter.
+ internal::ThreadLocal<TestPartResultReporterInterface*>
+ per_thread_test_part_result_reporter_;
+
+ // The vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*> environments_;
+
+ // The vector of TestSuites in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestSuite*> test_suites_;
+
+ // Provides a level of indirection for the test suite list to allow
+ // easy shuffling and restoring the test suite order. The i-th
+ // element of this vector is the index of the i-th test suite in the
+ // shuffled order.
+ std::vector<int> test_suite_indices_;
+
+ // ParameterizedTestRegistry object used to register value-parameterized
+ // tests.
+ internal::ParameterizedTestSuiteRegistry parameterized_test_registry_;
+ internal::TypeParameterizedTestSuiteRegistry
+ type_parameterized_test_registry_;
+
+ // The set holding the name of parameterized
+ // test suites that may go uninstantiated.
+ std::set<std::string> ignored_parameterized_test_suites_;
+
+ // Indicates whether RegisterParameterizedTests() has been called already.
+ bool parameterized_tests_registered_;
+
+ // Index of the last death test suite registered. Initially -1.
+ int last_death_test_suite_;
+
+ // This points to the TestSuite for the currently running test. It
+ // changes as Google Test goes through one test suite after another.
+ // When no test is running, this is set to NULL and Google Test
+ // stores assertion results in ad_hoc_test_result_. Initially NULL.
+ TestSuite* current_test_suite_;
+
+ // This points to the TestInfo for the currently running test. It
+ // changes as Google Test goes through one test after another. When
+ // no test is running, this is set to NULL and Google Test stores
+ // assertion results in ad_hoc_test_result_. Initially NULL.
+ TestInfo* current_test_info_;
+
+ // Normally, a user only writes assertions inside a TEST or TEST_F,
+ // or inside a function called by a TEST or TEST_F. Since Google
+ // Test keeps track of which test is current running, it can
+ // associate such an assertion with the test it belongs to.
+ //
+ // If an assertion is encountered when no TEST or TEST_F is running,
+ // Google Test attributes the assertion result to an imaginary "ad hoc"
+ // test, and records the result in ad_hoc_test_result_.
+ TestResult ad_hoc_test_result_;
+
+ // The list of event listeners that can be used to track events inside
+ // Google Test.
+ TestEventListeners listeners_;
+
+ // The OS stack trace getter. Will be deleted when the UnitTest
+ // object is destructed. By default, an OsStackTraceGetter is used,
+ // but the user can set this field to use a custom getter if that is
+ // desired.
+ OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+ // True if and only if PostFlagParsingInit() has been called.
+ bool post_flag_parse_init_performed_;
+
+ // The random number seed used at the beginning of the test run.
+ int random_seed_;
+
+ // Our random number generator.
+ internal::Random random_;
+
+ // The time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp_;
+
+ // How long the test took to run, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+ // The decomposed components of the gtest_internal_run_death_test flag,
+ // parsed when RUN_ALL_TESTS is called.
+ std::unique_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+ std::unique_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+ internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+ // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+ // starts.
+ bool catch_exceptions_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+}; // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+ return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ std::string GetLastErrnoDescription();
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !IsDigit(str[0])) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+ using BiggestConvertible = unsigned long long; // NOLINT
+
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); // NOLINT
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
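+
+// A minimal usage sketch (hypothetical values). Only plain decimal digits
+// are accepted; signs, leading whitespace, and overflowing values are all
+// rejected:
+//
+//   int n = 0;
+//   ParseNaturalNumber(std::string("123"), &n);  // true, n == 123
+//   ParseNaturalNumber(std::string("-5"), &n);   // false: leading sign
+//   ParseNaturalNumber(std::string(" 7"), &n);   // false: whitespace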
+#endif // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// the Google Test user but are required for testing. This class allows
+// our tests to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const std::string& xml_element,
+ const TestProperty& property) {
+ test_result->RecordProperty(xml_element, property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class StreamingListener : public EmptyTestEventListener {
+ public:
+ // Abstract base class for writing strings to a socket.
+ class AbstractSocketWriter {
+ public:
+ virtual ~AbstractSocketWriter() {}
+
+ // Sends a string to the socket.
+ virtual void Send(const std::string& message) = 0;
+
+ // Closes the socket.
+ virtual void CloseConnection() {}
+
+ // Sends a string and a newline to the socket.
+ void SendLn(const std::string& message) { Send(message + "\n"); }
+ };
+
+ // Concrete class for actually writing strings to a socket.
+ class SocketWriter : public AbstractSocketWriter {
+ public:
+ SocketWriter(const std::string& host, const std::string& port)
+ : sockfd_(-1), host_name_(host), port_num_(port) {
+ MakeConnection();
+ }
+
+ ~SocketWriter() override {
+ if (sockfd_ != -1)
+ CloseConnection();
+ }
+
+ // Sends a string to the socket.
+ void Send(const std::string& message) override {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "Send() can be called only when there is a connection.";
+
+ const auto len = static_cast<size_t>(message.length());
+ if (write(sockfd_, message.c_str(), len) != static_cast<ssize_t>(len)) {
+ GTEST_LOG_(WARNING)
+ << "stream_result_to: failed to stream to "
+ << host_name_ << ":" << port_num_;
+ }
+ }
+
+ private:
+ // Creates a client socket and connects to the server.
+ void MakeConnection();
+
+ // Closes the socket.
+ void CloseConnection() override {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "CloseConnection() can be called only when there is a connection.";
+
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+
+ int sockfd_; // socket file descriptor
+ const std::string host_name_;
+ const std::string port_num_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);
+ }; // class SocketWriter
+
+ // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+ static std::string UrlEncode(const char* str);
+
+ StreamingListener(const std::string& host, const std::string& port)
+ : socket_writer_(new SocketWriter(host, port)) {
+ Start();
+ }
+
+ explicit StreamingListener(AbstractSocketWriter* socket_writer)
+ : socket_writer_(socket_writer) { Start(); }
+
+ void OnTestProgramStart(const UnitTest& /* unit_test */) override {
+ SendLn("event=TestProgramStart");
+ }
+
+ void OnTestProgramEnd(const UnitTest& unit_test) override {
+    // Note that Google Test currently only reports elapsed time for each
+    // test iteration, not for the entire test program.
+ SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed()));
+
+ // Notify the streaming server to stop.
+ socket_writer_->CloseConnection();
+ }
+
+ void OnTestIterationStart(const UnitTest& /* unit_test */,
+ int iteration) override {
+ SendLn("event=TestIterationStart&iteration=" +
+ StreamableToString(iteration));
+ }
+
+ void OnTestIterationEnd(const UnitTest& unit_test,
+ int /* iteration */) override {
+ SendLn("event=TestIterationEnd&passed=" +
+ FormatBool(unit_test.Passed()) + "&elapsed_time=" +
+ StreamableToString(unit_test.elapsed_time()) + "ms");
+ }
+
+ // Note that "event=TestCaseStart" is a wire format and has to remain
+ // "case" for compatibility
+ void OnTestCaseStart(const TestCase& test_case) override {
+ SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
+ }
+
+ // Note that "event=TestCaseEnd" is a wire format and has to remain
+ // "case" for compatibility
+ void OnTestCaseEnd(const TestCase& test_case) override {
+ SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) +
+ "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) +
+ "ms");
+ }
+
+ void OnTestStart(const TestInfo& test_info) override {
+ SendLn(std::string("event=TestStart&name=") + test_info.name());
+ }
+
+ void OnTestEnd(const TestInfo& test_info) override {
+ SendLn("event=TestEnd&passed=" +
+ FormatBool((test_info.result())->Passed()) +
+ "&elapsed_time=" +
+ StreamableToString((test_info.result())->elapsed_time()) + "ms");
+ }
+
+ void OnTestPartResult(const TestPartResult& test_part_result) override {
+ const char* file_name = test_part_result.file_name();
+ if (file_name == nullptr) file_name = "";
+ SendLn("event=TestPartResult&file=" + UrlEncode(file_name) +
+ "&line=" + StreamableToString(test_part_result.line_number()) +
+ "&message=" + UrlEncode(test_part_result.message()));
+ }
+
+ private:
+ // Sends the given message and a newline to the socket.
+ void SendLn(const std::string& message) { socket_writer_->SendLn(message); }
+
+ // Called at the start of streaming to notify the receiver what
+ // protocol we are using.
+ void Start() { SendLn("gtest_streaming_protocol_version=1.0"); }
+
+ std::string FormatBool(bool value) { return value ? "1" : "0"; }
+
+ const std::unique_ptr<AbstractSocketWriter> socket_writer_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+}; // class StreamingListener
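+
+// A rough sketch of the wire traffic this listener produces for a single
+// passing test (names and timings hypothetical); each line is one SendLn():
+//
+//   gtest_streaming_protocol_version=1.0
+//   event=TestProgramStart
+//   event=TestIterationStart&iteration=0
+//   event=TestCaseStart&name=FooTest
+//   event=TestStart&name=Bar
+//   event=TestEnd&passed=1&elapsed_time=3ms
+//   event=TestCaseEnd&passed=1&elapsed_time=3ms
+//   event=TestIterationEnd&passed=1&elapsed_time=4ms
+//   event=TestProgramEnd&passed=1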
+
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+} // namespace internal
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+#endif // GOOGLETEST_SRC_GTEST_INTERNAL_INL_H_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif // GTEST_OS_WINDOWS
+
+#if GTEST_OS_MAC
+#ifndef GTEST_OS_IOS
+#include <crt_externs.h>
+#endif
+#endif
+
+#if GTEST_HAS_ABSL
+#include "absl/debugging/failure_signal_handler.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/symbolize.h"
+#include "absl/strings/str_cat.h"
+#endif // GTEST_HAS_ABSL
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test suite name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test suite whose name matches this filter is considered a death
+// test suite and will be run before test suites whose name doesn't
+// match this filter.
+static const char kDeathTestSuiteFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output format.
+static const char kDefaultOutputFormat[] = "xml";
+// The default output file.
+static const char kDefaultOutputFile[] = "test_detail";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true if and only if the --help flag or an equivalent form
+// is specified on the command line.
+bool g_help_flag = false;
+
+// Utility function to open a file for writing.
+static FILE* OpenFileForWriting(const std::string& output_file) {
+ FILE* fileout = nullptr;
+ FilePath output_file_path(output_file);
+ FilePath output_dir(output_file_path.RemoveFileName());
+
+ if (output_dir.CreateDirectoriesRecursively()) {
+ fileout = posix::FOpen(output_file.c_str(), "w");
+ }
+ if (fileout == nullptr) {
+ GTEST_LOG_(FATAL) << "Unable to open file \"" << output_file << "\"";
+ }
+ return fileout;
+}
+
+} // namespace internal
+
+// Bazel passes in the argument to '--test_filter' via the TESTBRIDGE_TEST_ONLY
+// environment variable.
+static const char* GetDefaultFilter() {
+ const char* const testbridge_test_only =
+ internal::posix::GetEnv("TESTBRIDGE_TEST_ONLY");
+ if (testbridge_test_only != nullptr) {
+ return testbridge_test_only;
+ }
+ return kUniversalFilter;
+}
+
+// Bazel passes in the argument to '--test_runner_fail_fast' via the
+// TESTBRIDGE_TEST_RUNNER_FAIL_FAST environment variable.
+static bool GetDefaultFailFast() {
+ const char* const testbridge_test_runner_fail_fast =
+ internal::posix::GetEnv("TESTBRIDGE_TEST_RUNNER_FAIL_FAST");
+ if (testbridge_test_runner_fail_fast != nullptr) {
+ return strcmp(testbridge_test_runner_fail_fast, "1") == 0;
+ }
+ return false;
+}
+
+GTEST_DEFINE_bool_(
+ fail_fast, internal::BoolFromGTestEnv("fail_fast", GetDefaultFailFast()),
+ "True if and only if a test failure should stop further test execution.");
+
+GTEST_DEFINE_bool_(
+ also_run_disabled_tests,
+ internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+ "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+ break_on_failure, internal::BoolFromGTestEnv("break_on_failure", false),
+ "True if and only if a failed assertion should be a debugger "
+ "break-point.");
+
+GTEST_DEFINE_bool_(catch_exceptions,
+ internal::BoolFromGTestEnv("catch_exceptions", true),
+ "True if and only if " GTEST_NAME_
+ " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+ color,
+ internal::StringFromGTestEnv("color", "auto"),
+ "Whether to use colors in the output. Valid values: yes, no, "
+ "and auto. 'auto' means to use colors if the output is "
+ "being sent to a terminal and the TERM environment variable "
+ "is set to a terminal type that supports colors.");
+
+GTEST_DEFINE_string_(
+ filter,
+ internal::StringFromGTestEnv("filter", GetDefaultFilter()),
+ "A colon-separated list of glob (not regex) patterns "
+ "for filtering the tests to run, optionally followed by a "
+ "'-' and a : separated list of negative patterns (tests to "
+ "exclude). A test is run if it matches one of the positive "
+ "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(
+ install_failure_signal_handler,
+ internal::BoolFromGTestEnv("install_failure_signal_handler", false),
+ "If true and supported on the current platform, " GTEST_NAME_ " should "
+ "install a signal handler that dumps debugging information when fatal "
+ "signals are raised.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+ "List all tests without running them.");
+
+// The net priority order after flag processing is thus:
+// --gtest_output command line flag
+// GTEST_OUTPUT environment variable
+// XML_OUTPUT_FILE environment variable
+// ''
+GTEST_DEFINE_string_(
+ output,
+ internal::StringFromGTestEnv("output",
+ internal::OutputFlagAlsoCheckEnvVar().c_str()),
+ "A format (defaults to \"xml\" but can be specified to be \"json\"), "
+ "optionally followed by a colon and an output file name or directory. "
+ "A directory is indicated by a trailing pathname separator. "
+ "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+ "If a directory is specified, output files will be created "
+ "within that directory, with file-names based on the test "
+ "executable's name and, if necessary, made unique by adding "
+ "digits.");
+
+GTEST_DEFINE_bool_(
+ brief, internal::BoolFromGTestEnv("brief", false),
+ "True if only test failures should be displayed in text output.");
+
+GTEST_DEFINE_bool_(print_time, internal::BoolFromGTestEnv("print_time", true),
+ "True if and only if " GTEST_NAME_
+ " should display elapsed time in text output.");
+
+GTEST_DEFINE_bool_(print_utf8, internal::BoolFromGTestEnv("print_utf8", true),
+ "True if and only if " GTEST_NAME_
+ " prints UTF8 characters as text.");
+
+GTEST_DEFINE_int32_(
+ random_seed,
+ internal::Int32FromGTestEnv("random_seed", 0),
+ "Random number seed to use when shuffling test orders. Must be in range "
+ "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+ repeat,
+ internal::Int32FromGTestEnv("repeat", 1),
+ "How many times to repeat each test. Specify a negative number "
+ "for repeating forever. Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(show_internal_stack_frames, false,
+ "True if and only if " GTEST_NAME_
+ " should include internal stack frames when "
+ "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(shuffle, internal::BoolFromGTestEnv("shuffle", false),
+ "True if and only if " GTEST_NAME_
+ " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+ stack_trace_depth,
+ internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+ "The maximum number of stack frames to print when an "
+ "assertion fails. The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_string_(
+ stream_result_to,
+ internal::StringFromGTestEnv("stream_result_to", ""),
+ "This flag specifies the host name and the port number on which to stream "
+ "test results. Example: \"localhost:555\". The flag is effective only on "
+ "Linux.");
+
+GTEST_DEFINE_bool_(
+ throw_on_failure,
+ internal::BoolFromGTestEnv("throw_on_failure", false),
+ "When this flag is specified, a failed assertion will throw an exception "
+ "if exceptions are enabled or exit the program with a non-zero code "
+ "otherwise. For use with an external test framework.");
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+GTEST_DEFINE_string_(
+ flagfile,
+ internal::StringFromGTestEnv("flagfile", ""),
+ "This flag specifies the flagfile to read command-line flags from.");
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG). Crashes if 'range' is 0 or greater
+// than kMaxRange.
+uint32_t Random::Generate(uint32_t range) {
+ // These constants are the same as are used in glibc's rand(3).
+ // Use wider types than necessary to prevent unsigned overflow diagnostics.
+ state_ = static_cast<uint32_t>(1103515245ULL*state_ + 12345U) % kMaxRange;
+
+ GTEST_CHECK_(range > 0)
+ << "Cannot generate a number in the range [0, 0).";
+ GTEST_CHECK_(range <= kMaxRange)
+ << "Generation of a number in [0, " << range << ") was requested, "
+ << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+ // Converting via modulus introduces a bit of downward bias, but
+ // it's simple, and a linear congruential generator isn't too good
+ // to begin with.
+ return state_ % range;
+}
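+
+// A minimal usage sketch (seed and range hypothetical):
+//
+//   Random random(42);                         // seeded LCG
+//   const uint32_t roll = random.Generate(6);  // in [0, 6)
+//
+// Because of the modulus conversion noted above, small values are very
+// slightly favored whenever kMaxRange is not a multiple of 'range'.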
+
+// GTestIsInitialized() returns true if and only if the user has initialized
+// Google Test. Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+static bool GTestIsInitialized() { return GetArgvs().size() > 0; }
+
+// Iterates over a vector of TestSuites, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestSuiteList(const std::vector<TestSuite*>& case_list,
+ int (TestSuite::*method)() const) {
+ int sum = 0;
+ for (size_t i = 0; i < case_list.size(); i++) {
+ sum += (case_list[i]->*method)();
+ }
+ return sum;
+}
+
+// Returns true if and only if the test suite passed.
+static bool TestSuitePassed(const TestSuite* test_suite) {
+ return test_suite->should_run() && test_suite->Passed();
+}
+
+// Returns true if and only if the test suite failed.
+static bool TestSuiteFailed(const TestSuite* test_suite) {
+ return test_suite->should_run() && test_suite->Failed();
+}
+
+// Returns true if and only if test_suite contains at least one test that
+// should run.
+static bool ShouldRunTestSuite(const TestSuite* test_suite) {
+ return test_suite->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message)
+ : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+ delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+ UnitTest::GetInstance()->
+ AddTestPartResult(data_->type, data_->file, data_->line,
+ AppendUserMessage(data_->message, message),
+ UnitTest::GetInstance()->impl()
+ ->CurrentOsStackTraceExceptTop(1)
+ // Skips the stack frame for this function itself.
+ ); // NOLINT
+}
+
+namespace {
+
+// When TEST_P is found without a matching INSTANTIATE_TEST_SUITE_P
+// to create test cases for it, a synthetic test case is
+// inserted to report either an error or a log message.
+//
+// This configuration bit will likely be removed at some point.
+constexpr bool kErrorOnUninstantiatedParameterizedTest = true;
+constexpr bool kErrorOnUninstantiatedTypeParameterizedTest = true;
+
+// A test that fails at a given file/line location with a given message.
+class FailureTest : public Test {
+ public:
+ explicit FailureTest(const CodeLocation& loc, std::string error_message,
+ bool as_error)
+ : loc_(loc),
+ error_message_(std::move(error_message)),
+ as_error_(as_error) {}
+
+ void TestBody() override {
+ if (as_error_) {
+ AssertHelper(TestPartResult::kNonFatalFailure, loc_.file.c_str(),
+ loc_.line, "") = Message() << error_message_;
+ } else {
+ std::cout << error_message_ << std::endl;
+ }
+ }
+
+ private:
+ const CodeLocation loc_;
+ const std::string error_message_;
+ const bool as_error_;
+};
+
+
+} // namespace
+
+std::set<std::string>* GetIgnoredParameterizedTestSuites() {
+ return UnitTest::GetInstance()->impl()->ignored_parameterized_test_suites();
+}
+
+// Adds the given test suite to the list of suites that are allowed to
+// go uninstantiated.
+MarkAsIgnored::MarkAsIgnored(const char* test_suite) {
+ GetIgnoredParameterizedTestSuites()->insert(test_suite);
+}
+
+// If this parameterized test suite has no instantiations (and that
+// has not been marked as okay), emit a test case reporting that.
+void InsertSyntheticTestCase(const std::string& name, CodeLocation location,
+ bool has_test_p) {
+ const auto& ignored = *GetIgnoredParameterizedTestSuites();
+ if (ignored.find(name) != ignored.end()) return;
+
+ const char kMissingInstantiation[] = //
+ " is defined via TEST_P, but never instantiated. None of the test cases "
+ "will run. Either no INSTANTIATE_TEST_SUITE_P is provided or the only "
+ "ones provided expand to nothing."
+ "\n\n"
+ "Ideally, TEST_P definitions should only ever be included as part of "
+ "binaries that intend to use them. (As opposed to, for example, being "
+ "placed in a library that may be linked in to get other utilities.)";
+
+ const char kMissingTestCase[] = //
+ " is instantiated via INSTANTIATE_TEST_SUITE_P, but no tests are "
+ "defined via TEST_P . No test cases will run."
+ "\n\n"
+ "Ideally, INSTANTIATE_TEST_SUITE_P should only ever be invoked from "
+ "code that always depend on code that provides TEST_P. Failing to do "
+ "so is often an indication of dead code, e.g. the last TEST_P was "
+ "removed but the rest got left behind.";
+
+ std::string message =
+ "Parameterized test suite " + name +
+ (has_test_p ? kMissingInstantiation : kMissingTestCase) +
+ "\n\n"
+ "To suppress this error for this test suite, insert the following line "
+ "(in a non-header) in the namespace it is defined in:"
+ "\n\n"
+ "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" + name + ");";
+
+ std::string full_name = "UninstantiatedParameterizedTestSuite<" + name + ">";
+ RegisterTest( //
+ "GoogleTestVerification", full_name.c_str(),
+ nullptr, // No type parameter.
+ nullptr, // No value parameter.
+ location.file.c_str(), location.line, [message, location] {
+ return new FailureTest(location, message,
+ kErrorOnUninstantiatedParameterizedTest);
+ });
+}
+
+void RegisterTypeParameterizedTestSuite(const char* test_suite_name,
+ CodeLocation code_location) {
+ GetUnitTestImpl()->type_parameterized_test_registry().RegisterTestSuite(
+ test_suite_name, code_location);
+}
+
+void RegisterTypeParameterizedTestSuiteInstantiation(const char* case_name) {
+ GetUnitTestImpl()
+ ->type_parameterized_test_registry()
+ .RegisterInstantiation(case_name);
+}
+
+void TypeParameterizedTestSuiteRegistry::RegisterTestSuite(
+ const char* test_suite_name, CodeLocation code_location) {
+ suites_.emplace(std::string(test_suite_name),
+ TypeParameterizedTestSuiteInfo(code_location));
+}
+
+void TypeParameterizedTestSuiteRegistry::RegisterInstantiation(
+ const char* test_suite_name) {
+ auto it = suites_.find(std::string(test_suite_name));
+ if (it != suites_.end()) {
+ it->second.instantiated = true;
+ } else {
+ GTEST_LOG_(ERROR) << "Unknown type parameterized test suit '"
+ << test_suite_name << "'";
+ }
+}
+
+void TypeParameterizedTestSuiteRegistry::CheckForInstantiations() {
+ const auto& ignored = *GetIgnoredParameterizedTestSuites();
+ for (const auto& testcase : suites_) {
+ if (testcase.second.instantiated) continue;
+ if (ignored.find(testcase.first) != ignored.end()) continue;
+
+ std::string message =
+ "Type parameterized test suite " + testcase.first +
+ " is defined via REGISTER_TYPED_TEST_SUITE_P, but never instantiated "
+ "via INSTANTIATE_TYPED_TEST_SUITE_P. None of the test cases will run."
+ "\n\n"
+ "Ideally, TYPED_TEST_P definitions should only ever be included as "
+ "part of binaries that intend to use them. (As opposed to, for "
+ "example, being placed in a library that may be linked in to get other "
+ "utilities.)"
+ "\n\n"
+ "To suppress this error for this test suite, insert the following line "
+ "(in a non-header) in the namespace it is defined in:"
+ "\n\n"
+ "GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(" +
+ testcase.first + ");";
+
+ std::string full_name =
+ "UninstantiatedTypeParameterizedTestSuite<" + testcase.first + ">";
+ RegisterTest( //
+ "GoogleTestVerification", full_name.c_str(),
+ nullptr, // No type parameter.
+ nullptr, // No value parameter.
+ testcase.second.code_location.file.c_str(),
+ testcase.second.code_location.line, [message, testcase] {
+ return new FailureTest(testcase.second.code_location, message,
+ kErrorOnUninstantiatedTypeParameterizedTest);
+ });
+ }
+}
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+static ::std::vector<std::string> g_argvs;
+
+::std::vector<std::string> GetArgvs() {
+#if defined(GTEST_CUSTOM_GET_ARGVS_)
+ // GTEST_CUSTOM_GET_ARGVS_() may return a container of std::string or
+ // ::string. This code converts it to the appropriate type.
+ const auto& custom = GTEST_CUSTOM_GET_ARGVS_();
+ return ::std::vector<std::string>(custom.begin(), custom.end());
+#else // defined(GTEST_CUSTOM_GET_ARGVS_)
+ return g_argvs;
+#endif // defined(GTEST_CUSTOM_GET_ARGVS_)
+}
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+ FilePath result;
+
+#if GTEST_OS_WINDOWS || GTEST_OS_OS2
+ result.Set(FilePath(GetArgvs()[0]).RemoveExtension("exe"));
+#else
+ result.Set(FilePath(GetArgvs()[0]));
+#endif // GTEST_OS_WINDOWS || GTEST_OS_OS2
+
+ return result.RemoveDirectoryName();
+}
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+std::string UnitTestOptions::GetOutputFormat() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ const char* const colon = strchr(gtest_output_flag, ':');
+ return (colon == nullptr)
+ ? std::string(gtest_output_flag)
+ : std::string(gtest_output_flag,
+ static_cast<size_t>(colon - gtest_output_flag));
+}
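+
+// For example, given --gtest_output=json:reports/ this returns "json";
+// given --gtest_output=xml it returns "xml"; an empty flag yields "".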
+
+// Returns the name of the requested output file, or the default if none
+// was explicitly specified.
+std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+
+ std::string format = GetOutputFormat();
+ if (format.empty())
+ format = std::string(kDefaultOutputFormat);
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ if (colon == nullptr)
+ return internal::FilePath::MakeFileName(
+ internal::FilePath(
+ UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(kDefaultOutputFile), 0,
+ format.c_str()).string();
+
+ internal::FilePath output_name(colon + 1);
+ if (!output_name.IsAbsolutePath())
+ output_name = internal::FilePath::ConcatPaths(
+ internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(colon + 1));
+
+ if (!output_name.IsDirectory())
+ return output_name.string();
+
+ internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+ output_name, internal::GetCurrentExecutableName(),
+ GetOutputFormat().c_str()));
+ return result.string();
+}
+
+// Returns true if and only if the wildcard pattern matches the string. Each
+// pattern consists of regular characters, single-character wildcards (?), and
+// multi-character wildcards (*).
+//
+// This function implements a linear-time string globbing algorithm based on
+// https://research.swtch.com/glob.
+static bool PatternMatchesString(const std::string& name_str,
+ const char* pattern, const char* pattern_end) {
+ const char* name = name_str.c_str();
+ const char* const name_begin = name;
+ const char* const name_end = name + name_str.size();
+
+ const char* pattern_next = pattern;
+ const char* name_next = name;
+
+ while (pattern < pattern_end || name < name_end) {
+ if (pattern < pattern_end) {
+ switch (*pattern) {
+ default: // Match an ordinary character.
+ if (name < name_end && *name == *pattern) {
+ ++pattern;
+ ++name;
+ continue;
+ }
+ break;
+ case '?': // Match any single character.
+ if (name < name_end) {
+ ++pattern;
+ ++name;
+ continue;
+ }
+ break;
+ case '*':
+ // Match zero or more characters. Start by skipping over the wildcard
+ // and matching zero characters from name. If that fails, restart and
+ // match one more character than the last attempt.
+ pattern_next = pattern;
+ name_next = name + 1;
+ ++pattern;
+ continue;
+ }
+ }
+ // Failed to match a character. Restart if possible.
+ if (name_begin < name_next && name_next <= name_end) {
+ pattern = pattern_next;
+ name = name_next;
+ continue;
+ }
+ return false;
+ }
+ return true;
+}
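+
+// A few illustrative matches (pattern_end points one past the last pattern
+// character):
+//
+//   pattern "a*c" vs. "abc"  -> true   ('*' consumes "b")
+//   pattern "a?c" vs. "abc"  -> true   ('?' consumes exactly one char)
+//   pattern "a?c" vs. "ac"   -> false  ('?' must consume a character)
+//   pattern "ab"  vs. "abc"  -> false  (trailing "c" is unmatched)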
+
+bool UnitTestOptions::MatchesFilter(const std::string& name_str,
+ const char* filter) {
+ // The filter is a list of patterns separated by colons (:).
+ const char* pattern = filter;
+ while (true) {
+ // Find the bounds of this pattern.
+ const char* const next_sep = strchr(pattern, ':');
+ const char* const pattern_end =
+ next_sep != nullptr ? next_sep : pattern + strlen(pattern);
+
+ // Check if this pattern matches name_str.
+ if (PatternMatchesString(name_str, pattern, pattern_end)) {
+ return true;
+ }
+
+ // Give up on this pattern. However, if we found a pattern separator (:),
+ // advance to the next pattern (skipping over the separator) and restart.
+ if (next_sep == nullptr) {
+ return false;
+ }
+ pattern = next_sep + 1;
+ }
+#if defined(__EDG__)
+#pragma diag_suppress code_is_unreachable
+#endif
+ return true;
+}
+
+// Returns true if and only if the user-specified filter matches the test
+// suite name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const std::string& test_suite_name,
+ const std::string& test_name) {
+  const std::string full_name = test_suite_name + "." + test_name;
+
+ // Split --gtest_filter at '-', if there is one, to separate into
+ // positive filter and negative filter portions
+ const char* const p = GTEST_FLAG(filter).c_str();
+ const char* const dash = strchr(p, '-');
+ std::string positive;
+ std::string negative;
+ if (dash == nullptr) {
+ positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
+ negative = "";
+ } else {
+ positive = std::string(p, dash); // Everything up to the dash
+ negative = std::string(dash + 1); // Everything after the dash
+ if (positive.empty()) {
+ // Treat '-test1' as the same as '*-test1'
+ positive = kUniversalFilter;
+ }
+ }
+
+ // A filter is a colon-separated list of patterns. It matches a
+ // test if any pattern in it matches the test.
+ return (MatchesFilter(full_name, positive.c_str()) &&
+ !MatchesFilter(full_name, negative.c_str()));
+}
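+
+// Illustrative filter semantics for a test named FooTest.Bar:
+//
+//   --gtest_filter=*                      -> runs (universal filter)
+//   --gtest_filter=FooTest.*              -> runs
+//   --gtest_filter=FooTest.*-FooTest.Bar  -> excluded by the negative part
+//   --gtest_filter=-FooTest.Bar           -> treated as *-FooTest.Bar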
+
+#if GTEST_HAS_SEH
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+ // Google Test should handle a SEH exception if:
+ // 1. the user wants it to, AND
+ // 2. this is not a breakpoint exception, AND
+ // 3. this is not a C++ exception (VC++ implements them via SEH,
+ // apparently).
+ //
+ // SEH exception code for C++ exceptions.
+ // (see http://support.microsoft.com/kb/185294 for more information).
+ const DWORD kCxxExceptionCode = 0xe06d7363;
+
+ bool should_handle = true;
+
+ if (!GTEST_FLAG(catch_exceptions))
+ should_handle = false;
+ else if (exception_code == EXCEPTION_BREAKPOINT)
+ should_handle = false;
+ else if (exception_code == kCxxExceptionCode)
+ should_handle = false;
+
+ return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+#endif // GTEST_HAS_SEH
+
+} // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ TestPartResultArray* result)
+ : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+ result_(result) {
+ Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ InterceptMode intercept_mode, TestPartResultArray* result)
+ : intercept_mode_(intercept_mode),
+ result_(result) {
+ Init();
+}
+
+void ScopedFakeTestPartResultReporter::Init() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ old_reporter_ = impl->GetGlobalTestPartResultReporter();
+ impl->SetGlobalTestPartResultReporter(this);
+ } else {
+ old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+ impl->SetTestPartResultReporterForCurrentThread(this);
+ }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ impl->SetGlobalTestPartResultReporter(old_reporter_);
+ } else {
+ impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+ }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ result_->Append(result);
+}
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test. We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test. This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X. The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code. GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from the
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+ return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+static AssertionResult HasOneFailure(const char* /* results_expr */,
+ const char* /* type_expr */,
+ const char* /* substr_expr */,
+ const TestPartResultArray& results,
+ TestPartResult::Type type,
+ const std::string& substr) {
+ const std::string expected(type == TestPartResult::kFatalFailure ?
+ "1 fatal failure" :
+ "1 non-fatal failure");
+ Message msg;
+ if (results.size() != 1) {
+ msg << "Expected: " << expected << "\n"
+ << " Actual: " << results.size() << " failures";
+ for (int i = 0; i < results.size(); i++) {
+ msg << "\n" << results.GetTestPartResult(i);
+ }
+ return AssertionFailure() << msg;
+ }
+
+ const TestPartResult& r = results.GetTestPartResult(0);
+ if (r.type() != type) {
+ return AssertionFailure() << "Expected: " << expected << "\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ if (strstr(r.message(), substr.c_str()) == nullptr) {
+ return AssertionFailure() << "Expected: " << expected << " containing \""
+ << substr << "\"\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker::SingleFailureChecker(const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const std::string& substr)
+ : results_(results), type_(type), substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+ EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
+}
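+
+// These two helpers are typically used together; this sketch mirrors,
+// in simplified form, what the EXPECT_NONFATAL_FAILURE macro in
+// gtest-spi.h expands to:
+//
+//   TestPartResultArray results;
+//   {
+//     SingleFailureChecker checker(
+//         &results, TestPartResult::kNonFatalFailure, "oops");
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "oops";
+//   }  // The reporter is restored first; then the checker verifies the
+//      // single intercepted non-fatal failure containing "oops".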
+
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->current_test_result()->AddTestPartResult(result);
+ unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  return global_test_part_result_reporter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter) {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  global_test_part_result_reporter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+ return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter) {
+ per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test suites.
+int UnitTestImpl::successful_test_suite_count() const {
+ return CountIf(test_suites_, TestSuitePassed);
+}
+
+// Gets the number of failed test suites.
+int UnitTestImpl::failed_test_suite_count() const {
+ return CountIf(test_suites_, TestSuiteFailed);
+}
+
+// Gets the number of all test suites.
+int UnitTestImpl::total_test_suite_count() const {
+ return static_cast<int>(test_suites_.size());
+}
+
+// Gets the number of all test suites that contain at least one test
+// that should run.
+int UnitTestImpl::test_suite_to_run_count() const {
+ return CountIf(test_suites_, ShouldRunTestSuite);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::successful_test_count);
+}
+
+// Gets the number of skipped tests.
+int UnitTestImpl::skipped_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::skipped_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::failed_test_count);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTestImpl::reportable_disabled_test_count() const {
+ return SumOverTestSuiteList(test_suites_,
+ &TestSuite::reportable_disabled_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::disabled_test_count);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTestImpl::reportable_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::reportable_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+ return SumOverTestSuiteList(test_suites_, &TestSuite::test_to_run_count);
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+ return os_stack_trace_getter()->CurrentStackTrace(
+ static_cast<int>(GTEST_FLAG(stack_trace_depth)),
+ skip_count + 1
+ // Skips the user-specified number of frames plus this function
+ // itself.
+ ); // NOLINT
+}
+
+// A helper class for measuring elapsed times.
+class Timer {
+ public:
+ Timer() : start_(std::chrono::steady_clock::now()) {}
+
+ // Return time elapsed in milliseconds since the timer was created.
+ TimeInMillis Elapsed() {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::steady_clock::now() - start_)
+ .count();
+ }
+
+ private:
+ std::chrono::steady_clock::time_point start_;
+};
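+
+// A minimal usage sketch:
+//
+//   Timer timer;
+//   // ... run some tests ...
+//   const TimeInMillis elapsed_ms = timer.Elapsed();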
+
+// Returns a timestamp as milliseconds since the epoch. Note that this time
+// may jump around due to adjustments by the system; to measure elapsed time,
+// use Timer instead.
+TimeInMillis GetTimeInMillis() {
+ return std::chrono::duration_cast<std::chrono::milliseconds>(
+ std::chrono::system_clock::now() -
+ std::chrono::system_clock::from_time_t(0))
+ .count();
+}
+
+// Utilities
+
+// class String.
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Creates a UTF-16 wide string from the given ANSI string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the wide string, or NULL if the
+// input is NULL.
+LPCWSTR String::AnsiToUtf16(const char* ansi) {
+ if (!ansi) return nullptr;
+ const int length = strlen(ansi);
+ const int unicode_length =
+ MultiByteToWideChar(CP_ACP, 0, ansi, length, nullptr, 0);
+ WCHAR* unicode = new WCHAR[unicode_length + 1];
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ unicode, unicode_length);
+ unicode[unicode_length] = 0;
+ return unicode;
+}
+
+// Creates an ANSI string from the given wide string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the ANSI string, or NULL if the
+// input is NULL.
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
+ if (!utf16_str) return nullptr;
+ const int ansi_length = WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, nullptr,
+ 0, nullptr, nullptr);
+ char* ansi = new char[ansi_length + 1];
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, ansi, ansi_length, nullptr,
+ nullptr);
+ ansi[ansi_length] = 0;
+ return ansi;
+}
+
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Compares two C strings. Returns true if and only if they have the same
+// content.
+//
+// Unlike strcmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CStringEquals(const char * lhs, const char * rhs) {
+ if (lhs == nullptr) return rhs == nullptr;
+
+ if (rhs == nullptr) return false;
+
+ return strcmp(lhs, rhs) == 0;
+}
+
+#if GTEST_HAS_STD_WSTRING
+
+// Converts an array of wide chars to a narrow string using the UTF-8
+// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
+ Message* msg) {
+ for (size_t i = 0; i != length; ) { // NOLINT
+ if (wstr[i] != L'\0') {
+ *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+ while (i != length && wstr[i] != L'\0')
+ i++;
+ } else {
+ *msg << '\0';
+ i++;
+ }
+ }
+}
+
+#endif // GTEST_HAS_STD_WSTRING
+
+void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest) {
+ ::std::vector< ::std::string> parsed;
+ ::std::string::size_type pos = 0;
+ while (::testing::internal::AlwaysTrue()) {
+ const ::std::string::size_type colon = str.find(delimiter, pos);
+ if (colon == ::std::string::npos) {
+ parsed.push_back(str.substr(pos));
+ break;
+ } else {
+ parsed.push_back(str.substr(pos, colon - pos));
+ pos = colon + 1;
+ }
+ }
+ dest->swap(parsed);
+}
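+
+// A minimal usage sketch (hypothetical input); empty segments are kept:
+//
+//   ::std::vector< ::std::string> parts;
+//   SplitString("a:b::c", ':', &parts);  // parts == {"a", "b", "", "c"}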
+
+} // namespace internal
+
+// Constructs an empty Message.
+// We allocate the stringstream separately because otherwise each use of
+// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
+// stack frame leading to huge stack frames in some cases; gcc does not reuse
+// the stack space.
+Message::Message() : ss_(new ::std::stringstream) {
+ // By default, we want there to be enough precision when printing
+ // a double to a Message.
+ *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+}
+
+// These two overloads allow streaming a wide C string to a Message
+// using the UTF-8 encoding.
+Message& Message::operator <<(const wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+}
+Message& Message::operator <<(wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+}
+
+#if GTEST_HAS_STD_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::std::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+// Gets the text streamed to this object so far as an std::string.
+// Each '\0' character in the buffer is replaced with "\\0".
+std::string Message::GetString() const {
+ return internal::StringStreamToString(ss_.get());
+}
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+ : success_(other.success_),
+ message_(other.message_.get() != nullptr
+ ? new ::std::string(*other.message_)
+ : static_cast< ::std::string*>(nullptr)) {}
+
+// Swaps two AssertionResults.
+void AssertionResult::swap(AssertionResult& other) {
+ using std::swap;
+ swap(success_, other.success_);
+ swap(message_, other.message_);
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+ AssertionResult negation(!success_);
+ if (message_.get() != nullptr) negation << *message_;
+ return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+ return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+ return AssertionResult(false);
+}
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+ return AssertionFailure() << message;
+}
+
+namespace internal {
+
+namespace edit_distance {
+std::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left,
+ const std::vector<size_t>& right) {
+ std::vector<std::vector<double> > costs(
+ left.size() + 1, std::vector<double>(right.size() + 1));
+ std::vector<std::vector<EditType> > best_move(
+ left.size() + 1, std::vector<EditType>(right.size() + 1));
+
+ // Populate for empty right.
+ for (size_t l_i = 0; l_i < costs.size(); ++l_i) {
+ costs[l_i][0] = static_cast<double>(l_i);
+ best_move[l_i][0] = kRemove;
+ }
+ // Populate for empty left.
+ for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) {
+ costs[0][r_i] = static_cast<double>(r_i);
+ best_move[0][r_i] = kAdd;
+ }
+
+ for (size_t l_i = 0; l_i < left.size(); ++l_i) {
+ for (size_t r_i = 0; r_i < right.size(); ++r_i) {
+ if (left[l_i] == right[r_i]) {
+ // Found a match. Consume it.
+ costs[l_i + 1][r_i + 1] = costs[l_i][r_i];
+ best_move[l_i + 1][r_i + 1] = kMatch;
+ continue;
+ }
+
+ const double add = costs[l_i + 1][r_i];
+ const double remove = costs[l_i][r_i + 1];
+ const double replace = costs[l_i][r_i];
+ if (add < remove && add < replace) {
+ costs[l_i + 1][r_i + 1] = add + 1;
+ best_move[l_i + 1][r_i + 1] = kAdd;
+ } else if (remove < add && remove < replace) {
+ costs[l_i + 1][r_i + 1] = remove + 1;
+ best_move[l_i + 1][r_i + 1] = kRemove;
+ } else {
+        // We make replace a little more expensive than add/remove to lower
+        // its priority.
+ costs[l_i + 1][r_i + 1] = replace + 1.00001;
+ best_move[l_i + 1][r_i + 1] = kReplace;
+ }
+ }
+ }
+
+ // Reconstruct the best path. We do it in reverse order.
+ std::vector<EditType> best_path;
+ for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) {
+ EditType move = best_move[l_i][r_i];
+ best_path.push_back(move);
+ l_i -= move != kAdd;
+ r_i -= move != kRemove;
+ }
+ std::reverse(best_path.begin(), best_path.end());
+ return best_path;
+}
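+
+// For example (hypothetical ids), CalculateOptimalEdits({1, 2, 3}, {1, 3, 4})
+// yields {kMatch, kRemove, kMatch, kAdd}: keep 1, drop 2, keep 3, add 4.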
+
+namespace {
+
+// Helper class to convert string into ids with deduplication.
+class InternalStrings {
+ public:
+ size_t GetId(const std::string& str) {
+ IdMap::iterator it = ids_.find(str);
+ if (it != ids_.end()) return it->second;
+ size_t id = ids_.size();
+ return ids_[str] = id;
+ }
+
+ private:
+ typedef std::map<std::string, size_t> IdMap;
+ IdMap ids_;
+};
+
+} // namespace
+
+std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<std::string>& left,
+ const std::vector<std::string>& right) {
+ std::vector<size_t> left_ids, right_ids;
+ {
+ InternalStrings intern_table;
+ for (size_t i = 0; i < left.size(); ++i) {
+ left_ids.push_back(intern_table.GetId(left[i]));
+ }
+ for (size_t i = 0; i < right.size(); ++i) {
+ right_ids.push_back(intern_table.GetId(right[i]));
+ }
+ }
+ return CalculateOptimalEdits(left_ids, right_ids);
+}
+
+namespace {
+
+// Helper class that holds the state for one hunk and prints it out to the
+// stream.
+// It reorders adds/removes when possible to group all removes before all
+// adds. It also adds the hunk header before printing into the stream.
+class Hunk {
+ public:
+ Hunk(size_t left_start, size_t right_start)
+ : left_start_(left_start),
+ right_start_(right_start),
+ adds_(),
+ removes_(),
+ common_() {}
+
+ void PushLine(char edit, const char* line) {
+ switch (edit) {
+ case ' ':
+ ++common_;
+ FlushEdits();
+ hunk_.push_back(std::make_pair(' ', line));
+ break;
+ case '-':
+ ++removes_;
+ hunk_removes_.push_back(std::make_pair('-', line));
+ break;
+ case '+':
+ ++adds_;
+ hunk_adds_.push_back(std::make_pair('+', line));
+ break;
+ }
+ }
+
+ void PrintTo(std::ostream* os) {
+ PrintHeader(os);
+ FlushEdits();
+ for (std::list<std::pair<char, const char*> >::const_iterator it =
+ hunk_.begin();
+ it != hunk_.end(); ++it) {
+ *os << it->first << it->second << "\n";
+ }
+ }
+
+ bool has_edits() const { return adds_ || removes_; }
+
+ private:
+ void FlushEdits() {
+ hunk_.splice(hunk_.end(), hunk_removes_);
+ hunk_.splice(hunk_.end(), hunk_adds_);
+ }
+
+ // Print a unified diff header for one hunk.
+ // The format is
+ // "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
+ // where the left/right parts are omitted if unnecessary.
+ void PrintHeader(std::ostream* ss) const {
+ *ss << "@@ ";
+ if (removes_) {
+ *ss << "-" << left_start_ << "," << (removes_ + common_);
+ }
+ if (removes_ && adds_) {
+ *ss << " ";
+ }
+ if (adds_) {
+ *ss << "+" << right_start_ << "," << (adds_ + common_);
+ }
+ *ss << " @@\n";
+ }
+
+ size_t left_start_, right_start_;
+ size_t adds_, removes_, common_;
+ std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_;
+};
+
+} // namespace
+
+// Create a list of diff hunks in Unified diff format.
+// Each hunk has a header generated by PrintHeader above plus a body with
+// lines prefixed with ' ' for no change, '-' for deletion and '+' for
+// addition.
+// 'context' represents the desired unchanged prefix/suffix around the diff.
+// If two hunks are close enough that their contexts overlap, then they are
+// joined into one hunk.
+std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+ const std::vector<std::string>& right,
+ size_t context) {
+ const std::vector<EditType> edits = CalculateOptimalEdits(left, right);
+
+ size_t l_i = 0, r_i = 0, edit_i = 0;
+ std::stringstream ss;
+ while (edit_i < edits.size()) {
+ // Find first edit.
+ while (edit_i < edits.size() && edits[edit_i] == kMatch) {
+ ++l_i;
+ ++r_i;
+ ++edit_i;
+ }
+
+ // Find the first line to include in the hunk.
+ const size_t prefix_context = std::min(l_i, context);
+ Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1);
+ for (size_t i = prefix_context; i > 0; --i) {
+ hunk.PushLine(' ', left[l_i - i].c_str());
+ }
+
+    // Iterate over the edits until we have found enough suffix for the hunk
+    // or the input is exhausted.
+ size_t n_suffix = 0;
+ for (; edit_i < edits.size(); ++edit_i) {
+ if (n_suffix >= context) {
+ // Continue only if the next hunk is very close.
+ auto it = edits.begin() + static_cast<int>(edit_i);
+ while (it != edits.end() && *it == kMatch) ++it;
+ if (it == edits.end() ||
+ static_cast<size_t>(it - edits.begin()) - edit_i >= context) {
+ // There is no next edit or it is too far away.
+ break;
+ }
+ }
+
+ EditType edit = edits[edit_i];
+ // Reset count when a non match is found.
+ n_suffix = edit == kMatch ? n_suffix + 1 : 0;
+
+ if (edit == kMatch || edit == kRemove || edit == kReplace) {
+ hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str());
+ }
+ if (edit == kAdd || edit == kReplace) {
+ hunk.PushLine('+', right[r_i].c_str());
+ }
+
+ // Advance indices, depending on edit type.
+ l_i += edit != kAdd;
+ r_i += edit != kRemove;
+ }
+
+ if (!hunk.has_edits()) {
+ // We are done. We don't want this hunk.
+ break;
+ }
+
+ hunk.PrintTo(&ss);
+ }
+ return ss.str();
+}
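+
+// For example, diffing {"1", "2", "3"} against {"1", "3", "4"} with a
+// context of 2 produces:
+//
+//   @@ -1,3 +1,3 @@
+//    1
+//   -2
+//    3
+//   +4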
+
+} // namespace edit_distance
+
+namespace {
+
+// The string representations of the values received in EqFailure() are
+// already escaped. Split them on escaped '\n' boundaries. Leave all other
+// escaped characters the same.
+std::vector<std::string> SplitEscapedString(const std::string& str) {
+ std::vector<std::string> lines;
+ size_t start = 0, end = str.size();
+ if (end > 2 && str[0] == '"' && str[end - 1] == '"') {
+ ++start;
+ --end;
+ }
+ bool escaped = false;
+ for (size_t i = start; i + 1 < end; ++i) {
+ if (escaped) {
+ escaped = false;
+ if (str[i] == 'n') {
+ lines.push_back(str.substr(start, i - start - 1));
+ start = i + 1;
+ }
+ } else {
+ escaped = str[i] == '\\';
+ }
+ }
+ lines.push_back(str.substr(start, end - start));
+ return lines;
+}
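+
+// For example, an input of "line1\nline2" surrounded by quote characters
+// (i.e. the escaped form produced by the value printers) yields
+// {"line1", "line2"}: the quotes are stripped and the string is split at
+// the escaped '\n'.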
+
+} // namespace
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// lhs_expression: "foo"
+// rhs_expression: "bar"
+// lhs_value: "5"
+// rhs_value: "6"
+//
+// The ignoring_case parameter is true if and only if the assertion is a
+// *_STRCASEEQ*. When it's true, the string "Ignoring case" will
+// be inserted into the message.
+AssertionResult EqFailure(const char* lhs_expression,
+ const char* rhs_expression,
+ const std::string& lhs_value,
+ const std::string& rhs_value,
+ bool ignoring_case) {
+ Message msg;
+ msg << "Expected equality of these values:";
+ msg << "\n " << lhs_expression;
+ if (lhs_value != lhs_expression) {
+ msg << "\n Which is: " << lhs_value;
+ }
+ msg << "\n " << rhs_expression;
+ if (rhs_value != rhs_expression) {
+ msg << "\n Which is: " << rhs_value;
+ }
+
+ if (ignoring_case) {
+ msg << "\nIgnoring case";
+ }
+
+ if (!lhs_value.empty() && !rhs_value.empty()) {
+ const std::vector<std::string> lhs_lines =
+ SplitEscapedString(lhs_value);
+ const std::vector<std::string> rhs_lines =
+ SplitEscapedString(rhs_value);
+ if (lhs_lines.size() > 1 || rhs_lines.size() > 1) {
+ msg << "\nWith diff:\n"
+ << edit_distance::CreateUnifiedDiff(lhs_lines, rhs_lines);
+ }
+ }
+
+ return AssertionFailure() << msg;
+}
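+
+// For instance (illustrative; exact indentation elided), ASSERT_EQ(foo, bar)
+// with foo == 5 and bar == 6 produces:
+//
+//   Expected equality of these values:
+//     foo
+//       Which is: 5
+//     bar
+//       Which is: 6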
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+std::string GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value) {
+ const char* actual_message = assertion_result.message();
+ Message msg;
+ msg << "Value of: " << expression_text
+ << "\n Actual: " << actual_predicate_value;
+ if (actual_message[0] != '\0')
+ msg << " (" << actual_message << ")";
+ msg << "\nExpected: " << expected_predicate_value;
+ return msg.GetString();
+}
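+
+// For example (illustrative): a failing EXPECT_TRUE(IsPrime(4)), where the
+// predicate's AssertionResult carries the message "4 is divisible by 2",
+// produces:
+//
+//   Value of: IsPrime(4)
+//     Actual: false (4 is divisible by 2)
+//   Expected: true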
+
+// Helper function for implementing ASSERT_NEAR.
+AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error) {
+ const double diff = fabs(val1 - val2);
+ if (diff <= abs_error) return AssertionSuccess();
+
+ // Find the value which is closest to zero.
+ const double min_abs = std::min(fabs(val1), fabs(val2));
+ // Find the distance to the next double from that value.
+ const double epsilon =
+ nextafter(min_abs, std::numeric_limits<double>::infinity()) - min_abs;
+ // Detect the case where abs_error is so small that EXPECT_NEAR is
+ // effectively the same as EXPECT_EQUAL, and give an informative error
+ // message so that the situation can be more easily understood without
+ // requiring exotic floating-point knowledge.
+ // Don't do an epsilon check if abs_error is zero because that implies
+ // that an equality check was actually intended.
+ if (!(std::isnan)(val1) && !(std::isnan)(val2) && abs_error > 0 &&
+ abs_error < epsilon) {
+ return AssertionFailure()
+ << "The difference between " << expr1 << " and " << expr2 << " is "
+ << diff << ", where\n"
+ << expr1 << " evaluates to " << val1 << ",\n"
+ << expr2 << " evaluates to " << val2 << ".\nThe abs_error parameter "
+ << abs_error_expr << " evaluates to " << abs_error
+ << " which is smaller than the minimum distance between doubles for "
+ "numbers of this magnitude which is "
+ << epsilon
+ << ", thus making this EXPECT_NEAR check equivalent to "
+ "EXPECT_EQUAL. Consider using EXPECT_DOUBLE_EQ instead.";
+ }
+ return AssertionFailure()
+ << "The difference between " << expr1 << " and " << expr2
+ << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+ << expr1 << " evaluates to " << val1 << ",\n"
+ << expr2 << " evaluates to " << val2 << ", and\n"
+ << abs_error_expr << " evaluates to " << abs_error << ".";
+}
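+
+// A worked example (illustrative): EXPECT_NEAR(2.0, 2.05, 0.1) succeeds
+// because |2.0 - 2.05| = 0.05 <= 0.1. With abs_error = 1e-20 instead, the
+// check fails with the informative message above, since 1e-20 is smaller
+// than the distance between adjacent doubles near 2.0 (about 4.4e-16).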
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+ const char* expr2,
+ RawType val1,
+ RawType val2) {
+ // Returns success if val1 is less than val2,
+ if (val1 < val2) {
+ return AssertionSuccess();
+ }
+
+ // or if val1 is almost equal to val2.
+ const FloatingPoint<RawType> lhs(val1), rhs(val2);
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ // Note that the above two checks will both fail if either val1 or
+ // val2 is NaN, as the IEEE floating-point standard requires that
+ // any predicate involving a NaN must return false.
+
+ ::std::stringstream val1_ss;
+ val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val1;
+
+ ::std::stringstream val2_ss;
+ val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val2;
+
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+ << " Actual: " << StringStreamToString(&val1_ss) << " vs "
+ << StringStreamToString(&val2_ss);
+}
+
+} // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2) {
+ return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2) {
+ return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
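+
+// Typical usage (illustrative): EXPECT_PRED_FORMAT2(::testing::DoubleLE, x, y)
+// passes when x < y or when x and y are within a few ULPs of each other, and
+// fails in particular when either argument is NaN.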
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const char* lhs,
+ const char* rhs) {
+ if (String::CStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const char* lhs,
+ const char* rhs) {
+ if (String::CaseInsensitiveCStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure()
+ << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << ") (ignoring case), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
+} // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubstring() and IsNotSubstring().
+
+// This group of overloaded functions return true if and only if needle
+// is a substring of haystack. NULL is considered a substring of
+// itself only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+ if (needle == nullptr || haystack == nullptr) return needle == haystack;
+
+ return strstr(haystack, needle) != nullptr;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+ if (needle == nullptr || haystack == nullptr) return needle == haystack;
+
+ return wcsstr(haystack, needle) != nullptr;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+ const StringType& haystack) {
+ return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+ bool expected_to_be_substring,
+ const char* needle_expr, const char* haystack_expr,
+ const StringType& needle, const StringType& haystack) {
+ if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+ return AssertionSuccess();
+
+ const bool is_wide_string = sizeof(needle[0]) > 1;
+ const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+ return AssertionFailure()
+ << "Value of: " << needle_expr << "\n"
+ << " Actual: " << begin_string_quote << needle << "\"\n"
+ << "Expected: " << (expected_to_be_substring ? "" : "not ")
+ << "a substring of " << haystack_expr << "\n"
+ << "Which is: " << begin_string_quote << haystack << "\"";
+}
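+
+// A failed IsSubstring() check (illustrative; exact spacing elided)
+// produces a message of the form:
+//
+//   Value of: needle_expr
+//    Actual: "needle"
+//   Expected: a substring of haystack_expr
+//   Which is: "haystack"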
+
+} // namespace
+
+// IsSubstring() and IsNotSubstring() check whether needle is a
+// substring of haystack (NULL is considered a substring of itself
+// only), and return an appropriate error message when they fail.
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+#if GTEST_HAS_STD_WSTRING
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for the IsHRESULT{Success,Failure} predicates.
+AssertionResult HRESULTFailureHelper(const char* expr,
+ const char* expected,
+ long hr) { // NOLINT
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_TV_TITLE
+
+ // Windows CE doesn't support FormatMessage.
+ const char error_text[] = "";
+
+# else
+
+ // Looks up the human-readable system message for the HRESULT code.
+ // Since we're not passing any params to FormatMessage, we don't
+ // want inserts expanded.
+ const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD kBufSize = 4096;
+ // Gets the system's human readable message string for this HRESULT.
+ char error_text[kBufSize] = { '\0' };
+ DWORD message_length = ::FormatMessageA(kFlags,
+ 0, // no source, we're asking system
+ static_cast<DWORD>(hr), // the error
+ 0, // no line width restrictions
+ error_text, // output buffer
+ kBufSize, // buf size
+ nullptr); // no arguments for inserts
+ // Trims trailing whitespace (FormatMessage leaves a trailing CR-LF).
+ for (; message_length && IsSpace(error_text[message_length - 1]);
+ --message_length) {
+ error_text[message_length - 1] = '\0';
+ }
+
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+ const std::string error_hex("0x" + String::FormatHexInt(hr));
+ return ::testing::AssertionFailure()
+ << "Expected: " << expr << " " << expected << ".\n"
+ << " Actual: " << error_hex << " " << error_text << "\n";
+}
+
+} // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
+ if (SUCCEEDED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
+ if (FAILED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+constexpr uint32_t kMaxCodePoint1 = (static_cast<uint32_t>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+constexpr uint32_t kMaxCodePoint2 = (static_cast<uint32_t>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+constexpr uint32_t kMaxCodePoint3 = (static_cast<uint32_t>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+constexpr uint32_t kMaxCodePoint4 = (static_cast<uint32_t>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline uint32_t ChopLowBits(uint32_t* bits, int n) {
+ const uint32_t low_bits = *bits & ((static_cast<uint32_t>(1) << n) - 1);
+ *bits >>= n;
+ return low_bits;
+}
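+
+// For example (illustrative): with *bits == 0xE9 and n == 6, ChopLowBits()
+// returns 0x29 (the low six bits) and leaves *bits == 0x3.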
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type uint32_t because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point cannot be encoded in at most four UTF-8 bytes
+// (i.e. it is greater than 0x1FFFFF, which is already outside the Unicode
+// range U+0000 to U+10FFFF), it will be converted to
+// "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(uint32_t code_point) {
+ if (code_point > kMaxCodePoint4) {
+ return "(Invalid Unicode 0x" + String::FormatHexUInt32(code_point) + ")";
+ }
+
+ char str[5]; // Big enough for the largest valid code point.
+ if (code_point <= kMaxCodePoint1) {
+ str[1] = '\0';
+ str[0] = static_cast<char>(code_point); // 0xxxxxxx
+ } else if (code_point <= kMaxCodePoint2) {
+ str[2] = '\0';
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
+ } else if (code_point <= kMaxCodePoint3) {
+ str[3] = '\0';
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
+ } else { // code_point <= kMaxCodePoint4
+ str[4] = '\0';
+ str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
+ }
+ return str;
+}
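+
+// A worked example (illustrative): CodePointToUtf8(0xE9) -- U+00E9 -- uses
+// the two-byte form: the low six bits 0x29 become the trailing byte
+// 0x80 | 0x29 = 0xA9, and the remaining bits 0x3 become the leading byte
+// 0xC0 | 0x3 = 0xC3, yielding the UTF-8 sequence 0xC3 0xA9.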
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with a 16-bit wchar_t (Windows, Cygwin) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline uint32_t CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+ wchar_t second) {
+ const auto first_u = static_cast<uint32_t>(first);
+ const auto second_u = static_cast<uint32_t>(second);
+ const uint32_t mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2)
+ ? (((first_u & mask) << 10) | (second_u & mask)) + 0x10000
+ :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ first_u;
+}
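+
+// For example (illustrative): the surrogate pair (0xD83D, 0xDE00) passes
+// IsUtf16SurrogatePair() and decodes to
+// (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) + 0x10000 = 0x1F600.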
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, the values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
+ if (num_chars == -1)
+ num_chars = static_cast<int>(wcslen(str));
+
+ ::std::stringstream stream;
+ for (int i = 0; i < num_chars; ++i) {
+ uint32_t unicode_code_point;
+
+ if (str[i] == L'\0') {
+ break;
+ } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+ unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+ str[i + 1]);
+ i++;
+ } else {
+ unicode_code_point = static_cast<uint32_t>(str[i]);
+ }
+
+ stream << CodePointToUtf8(unicode_code_point);
+ }
+ return StringStreamToString(&stream);
+}
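+
+// For example (illustrative): on a system where wchar_t is 16 bits wide,
+// WideStringToUtf8(L"a\xD83D\xDE00", -1) yields "a" followed by the four
+// UTF-8 bytes 0xF0 0x9F 0x98 0x80 (U+1F600).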
+
+// Converts a wide C string to an std::string using the UTF-8 encoding.
+// NULL will be converted to "(null)".
+std::string String::ShowWideCString(const wchar_t * wide_c_str) {
+ if (wide_c_str == nullptr) return "(null)";
+
+ return internal::WideStringToUtf8(wide_c_str, -1);
+}
+
+// Compares two wide C strings. Returns true if and only if they have the
+// same content.
+//
+// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
+ if (lhs == nullptr) return rhs == nullptr;
+
+ if (rhs == nullptr) return false;
+
+ return wcscmp(lhs, rhs) == 0;
+}
+
+// Helper function for *_STREQ on wide strings.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if (String::WideCStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ false);
+}
+
+// Helper function for *_STRNE on wide strings.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2) {
+ if (!String::WideCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ }
+
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: "
+ << PrintToString(s1)
+ << " vs " << PrintToString(s2);
+}
+
+// Compares two C strings, ignoring case. Returns true if and only if they have
+// the same content.
+//
+// Unlike strcasecmp(), this function can handle NULL argument(s). A
+// NULL C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
+ if (lhs == nullptr) return rhs == nullptr;
+ if (rhs == nullptr) return false;
+ return posix::StrCaseCmp(lhs, rhs) == 0;
+}
+
+// Compares two wide C strings, ignoring case. Returns true if and only if they
+// have the same content.
+//
+// Unlike wcscasecmp(), this function can handle NULL argument(s).
+// A NULL C string is considered different to any non-NULL wide C string,
+// including the empty string.
+// NB: The implementations on different platforms differ slightly.
+// On Windows, this method uses _wcsicmp, which compares according to the
+// LC_CTYPE environment variable. On GNU platforms this method uses
+// wcscasecmp, which compares according to the LC_CTYPE category of the
+// current locale. On Mac OS X, it uses towlower, which also uses the
+// LC_CTYPE category of the current locale.
+bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if (lhs == nullptr) return rhs == nullptr;
+
+ if (rhs == nullptr) return false;
+
+#if GTEST_OS_WINDOWS
+ return _wcsicmp(lhs, rhs) == 0;
+#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID
+ return wcscasecmp(lhs, rhs) == 0;
+#else
+ // Android, Mac OS X and Cygwin don't define wcscasecmp.
+ // Other unknown OSes may not define it either.
+ wint_t left, right;
+ do {
+ left = towlower(static_cast<wint_t>(*lhs++));
+ right = towlower(static_cast<wint_t>(*rhs++));
+ } while (left && left == right);
+ return left == right;
+#endif // OS selector
+}
+
+// Returns true if and only if str ends with the given suffix, ignoring case.
+// Any string is considered to end with an empty suffix.
+bool String::EndsWithCaseInsensitive(
+ const std::string& str, const std::string& suffix) {
+ const size_t str_len = str.length();
+ const size_t suffix_len = suffix.length();
+ return (str_len >= suffix_len) &&
+ CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
+ suffix.c_str());
+}
+
+// Formats an int value as "%02d".
+std::string String::FormatIntWidth2(int value) {
+ return FormatIntWidthN(value, 2);
+}
+
+// Formats an int value to given width with leading zeros.
+std::string String::FormatIntWidthN(int value, int width) {
+ std::stringstream ss;
+ ss << std::setfill('0') << std::setw(width) << value;
+ return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexUInt32(uint32_t value) {
+ std::stringstream ss;
+ ss << std::hex << std::uppercase << value;
+ return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexInt(int value) {
+ return FormatHexUInt32(static_cast<uint32_t>(value));
+}
+
+// Formats a byte as "%02X".
+std::string String::FormatByte(unsigned char value) {
+ std::stringstream ss;
+ ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
+ << static_cast<unsigned int>(value);
+ return ss.str();
+}
+
+// Converts the buffer in a stringstream to an std::string, converting NUL
+// bytes to "\\0" along the way.
+std::string StringStreamToString(::std::stringstream* ss) {
+ const ::std::string& str = ss->str();
+ const char* const start = str.c_str();
+ const char* const end = start + str.length();
+
+ std::string result;
+ result.reserve(static_cast<size_t>(2 * (end - start)));
+ for (const char* ch = start; ch != end; ++ch) {
+ if (*ch == '\0') {
+ result += "\\0"; // Replaces NUL with "\\0";
+ } else {
+ result += *ch;
+ }
+ }
+
+ return result;
+}
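+
+// For example (illustrative): a stringstream holding the three bytes
+// 'a', '\0', 'b' converts to the four-character string "a\\0b", keeping
+// embedded NULs visible in test output.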
+
+// Appends the user-supplied message to the Google-Test-generated message.
+std::string AppendUserMessage(const std::string& gtest_msg,
+ const Message& user_msg) {
+ // Appends the user message if it's non-empty.
+ const std::string user_msg_string = user_msg.GetString();
+ if (user_msg_string.empty()) {
+ return gtest_msg;
+ }
+ if (gtest_msg.empty()) {
+ return user_msg_string;
+ }
+ return gtest_msg + "\n" + user_msg_string;
+}
+
+} // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+ : death_test_count_(0), start_timestamp_(0), elapsed_time_(0) {}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+ if (i < 0 || i >= total_part_count())
+ internal::posix::Abort();
+ return test_part_results_.at(static_cast<size_t>(i));
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+ if (i < 0 || i >= test_property_count())
+ internal::posix::Abort();
+ return test_properties_.at(static_cast<size_t>(i));
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+ test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+ test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
+void TestResult::RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property) {
+ if (!ValidateTestProperty(xml_element, test_property)) {
+ return;
+ }
+ internal::MutexLock lock(&test_properties_mutex_);
+ const std::vector<TestProperty>::iterator property_with_matching_key =
+ std::find_if(test_properties_.begin(), test_properties_.end(),
+ internal::TestPropertyKeyIs(test_property.key()));
+ if (property_with_matching_key == test_properties_.end()) {
+ test_properties_.push_back(test_property);
+ return;
+ }
+ property_with_matching_key->SetValue(test_property.value());
+}
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+ "disabled",
+ "errors",
+ "failures",
+ "name",
+ "random_seed",
+ "tests",
+ "time",
+ "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+ "disabled", "errors", "failures", "name",
+ "tests", "time", "timestamp", "skipped"};
+
+// The list of reserved attributes used in the <testcase> element of XML output.
+static const char* const kReservedTestCaseAttributes[] = {
+ "classname", "name", "status", "time", "type_param",
+ "value_param", "file", "line"};
+
+// Use a slightly different set for allowed output to ensure existing tests can
+// still call RecordProperty("result") or RecordProperty("timestamp").
+static const char* const kReservedOutputTestCaseAttributes[] = {
+ "classname", "name", "status", "time", "type_param",
+ "value_param", "file", "line", "result", "timestamp"};
+
+template <size_t kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+ return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+ const std::string& xml_element) {
+ if (xml_element == "testsuites") {
+ return ArrayAsVector(kReservedTestSuitesAttributes);
+ } else if (xml_element == "testsuite") {
+ return ArrayAsVector(kReservedTestSuiteAttributes);
+ } else if (xml_element == "testcase") {
+ return ArrayAsVector(kReservedTestCaseAttributes);
+ } else {
+ GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+ }
+ // This code is unreachable, but some compilers may not realize that.
+ return std::vector<std::string>();
+}
+
+// TODO(jdesprez): Merge GetReservedAttributesForElement and
+// GetReservedOutputAttributesForElement once skip handling is improved.
+static std::vector<std::string> GetReservedOutputAttributesForElement(
+ const std::string& xml_element) {
+ if (xml_element == "testsuites") {
+ return ArrayAsVector(kReservedTestSuitesAttributes);
+ } else if (xml_element == "testsuite") {
+ return ArrayAsVector(kReservedTestSuiteAttributes);
+ } else if (xml_element == "testcase") {
+ return ArrayAsVector(kReservedOutputTestCaseAttributes);
+ } else {
+ GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+ }
+ // This code is unreachable, but some compilers may not realize that.
+ return std::vector<std::string>();
+}
+
+static std::string FormatWordList(const std::vector<std::string>& words) {
+ Message word_list;
+ for (size_t i = 0; i < words.size(); ++i) {
+ if (i > 0 && words.size() > 2) {
+ word_list << ", ";
+ }
+ if (i == words.size() - 1) {
+ word_list << "and ";
+ }
+ word_list << "'" << words[i] << "'";
+ }
+ return word_list.GetString();
+}
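+
+// For example (illustrative): FormatWordList on {"name", "status", "time"}
+// returns "'name', 'status', and 'time'".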
+
+static bool ValidateTestPropertyName(
+ const std::string& property_name,
+ const std::vector<std::string>& reserved_names) {
+ if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
+ reserved_names.end()) {
+ ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
+ << " (" << FormatWordList(reserved_names)
+ << " are reserved by " << GTEST_NAME_ << ")";
+ return false;
+ }
+ return true;
+}
+
+// Adds a failure if the key is a reserved attribute of the element named
+// xml_element. Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property) {
+ return ValidateTestPropertyName(test_property.key(),
+ GetReservedAttributesForElement(xml_element));
+}
+
+// Clears the object.
+void TestResult::Clear() {
+ test_part_results_.clear();
+ test_properties_.clear();
+ death_test_count_ = 0;
+ elapsed_time_ = 0;
+}
+
+// Returns true if and only if the test part was skipped.
+static bool TestPartSkipped(const TestPartResult& result) {
+ return result.skipped();
+}
+
+// Returns true if and only if the test was skipped.
+bool TestResult::Skipped() const {
+ return !Failed() && CountIf(test_part_results_, TestPartSkipped) > 0;
+}
+
+// Returns true if and only if the test failed.
+bool TestResult::Failed() const {
+ for (int i = 0; i < total_part_count(); ++i) {
+ if (GetTestPartResult(i).failed())
+ return true;
+ }
+ return false;
+}
+
+// Returns true if and only if the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+ return result.fatally_failed();
+}
+
+// Returns true if and only if the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+ return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true if and only if the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+ return result.nonfatally_failed();
+}
+
+// Returns true if and only if the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+ return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts. This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+ return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+ return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the states of all flags.
+Test::Test()
+ : gtest_flag_saver_(new GTEST_FLAG_SAVER_) {
+}
+
+// The d'tor restores the states of all flags. The actual work is
+// done by the d'tor of the gtest_flag_saver_ field, and thus not
+// visible here.
+Test::~Test() {
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, const std::string& value) {
+ UnitTest::GetInstance()->RecordProperty(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, int value) {
+ Message value_message;
+ value_message << value;
+ RecordProperty(key, value_message.GetString().c_str());
+}
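+
+// Typical usage from within a test body (illustrative):
+//
+//   TEST(WidgetTest, HandlesInput) {
+//     RecordProperty("bug_id", 12345);  // recorded for later output,
+//                                       // e.g. as an XML attribute
+//   }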
+
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const std::string& message) {
+ // This function is a friend of UnitTest and as such has access to
+ // AddTestPartResult.
+ UnitTest::GetInstance()->AddTestPartResult(
+ result_type,
+ nullptr, // No info about the source file where the exception occurred.
+ -1, // We have no info on which line caused the exception.
+ message,
+ ""); // No stack trace, either.
+}
+
+} // namespace internal
+
+// Google Test requires all tests in the same test suite to use the same test
+// fixture class. This function checks if the current test has the
+// same fixture class as the first test in the current test suite. If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ const TestSuite* const test_suite = impl->current_test_suite();
+
+ // Info about the first test in the current test suite.
+ const TestInfo* const first_test_info = test_suite->test_info_list()[0];
+ const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
+ const char* const first_test_name = first_test_info->name();
+
+ // Info about the current test.
+ const TestInfo* const this_test_info = impl->current_test_info();
+ const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
+ const char* const this_test_name = this_test_info->name();
+
+ if (this_fixture_id != first_fixture_id) {
+ // Is the first test defined using TEST?
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
+ // Is this test defined using TEST?
+ const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
+
+ if (first_is_TEST || this_is_TEST) {
+ // Both TEST and TEST_F appear in same test suite, which is incorrect.
+ // Tell the user how to fix this.
+
+ // Gets the name of the TEST and the name of the TEST_F. Note
+ // that first_is_TEST and this_is_TEST cannot both be true, as
+ // the fixture IDs are different for the two tests.
+ const char* const TEST_name =
+ first_is_TEST ? first_test_name : this_test_name;
+ const char* const TEST_F_name =
+ first_is_TEST ? this_test_name : first_test_name;
+
+ ADD_FAILURE()
+ << "All tests in the same test suite must use the same test fixture\n"
+ << "class, so mixing TEST_F and TEST in the same test suite is\n"
+ << "illegal. In test suite " << this_test_info->test_suite_name()
+ << ",\n"
+ << "test " << TEST_F_name << " is defined using TEST_F but\n"
+ << "test " << TEST_name << " is defined using TEST. You probably\n"
+ << "want to change the TEST to TEST_F or move it to another test\n"
+ << "case.";
+ } else {
+ // Two fixture classes with the same name appear in two different
+ // namespaces, which is not allowed. Tell the user how to fix this.
+ ADD_FAILURE()
+ << "All tests in the same test suite must use the same test fixture\n"
+ << "class. However, in test suite "
+ << this_test_info->test_suite_name() << ",\n"
+ << "you defined test " << first_test_name << " and test "
+ << this_test_name << "\n"
+ << "using two different test fixture classes. This can happen if\n"
+ << "the two classes are from different namespaces or translation\n"
+ << "units and have the same name. You should probably rename one\n"
+ << "of the classes to put the tests into different test suites.";
+ }
+ return false;
+ }
+
+ return true;
+}
+
+#if GTEST_HAS_SEH
+
+// Adds an "exception thrown" fatal failure to the current test. This
+// function returns its result via an output parameter pointer because VC++
+// prohibits creation of objects with destructors on stack in functions
+// using __try (see error C2712).
+static std::string* FormatSehExceptionMessage(DWORD exception_code,
+ const char* location) {
+ Message message;
+ message << "SEH exception with code 0x" << std::setbase(16) <<
+ exception_code << std::setbase(10) << " thrown in " << location << ".";
+
+ return new std::string(message.GetString());
+}
+
+#endif // GTEST_HAS_SEH
+
+namespace internal {
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Adds an "exception thrown" fatal failure to the current test.
+static std::string FormatCxxExceptionMessage(const char* description,
+ const char* location) {
+ Message message;
+ if (description != nullptr) {
+ message << "C++ exception with description \"" << description << "\"";
+ } else {
+ message << "Unknown C++ exception";
+ }
+ message << " thrown in " << location << ".";
+
+ return message.GetString();
+}
+
+static std::string PrintTestPartResultToString(
+ const TestPartResult& test_part_result);
+
+GoogleTestFailureException::GoogleTestFailureException(
+ const TestPartResult& failure)
+ : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+// We put these helper functions in the internal namespace as IBM's xlC
+// compiler rejects the code if they were declared static.
+
+// Runs the given method and handles SEH exceptions it throws, when
+// SEH is supported; returns the 0-value for type Result in case of an
+// SEH exception. (Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function. Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+ __try {
+ return (object->*method)();
+ } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT
+ GetExceptionCode())) {
+ // We create the exception message on the heap because VC++ prohibits
+ // creation of objects with destructors on stack in functions using __try
+ // (see error C2712).
+ std::string* exception_message = FormatSehExceptionMessage(
+ GetExceptionCode(), location);
+ internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+ *exception_message);
+ delete exception_message;
+ return static_cast<Result>(0);
+ }
+#else
+ (void)location;
+ return (object->*method)();
+#endif // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+ // NOTE: The user code can affect the way in which Google Test handles
+ // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+ // RUN_ALL_TESTS() starts. It is technically possible to check the flag
+ // after the exception is caught and either report or re-throw the
+ // exception based on the flag's value:
+ //
+ // try {
+ // // Perform the test method.
+ // } catch (...) {
+ // if (GTEST_FLAG(catch_exceptions))
+ // // Report the exception as failure.
+ // else
+ // throw; // Re-throws the original exception.
+ // }
+ //
+ // However, the purpose of this flag is to allow the program to drop into
+ // the debugger when the exception is thrown. On most platforms, once the
+ // control enters the catch block, the exception origin information is
+ // lost and the debugger will stop the program at the point of the
+ // re-throw in this function -- instead of at the point of the original
+ // throw statement in the code under test. For this reason, we perform
+ // the check early, sacrificing the ability to affect Google Test's
+ // exception handling in the method where the exception is thrown.
+ if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+ try {
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+ } catch (const AssertionException&) { // NOLINT
+ // This failure was reported already.
+ } catch (const internal::GoogleTestFailureException&) { // NOLINT
+ // This exception type can only be thrown by a failed Google
+ // Test assertion with the intention of letting another testing
+ // framework catch it. Therefore we just re-throw it.
+ throw;
+ } catch (const std::exception& e) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(e.what(), location));
+ } catch (...) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(nullptr, location));
+ }
+ return static_cast<Result>(0);
+#else
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif // GTEST_HAS_EXCEPTIONS
+ } else {
+ return (object->*method)();
+ }
+}
+
+} // namespace internal
+
+// Runs the test and updates the test result.
+void Test::Run() {
+ if (!HasSameFixtureClass()) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()");
+ // We will run the test only if SetUp() was successful and didn't call
+ // GTEST_SKIP().
+ if (!HasFatalFailure() && !IsSkipped()) {
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TestBody, "the test body");
+ }
+
+ // However, we want to clean up as much as possible. Hence we will
+ // always call TearDown(), even if SetUp() or the test body has
+ // failed.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TearDown, "TearDown()");
+}
+
+// Returns true if and only if the current test has a fatal failure.
+bool Test::HasFatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
+}
+
+// Returns true if and only if the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->
+ HasNonfatalFailure();
+}
+
+// Returns true if and only if the current test was skipped.
+bool Test::IsSkipped() {
+ return internal::GetUnitTestImpl()->current_test_result()->Skipped();
+}
+
+// class TestInfo
+
+// Constructs a TestInfo object. It assumes ownership of the test factory
+// object.
+TestInfo::TestInfo(const std::string& a_test_suite_name,
+ const std::string& a_name, const char* a_type_param,
+ const char* a_value_param,
+ internal::CodeLocation a_code_location,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory)
+ : test_suite_name_(a_test_suite_name),
+ name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : nullptr),
+ value_param_(a_value_param ? new std::string(a_value_param) : nullptr),
+ location_(a_code_location),
+ fixture_class_id_(fixture_class_id),
+ should_run_(false),
+ is_disabled_(false),
+ matches_filter_(false),
+ is_in_another_shard_(false),
+ factory_(factory),
+ result_() {}
+
+// Destructs a TestInfo object.
+TestInfo::~TestInfo() { delete factory_; }
+
+namespace internal {
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_suite_name: name of the test suite
+// name: name of the test
+// type_param: the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param: text representation of the test's value parameter,
+// or NULL if this is not a value-parameterized test.
+// code_location: code location where the test is defined
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test suite
+// tear_down_tc: pointer to the function that tears down the test suite
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+TestInfo* MakeAndRegisterTestInfo(
+ const char* test_suite_name, const char* name, const char* type_param,
+ const char* value_param, CodeLocation code_location,
+ TypeId fixture_class_id, SetUpTestSuiteFunc set_up_tc,
+ TearDownTestSuiteFunc tear_down_tc, TestFactoryBase* factory) {
+ TestInfo* const test_info =
+ new TestInfo(test_suite_name, name, type_param, value_param,
+ code_location, fixture_class_id, factory);
+ GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+ return test_info;
+}
+
+void ReportInvalidTestSuiteType(const char* test_suite_name,
+ CodeLocation code_location) {
+ Message errors;
+ errors
+ << "Attempted redefinition of test suite " << test_suite_name << ".\n"
+ << "All tests in the same test suite must use the same test fixture\n"
+ << "class. However, in test suite " << test_suite_name << ", you tried\n"
+ << "to define a test using a fixture class different from the one\n"
+ << "used earlier. This can happen if the two fixture classes are\n"
+ << "from different namespaces and have the same name. You should\n"
+ << "probably rename one of the classes to put the tests into different\n"
+ << "test suites.";
+
+ GTEST_LOG_(ERROR) << FormatFileLocation(code_location.file.c_str(),
+ code_location.line)
+ << " " << errors.GetString();
+}
+} // namespace internal
+
+namespace internal {
+
+// This method expands all parameterized tests registered with macros TEST_P
+// and INSTANTIATE_TEST_SUITE_P into regular tests and registers those.
+// This will be done just once during the program runtime.
+void UnitTestImpl::RegisterParameterizedTests() {
+ if (!parameterized_tests_registered_) {
+ parameterized_test_registry_.RegisterTests();
+ type_parameterized_test_registry_.CheckForInstantiations();
+ parameterized_tests_registered_ = true;
+ }
+}
+
+} // namespace internal
+
+// Creates the test object, runs it, records its result, and then
+// deletes it.
+void TestInfo::Run() {
+ if (!should_run_) return;
+
+ // Tells UnitTest where to store test result.
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_info(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*this);
+
+ result_.set_start_timestamp(internal::GetTimeInMillis());
+ internal::Timer timer;
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+
+ // Creates the test object.
+ Test* const test = internal::HandleExceptionsInMethodIfSupported(
+ factory_, &internal::TestFactoryBase::CreateTest,
+ "the test fixture's constructor");
+
+ // Runs the test if the constructor didn't generate a fatal failure or
+ // invoke GTEST_SKIP(); in that case the test object is guaranteed to be
+ // non-null, since a constructor failure is reported as a fatal failure.
+ if (!Test::HasFatalFailure() && !Test::IsSkipped()) {
+ // This doesn't throw as all user code that can throw are wrapped into
+ // exception handling code.
+ test->Run();
+ }
+
+ if (test != nullptr) {
+ // Deletes the test object.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ test, &Test::DeleteSelf_, "the test fixture's destructor");
+ }
+
+ result_.set_elapsed_time(timer.Elapsed());
+
+ // Notifies the unit test event listener that a test has just finished.
+ repeater->OnTestEnd(*this);
+
+ // Tells UnitTest to stop associating assertion results to this
+ // test.
+ impl->set_current_test_info(nullptr);
+}
+
+// Skips and records a skipped test result for this object.
+void TestInfo::Skip() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_info(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*this);
+
+ const TestPartResult test_part_result =
+ TestPartResult(TestPartResult::kSkip, this->file(), this->line(), "");
+ impl->GetTestPartResultReporterForCurrentThread()->ReportTestPartResult(
+ test_part_result);
+
+ // Notifies the unit test event listener that a test has just finished.
+ repeater->OnTestEnd(*this);
+ impl->set_current_test_info(nullptr);
+}
+
+// class TestSuite
+
+// Gets the number of successful tests in this test suite.
+int TestSuite::successful_test_count() const {
+ return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of skipped tests in this test suite.
+int TestSuite::skipped_test_count() const {
+ return CountIf(test_info_list_, TestSkipped);
+}
+
+// Gets the number of failed tests in this test suite.
+int TestSuite::failed_test_count() const {
+ return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int TestSuite::reportable_disabled_test_count() const {
+ return CountIf(test_info_list_, TestReportableDisabled);
+}
+
+// Gets the number of disabled tests in this test suite.
+int TestSuite::disabled_test_count() const {
+ return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int TestSuite::reportable_test_count() const {
+ return CountIf(test_info_list_, TestReportable);
+}
+
+// Gets the number of tests in this test suite that should run.
+int TestSuite::test_to_run_count() const {
+ return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestSuite::total_test_count() const {
+ return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestSuite with the given name.
+//
+// Arguments:
+//
+// a_name: name of the test suite
+// a_type_param: the name of the test suite's type parameter, or NULL if
+// this is not a typed or a type-parameterized test suite.
+// set_up_tc: pointer to the function that sets up the test suite
+// tear_down_tc: pointer to the function that tears down the test suite
+TestSuite::TestSuite(const char* a_name, const char* a_type_param,
+ internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc)
+ : name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : nullptr),
+ set_up_tc_(set_up_tc),
+ tear_down_tc_(tear_down_tc),
+ should_run_(false),
+ start_timestamp_(0),
+ elapsed_time_(0) {}
+
+// Destructor of TestSuite.
+TestSuite::~TestSuite() {
+ // Deletes every Test in the collection.
+ ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestSuite::GetTestInfo(int i) const {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? nullptr : test_info_list_[static_cast<size_t>(index)];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestSuite::GetMutableTestInfo(int i) {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? nullptr : test_info_list_[static_cast<size_t>(index)];
+}
+
+// Adds a test to this test suite. Will delete the test upon
+// destruction of the TestSuite object.
+void TestSuite::AddTestInfo(TestInfo* test_info) {
+ test_info_list_.push_back(test_info);
+ test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestSuite.
+void TestSuite::Run() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_suite(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Call both legacy and the new API
+ repeater->OnTestSuiteStart(*this);
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ repeater->OnTestCaseStart(*this);
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestSuite::RunSetUpTestSuite, "SetUpTestSuite()");
+
+ start_timestamp_ = internal::GetTimeInMillis();
+ internal::Timer timer;
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->Run();
+ if (GTEST_FLAG(fail_fast) && GetMutableTestInfo(i)->result()->Failed()) {
+ for (int j = i + 1; j < total_test_count(); j++) {
+ GetMutableTestInfo(j)->Skip();
+ }
+ break;
+ }
+ }
+ elapsed_time_ = timer.Elapsed();
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestSuite::RunTearDownTestSuite, "TearDownTestSuite()");
+
+ // Call both legacy and the new API
+ repeater->OnTestSuiteEnd(*this);
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ repeater->OnTestCaseEnd(*this);
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ impl->set_current_test_suite(nullptr);
+}
+
+// Skips all tests under this TestSuite.
+void TestSuite::Skip() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_suite(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Call both legacy and the new API
+ repeater->OnTestSuiteStart(*this);
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ repeater->OnTestCaseStart(*this);
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->Skip();
+ }
+
+ // Call both legacy and the new API
+ repeater->OnTestSuiteEnd(*this);
+ // Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ repeater->OnTestCaseEnd(*this);
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ impl->set_current_test_suite(nullptr);
+}
+
+// Clears the results of all tests in this test suite.
+void TestSuite::ClearResult() {
+ ad_hoc_test_result_.Clear();
+ ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test suite.
+void TestSuite::ShuffleTests(internal::Random* random) {
+ Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestSuite::UnshuffleTests() {
+ for (size_t i = 0; i < test_indices_.size(); i++) {
+ test_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Formats a countable noun. Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+// FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static std::string FormatCountableNoun(int count,
+ const char * singular_form,
+ const char * plural_form) {
+ return internal::StreamableToString(count) + " " +
+ (count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static std::string FormatTestCount(int test_count) {
+ return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test suites.
+static std::string FormatTestSuiteCount(int test_suite_count) {
+ return FormatCountableNoun(test_suite_count, "test suite", "test suites");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation. Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+ switch (type) {
+ case TestPartResult::kSkip:
+ return "Skipped\n";
+ case TestPartResult::kSuccess:
+ return "Success";
+
+ case TestPartResult::kNonFatalFailure:
+ case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+ return "error: ";
+#else
+ return "Failure\n";
+#endif
+ default:
+ return "Unknown result type";
+ }
+}
+
+namespace internal {
+namespace {
+enum class GTestColor { kDefault, kRed, kGreen, kYellow };
+} // namespace
+
+// Prints a TestPartResult to an std::string.
+static std::string PrintTestPartResultToString(
+ const TestPartResult& test_part_result) {
+ return (Message()
+ << internal::FormatFileLocation(test_part_result.file_name(),
+ test_part_result.line_number())
+ << " " << TestPartResultTypeToString(test_part_result.type())
+ << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+ const std::string& result =
+ PrintTestPartResultToString(test_part_result);
+ printf("%s\n", result.c_str());
+ fflush(stdout);
+ // If the test program runs in Visual Studio or a debugger, the
+ // following statements add the test part result message to the Output
+ // window such that the user can double-click on it to jump to the
+ // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ // We don't call OutputDebugString*() on Windows Mobile, as printing
+ // to stdout is done by OutputDebugString() there already - we don't
+ // want the same message printed twice.
+ ::OutputDebugStringA(result.c_str());
+ ::OutputDebugStringA("\n");
+#endif
+}
+
+// class PrettyUnitTestResultPrinter
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+ !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW
+
+// Returns the character attribute for the given color.
+static WORD GetColorAttribute(GTestColor color) {
+ switch (color) {
+ case GTestColor::kRed:
+ return FOREGROUND_RED;
+ case GTestColor::kGreen:
+ return FOREGROUND_GREEN;
+ case GTestColor::kYellow:
+ return FOREGROUND_RED | FOREGROUND_GREEN;
+ default: return 0;
+ }
+}
+
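+// Returns the bit offset of the least-significant set bit in color_mask
+// (e.g. GetBitOffset(0x10) == 4), or 0 if the mask is empty.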
+static int GetBitOffset(WORD color_mask) {
+ if (color_mask == 0) return 0;
+
+ int bitOffset = 0;
+ while ((color_mask & 1) == 0) {
+ color_mask >>= 1;
+ ++bitOffset;
+ }
+ return bitOffset;
+}
+
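+// Computes the console attribute for printing in the requested color while
+// keeping the existing background; if the resulting foreground would blend
+// into the background (e.g. green text on a green background), the foreground
+// intensity bit is flipped so the text stays readable.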
+static WORD GetNewColor(GTestColor color, WORD old_color_attrs) {
+ // Let's reuse the BG
+ static const WORD background_mask = BACKGROUND_BLUE | BACKGROUND_GREEN |
+ BACKGROUND_RED | BACKGROUND_INTENSITY;
+ static const WORD foreground_mask = FOREGROUND_BLUE | FOREGROUND_GREEN |
+ FOREGROUND_RED | FOREGROUND_INTENSITY;
+ const WORD existing_bg = old_color_attrs & background_mask;
+
+ WORD new_color =
+ GetColorAttribute(color) | existing_bg | FOREGROUND_INTENSITY;
+ static const int bg_bitOffset = GetBitOffset(background_mask);
+ static const int fg_bitOffset = GetBitOffset(foreground_mask);
+
+ if (((new_color & background_mask) >> bg_bitOffset) ==
+ ((new_color & foreground_mask) >> fg_bitOffset)) {
+ new_color ^= FOREGROUND_INTENSITY; // invert intensity
+ }
+ return new_color;
+}
+
+#else
+
+// Returns the ANSI color code for the given color. GTestColor::kDefault is
+// an invalid input.
+static const char* GetAnsiColorCode(GTestColor color) {
+ switch (color) {
+ case GTestColor::kRed:
+ return "1";
+ case GTestColor::kGreen:
+ return "2";
+ case GTestColor::kYellow:
+ return "3";
+ default:
+ return nullptr;
+ }
+}
+
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns true if and only if Google Test should use colors in the output.
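+// With --gtest_color=auto (the default), colors are used only when stdout is
+// a TTY and, on non-Windows platforms, TERM names a color-capable terminal;
+// "yes", "true", "t", or "1" force colors on, and any other value turns them
+// off.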
+bool ShouldUseColor(bool stdout_is_tty) {
+ const char* const gtest_color = GTEST_FLAG(color).c_str();
+
+ if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return stdout_is_tty;
+#else
+ // On non-Windows platforms, we rely on the TERM variable.
+ const char* const term = posix::GetEnv("TERM");
+ const bool term_supports_color =
+ String::CStringEquals(term, "xterm") ||
+ String::CStringEquals(term, "xterm-color") ||
+ String::CStringEquals(term, "xterm-256color") ||
+ String::CStringEquals(term, "screen") ||
+ String::CStringEquals(term, "screen-256color") ||
+ String::CStringEquals(term, "tmux") ||
+ String::CStringEquals(term, "tmux-256color") ||
+ String::CStringEquals(term, "rxvt-unicode") ||
+ String::CStringEquals(term, "rxvt-unicode-256color") ||
+ String::CStringEquals(term, "linux") ||
+ String::CStringEquals(term, "cygwin");
+ return stdout_is_tty && term_supports_color;
+#endif // GTEST_OS_WINDOWS
+ }
+
+ return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
+ String::CStringEquals(gtest_color, "1");
+ // We take "yes", "true", "t", and "1" as meaning "yes". If the
+ // value is neither one of these nor "auto", we treat it as "no" to
+ // be conservative.
+}
+
+// Helpers for printing colored strings to stdout. Note that on Windows, we
+// cannot simply emit special characters and have the terminal change colors.
+// This routine must actually emit the characters rather than return a string
+// that would be colored when printed, as can be done on Linux.
+
+GTEST_ATTRIBUTE_PRINTF_(2, 3)
+static void ColoredPrintf(GTestColor color, const char *fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS || GTEST_OS_IOS || \
+ GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT || defined(ESP_PLATFORM)
+ const bool use_color = AlwaysFalse();
+#else
+ static const bool in_color_mode =
+ ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+ const bool use_color = in_color_mode && (color != GTestColor::kDefault);
+#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_ZOS
+
+ if (!use_color) {
+ vprintf(fmt, args);
+ va_end(args);
+ return;
+ }
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+ !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT && !GTEST_OS_WINDOWS_MINGW
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+ const WORD new_color = GetNewColor(color, old_color_attrs);
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle, new_color);
+
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ printf("\033[0;3%sm", GetAnsiColorCode(color));
+ vprintf(fmt, args);
+ printf("\033[m"); // Resets the terminal to default.
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ va_end(args);
+}
+
+// Text printed in Google Test's text output and --gtest_list_tests
+// output to label the type parameter and value parameter for a test.
+static const char kTypeParamLabel[] = "TypeParam";
+static const char kValueParamLabel[] = "GetParam()";
+
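+// Prints the ", where TypeParam = ... and GetParam() = ..." trailer for a
+// typed and/or parameterized test (e.g. ", where TypeParam = int and
+// GetParam() = 4"); prints nothing if the test has neither parameter.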
+static void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+ const char* const type_param = test_info.type_param();
+ const char* const value_param = test_info.value_param();
+
+ if (type_param != nullptr || value_param != nullptr) {
+ printf(", where ");
+ if (type_param != nullptr) {
+ printf("%s = %s", kTypeParamLabel, type_param);
+ if (value_param != nullptr) printf(" and ");
+ }
+ if (value_param != nullptr) {
+ printf("%s = %s", kValueParamLabel, value_param);
+ }
+ }
+}
+
+// This class implements the TestEventListener interface.
+//
+// Class PrettyUnitTestResultPrinter is copyable.
+class PrettyUnitTestResultPrinter : public TestEventListener {
+ public:
+ PrettyUnitTestResultPrinter() {}
+ static void PrintTestName(const char* test_suite, const char* test) {
+ printf("%s.%s", test_suite, test);
+ }
+
+ // The following methods override what's in the TestEventListener class.
+ void OnTestProgramStart(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationStart(const UnitTest& unit_test, int iteration) override;
+ void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override;
+ void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {}
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseStart(const TestCase& test_case) override;
+#else
+ void OnTestSuiteStart(const TestSuite& test_suite) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnTestStart(const TestInfo& test_info) override;
+
+ void OnTestPartResult(const TestPartResult& result) override;
+ void OnTestEnd(const TestInfo& test_info) override;
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& test_case) override;
+#else
+ void OnTestSuiteEnd(const TestSuite& test_suite) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override;
+ void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
+ void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {}
+
+ private:
+ static void PrintFailedTests(const UnitTest& unit_test);
+ static void PrintFailedTestSuites(const UnitTest& unit_test);
+ static void PrintSkippedTests(const UnitTest& unit_test);
+};
+
+ // Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+ const UnitTest& unit_test, int iteration) {
+ if (GTEST_FLAG(repeat) != 1)
+ printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+ const char* const filter = GTEST_FLAG(filter).c_str();
+
+ // Prints the filter if it's not *. This reminds the user that some
+ // tests may be skipped.
+ if (!String::CStringEquals(filter, kUniversalFilter)) {
+ ColoredPrintf(GTestColor::kYellow, "Note: %s filter = %s\n", GTEST_NAME_,
+ filter);
+ }
+
+ if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+ const int32_t shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
+ ColoredPrintf(GTestColor::kYellow, "Note: This is test shard %d of %s.\n",
+ static_cast<int>(shard_index) + 1,
+ internal::posix::GetEnv(kTestTotalShards));
+ }
+
+ if (GTEST_FLAG(shuffle)) {
+ ColoredPrintf(GTestColor::kYellow,
+ "Note: Randomizing tests' orders with a seed of %d .\n",
+ unit_test.random_seed());
+ }
+
+ ColoredPrintf(GTestColor::kGreen, "[==========] ");
+ printf("Running %s from %s.\n",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestSuiteCount(unit_test.test_suite_to_run_count()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("Global test environment set-up.\n");
+ fflush(stdout);
+}
+
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_case.name());
+ if (test_case.type_param() == nullptr) {
+ printf("\n");
+ } else {
+ printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
+ }
+ fflush(stdout);
+}
+#else
+void PrettyUnitTestResultPrinter::OnTestSuiteStart(
+ const TestSuite& test_suite) {
+ const std::string counts =
+ FormatCountableNoun(test_suite.test_to_run_count(), "test", "tests");
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_suite.name());
+ if (test_suite.type_param() == nullptr) {
+ printf("\n");
+ } else {
+ printf(", where %s = %s\n", kTypeParamLabel, test_suite.type_param());
+ }
+ fflush(stdout);
+}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+ ColoredPrintf(GTestColor::kGreen, "[ RUN ] ");
+ PrintTestName(test_info.test_suite_name(), test_info.name());
+ printf("\n");
+ fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ switch (result.type()) {
+ // If the test part succeeded, we don't need to do anything.
+ case TestPartResult::kSuccess:
+ return;
+ default:
+ // Print failure message from the assertion
+ // (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+ }
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Passed()) {
+ ColoredPrintf(GTestColor::kGreen, "[ OK ] ");
+ } else if (test_info.result()->Skipped()) {
+ ColoredPrintf(GTestColor::kGreen, "[ SKIPPED ] ");
+ } else {
+ ColoredPrintf(GTestColor::kRed, "[ FAILED ] ");
+ }
+ PrintTestName(test_info.test_suite_name(), test_info.name());
+ if (test_info.result()->Failed())
+ PrintFullTestCommentIfPresent(test_info);
+
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms)\n", internal::StreamableToString(
+ test_info.result()->elapsed_time()).c_str());
+ } else {
+ printf("\n");
+ }
+ fflush(stdout);
+}
+
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n", counts.c_str(), test_case.name(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
+ fflush(stdout);
+}
+#else
+void PrettyUnitTestResultPrinter::OnTestSuiteEnd(const TestSuite& test_suite) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ const std::string counts =
+ FormatCountableNoun(test_suite.test_to_run_count(), "test", "tests");
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n", counts.c_str(), test_suite.name(),
+ internal::StreamableToString(test_suite.elapsed_time()).c_str());
+ fflush(stdout);
+}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(GTestColor::kGreen, "[----------] ");
+ printf("Global test environment tear-down\n");
+ fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+ const int failed_test_count = unit_test.failed_test_count();
+ ColoredPrintf(GTestColor::kRed, "[ FAILED ] ");
+ printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ const TestSuite& test_suite = *unit_test.GetTestSuite(i);
+ if (!test_suite.should_run() || (test_suite.failed_test_count() == 0)) {
+ continue;
+ }
+ for (int j = 0; j < test_suite.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_suite.GetTestInfo(j);
+ if (!test_info.should_run() || !test_info.result()->Failed()) {
+ continue;
+ }
+ ColoredPrintf(GTestColor::kRed, "[ FAILED ] ");
+ printf("%s.%s", test_suite.name(), test_info.name());
+ PrintFullTestCommentIfPresent(test_info);
+ printf("\n");
+ }
+ }
+ printf("\n%2d FAILED %s\n", failed_test_count,
+ failed_test_count == 1 ? "TEST" : "TESTS");
+}
+
+// Internal helper for printing the list of test suite failures not covered by
+// PrintFailedTests.
+void PrettyUnitTestResultPrinter::PrintFailedTestSuites(
+ const UnitTest& unit_test) {
+ int suite_failure_count = 0;
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ const TestSuite& test_suite = *unit_test.GetTestSuite(i);
+ if (!test_suite.should_run()) {
+ continue;
+ }
+ if (test_suite.ad_hoc_test_result().Failed()) {
+ ColoredPrintf(GTestColor::kRed, "[ FAILED ] ");
+ printf("%s: SetUpTestSuite or TearDownTestSuite\n", test_suite.name());
+ ++suite_failure_count;
+ }
+ }
+ if (suite_failure_count > 0) {
+ printf("\n%2d FAILED TEST %s\n", suite_failure_count,
+ suite_failure_count == 1 ? "SUITE" : "SUITES");
+ }
+}
+
+// Internal helper for printing the list of skipped tests.
+void PrettyUnitTestResultPrinter::PrintSkippedTests(const UnitTest& unit_test) {
+ const int skipped_test_count = unit_test.skipped_test_count();
+ if (skipped_test_count == 0) {
+ return;
+ }
+
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ const TestSuite& test_suite = *unit_test.GetTestSuite(i);
+ if (!test_suite.should_run() || (test_suite.skipped_test_count() == 0)) {
+ continue;
+ }
+ for (int j = 0; j < test_suite.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_suite.GetTestInfo(j);
+ if (!test_info.should_run() || !test_info.result()->Skipped()) {
+ continue;
+ }
+ ColoredPrintf(GTestColor::kGreen, "[ SKIPPED ] ");
+ printf("%s.%s", test_suite.name(), test_info.name());
+ printf("\n");
+ }
+ }
+}
+
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ ColoredPrintf(GTestColor::kGreen, "[==========] ");
+ printf("%s from %s ran.",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestSuiteCount(unit_test.test_suite_to_run_count()).c_str());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms total)",
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
+ }
+ printf("\n");
+ ColoredPrintf(GTestColor::kGreen, "[ PASSED ] ");
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+ const int skipped_test_count = unit_test.skipped_test_count();
+ if (skipped_test_count > 0) {
+ ColoredPrintf(GTestColor::kGreen, "[ SKIPPED ] ");
+ printf("%s, listed below:\n", FormatTestCount(skipped_test_count).c_str());
+ PrintSkippedTests(unit_test);
+ }
+
+ if (!unit_test.Passed()) {
+ PrintFailedTests(unit_test);
+ PrintFailedTestSuites(unit_test);
+ }
+
+ int num_disabled = unit_test.reportable_disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+ if (unit_test.Passed()) {
+ printf("\n"); // Add a spacer if no FAILURE banner is displayed.
+ }
+ ColoredPrintf(GTestColor::kYellow, " YOU HAVE %d DISABLED %s\n\n",
+ num_disabled, num_disabled == 1 ? "TEST" : "TESTS");
+ }
+ // Ensure that Google Test output is printed before, e.g., heapchecker output.
+ fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// This class implements the TestEventListener interface.
+//
+// Class BriefUnitTestResultPrinter is copyable.
+class BriefUnitTestResultPrinter : public TestEventListener {
+ public:
+ BriefUnitTestResultPrinter() {}
+ static void PrintTestName(const char* test_suite, const char* test) {
+ printf("%s.%s", test_suite, test);
+ }
+
+ // The following methods override what's in the TestEventListener class.
+ void OnTestProgramStart(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) override {}
+ void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) override {}
+ void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {}
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseStart(const TestCase& /*test_case*/) override {}
+#else
+ void OnTestSuiteStart(const TestSuite& /*test_suite*/) override {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnTestStart(const TestInfo& /*test_info*/) override {}
+
+ void OnTestPartResult(const TestPartResult& result) override;
+ void OnTestEnd(const TestInfo& test_info) override;
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& /*test_case*/) override {}
+#else
+ void OnTestSuiteEnd(const TestSuite& /*test_suite*/) override {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) override {}
+ void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
+ void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {}
+};
+
+// Called after an assertion failure.
+void BriefUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ switch (result.type()) {
+ // If the test part succeeded, we don't need to do anything.
+ case TestPartResult::kSuccess:
+ return;
+ default:
+ // Print failure message from the assertion
+ // (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+ }
+}
+
+void BriefUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Failed()) {
+ ColoredPrintf(GTestColor::kRed, "[ FAILED ] ");
+ PrintTestName(test_info.test_suite_name(), test_info.name());
+ PrintFullTestCommentIfPresent(test_info);
+
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms)\n",
+ internal::StreamableToString(test_info.result()->elapsed_time())
+ .c_str());
+ } else {
+ printf("\n");
+ }
+ fflush(stdout);
+ }
+}
+
+void BriefUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ ColoredPrintf(GTestColor::kGreen, "[==========] ");
+ printf("%s from %s ran.",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestSuiteCount(unit_test.test_suite_to_run_count()).c_str());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms total)",
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
+ }
+ printf("\n");
+ ColoredPrintf(GTestColor::kGreen, "[ PASSED ] ");
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+ const int skipped_test_count = unit_test.skipped_test_count();
+ if (skipped_test_count > 0) {
+ ColoredPrintf(GTestColor::kGreen, "[ SKIPPED ] ");
+ printf("%s.\n", FormatTestCount(skipped_test_count).c_str());
+ }
+
+ int num_disabled = unit_test.reportable_disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+ if (unit_test.Passed()) {
+ printf("\n"); // Add a spacer if no FAILURE banner is displayed.
+ }
+ ColoredPrintf(GTestColor::kYellow, " YOU HAVE %d DISABLED %s\n\n",
+ num_disabled, num_disabled == 1 ? "TEST" : "TESTS");
+ }
+ // Ensure that Google Test output is printed before, e.g., heapchecker output.
+ fflush(stdout);
+}
+
+// End BriefUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+ TestEventRepeater() : forwarding_enabled_(true) {}
+ ~TestEventRepeater() override;
+ void Append(TestEventListener *listener);
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled() const { return forwarding_enabled_; }
+ void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+ void OnTestProgramStart(const UnitTest& unit_test) override;
+ void OnTestIterationStart(const UnitTest& unit_test, int iteration) override;
+ void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override;
+ void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) override;
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseStart(const TestSuite& parameter) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestSuiteStart(const TestSuite& parameter) override;
+ void OnTestStart(const TestInfo& test_info) override;
+ void OnTestPartResult(const TestPartResult& result) override;
+ void OnTestEnd(const TestInfo& test_info) override;
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& parameter) override;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestSuiteEnd(const TestSuite& parameter) override;
+ void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override;
+ void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) override;
+ void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
+ void OnTestProgramEnd(const UnitTest& unit_test) override;
+
+ private:
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled_;
+ // The list of listeners that receive events.
+ std::vector<TestEventListener*> listeners_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+ ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+ listeners_.push_back(listener);
+}
+
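+// Removes the given listener from the list without deleting it, and returns
+// the listener if found; returns nullptr if the listener was never appended.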
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+ for (size_t i = 0; i < listeners_.size(); ++i) {
+ if (listeners_[i] == listener) {
+ listeners_.erase(listeners_.begin() + static_cast<int>(i));
+ return listener;
+ }
+ }
+
+ return nullptr;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = 0; i < listeners_.size(); i++) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
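+// For example, GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) expands to a
+// definition equivalent to:
+//
+//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
+//     if (forwarding_enabled_) {
+//       for (size_t i = 0; i < listeners_.size(); i++) {
+//         listeners_[i]->OnTestStart(parameter);
+//       }
+//     }
+//   }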
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+ void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = listeners_.size(); i != 0; i--) { \
+ listeners_[i - 1]->Name(parameter); \
+ } \
+ } \
+ }
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestSuite)
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+GTEST_REPEATER_METHOD_(OnTestSuiteStart, TestSuite)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestSuite)
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+GTEST_REVERSE_REPEATER_METHOD_(OnTestSuiteEnd, TestSuite)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = 0; i < listeners_.size(); i++) {
+ listeners_[i]->OnTestIterationStart(unit_test, iteration);
+ }
+ }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = listeners_.size(); i > 0; i--) {
+ listeners_[i - 1]->OnTestIterationEnd(unit_test, iteration);
+ }
+ }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+ explicit XmlUnitTestResultPrinter(const char* output_file);
+
+ void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
+ void ListTestsMatchingFilter(const std::vector<TestSuite*>& test_suites);
+
+ // Prints an XML summary of all unit tests.
+ static void PrintXmlTestsList(std::ostream* stream,
+ const std::vector<TestSuite*>& test_suites);
+
+ private:
+ // Is c a whitespace character that is normalized to a space character
+ // when it appears in an XML attribute value?
+ static bool IsNormalizableWhitespace(char c) {
+ return c == 0x9 || c == 0xA || c == 0xD;
+ }
+
+ // May c appear in a well-formed XML document?
+ static bool IsValidXmlCharacter(char c) {
+ return IsNormalizableWhitespace(c) || c >= 0x20;
+ }
+
+ // Returns an XML-escaped copy of the input string str. If
+ // is_attribute is true, the text is meant to appear as an attribute
+ // value, and normalizable whitespace is preserved by replacing it
+ // with character references.
+ static std::string EscapeXml(const std::string& str, bool is_attribute);
+
+ // Returns the given string with all characters invalid in XML removed.
+ static std::string RemoveInvalidXmlCharacters(const std::string& str);
+
+ // Convenience wrapper around EscapeXml when str is an attribute value.
+ static std::string EscapeXmlAttribute(const std::string& str) {
+ return EscapeXml(str, true);
+ }
+
+ // Convenience wrapper around EscapeXml when str is not an attribute value.
+ static std::string EscapeXmlText(const char* str) {
+ return EscapeXml(str, false);
+ }
+
+ // Verifies that the given attribute belongs to the given element and
+ // streams the attribute as XML.
+ static void OutputXmlAttribute(std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value);
+
+ // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+ static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+ // Streams a test suite XML stanza containing the given test result.
+ //
+ // Requires: result.Failed()
+ static void OutputXmlTestSuiteForTestResult(::std::ostream* stream,
+ const TestResult& result);
+
+ // Streams an XML representation of a TestResult object.
+ static void OutputXmlTestResult(::std::ostream* stream,
+ const TestResult& result);
+
+ // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_suite_name,
+ const TestInfo& test_info);
+
+ // Prints an XML representation of a TestSuite object
+ static void PrintXmlTestSuite(::std::ostream* stream,
+ const TestSuite& test_suite);
+
+ // Prints an XML summary of unit_test to output stream out.
+ static void PrintXmlUnitTest(::std::ostream* stream,
+ const UnitTest& unit_test);
+
+ // Produces a string representing the test properties in a result as space
+ // delimited XML attributes based on the property key="value" pairs.
+ // When the std::string is not empty, it includes a space at the beginning,
+ // to delimit this attribute from prior attributes.
+ static std::string TestPropertiesAsXmlAttributes(const TestResult& result);
+
+ // Streams an XML representation of the test properties of a TestResult
+ // object.
+ static void OutputXmlTestProperties(std::ostream* stream,
+ const TestResult& result);
+
+ // The output file.
+ const std::string output_file_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+ : output_file_(output_file) {
+ if (output_file_.empty()) {
+ GTEST_LOG_(FATAL) << "XML output file may not be null";
+ }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ FILE* xmlout = OpenFileForWriting(output_file_);
+ std::stringstream stream;
+ PrintXmlUnitTest(&stream, unit_test);
+ fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
+ fclose(xmlout);
+}
+
+void XmlUnitTestResultPrinter::ListTestsMatchingFilter(
+ const std::vector<TestSuite*>& test_suites) {
+ FILE* xmlout = OpenFileForWriting(output_file_);
+ std::stringstream stream;
+ PrintXmlTestsList(&stream, test_suites);
+ fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
+ fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str. If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
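+//
+// For example, EscapeXml("1 < 2 & \"done\"", true) yields
+// "1 &lt; 2 &amp; &quot;done&quot;".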
+std::string XmlUnitTestResultPrinter::EscapeXml(
+ const std::string& str, bool is_attribute) {
+ Message m;
+
+ for (size_t i = 0; i < str.size(); ++i) {
+ const char ch = str[i];
+ switch (ch) {
+ case '<':
+ m << "&lt;";
+ break;
+ case '>':
+ m << "&gt;";
+ break;
+ case '&':
+ m << "&amp;";
+ break;
+ case '\'':
+ if (is_attribute)
+ m << "&apos;";
+ else
+ m << '\'';
+ break;
+ case '"':
+ if (is_attribute)
+ m << "&quot;";
+ else
+ m << '"';
+ break;
+ default:
+ if (IsValidXmlCharacter(ch)) {
+ if (is_attribute && IsNormalizableWhitespace(ch))
+ m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+ << ";";
+ else
+ m << ch;
+ }
+ break;
+ }
+ }
+
+ return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+ const std::string& str) {
+ std::string output;
+ output.reserve(str.size());
+ for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+ if (IsValidXmlCharacter(*it))
+ output.push_back(*it);
+
+ return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+// GOOGLETEST_CM0009 DO NOT DELETE
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests"> <-- corresponds to a UnitTest object
+// <testsuite name="testsuite-name"> <-- corresponds to a TestSuite object
+// <testcase name="test-name"> <-- corresponds to a TestInfo object
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <-- individual assertion failures
+// </testcase>
+// </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
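+// For example, FormatTimeInMillisAsSeconds(1500) returns "1.5".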
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << (static_cast<double>(ms) * 1e-3);
+ return ss.str();
+}
+
+static bool PortableLocaltime(time_t seconds, struct tm* out) {
+#if defined(_MSC_VER)
+ return localtime_s(out, &seconds) == 0;
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+ // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
+ // Windows' localtime(), which has a thread-local tm buffer.
+ struct tm* tm_ptr = localtime(&seconds); // NOLINT
+ if (tm_ptr == nullptr) return false;
+ *out = *tm_ptr;
+ return true;
+#elif defined(__STDC_LIB_EXT1__)
+ // Uses localtime_s when available as localtime_r is only available from
+ // C23 standard.
+ return localtime_s(&seconds, out) != nullptr;
+#else
+ return localtime_r(&seconds, out) != nullptr;
+#endif
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
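+// For example, 0 is formatted as "1970-01-01T00:00:00.000" when the local
+// time zone is UTC.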
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+ struct tm time_struct;
+ if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+ return "";
+ // YYYY-MM-DDThh:mm:ss.sss
+ return StreamableToString(time_struct.tm_year + 1900) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+ String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+ String::FormatIntWidth2(time_struct.tm_min) + ":" +
+ String::FormatIntWidth2(time_struct.tm_sec) + "." +
+ String::FormatIntWidthN(static_cast<int>(ms % 1000), 3);
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
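+// For example, the data "foo]]>bar" is emitted as
+// "<![CDATA[foo]]>]]><![CDATA[bar]]>", so the embedded "]]>" cannot
+// terminate the section prematurely.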
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+ const char* data) {
+ const char* segment = data;
+ *stream << "<![CDATA[";
+ for (;;) {
+ const char* const next_segment = strstr(segment, "]]>");
+ if (next_segment != nullptr) {
+ stream->write(
+ segment, static_cast<std::streamsize>(next_segment - segment));
+ *stream << "]]>]]><![CDATA[";
+ segment = next_segment + strlen("]]>");
+ } else {
+ *stream << segment;
+ break;
+ }
+ }
+ *stream << "]]>";
+}
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+ std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value) {
+ const std::vector<std::string>& allowed_names =
+ GetReservedOutputAttributesForElement(element_name);
+
+ GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+ allowed_names.end())
+ << "Attribute " << name << " is not allowed for element <" << element_name
+ << ">.";
+
+ *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Streams a test suite XML stanza containing the given test result.
+void XmlUnitTestResultPrinter::OutputXmlTestSuiteForTestResult(
+ ::std::ostream* stream, const TestResult& result) {
+ // Output the boilerplate for a minimal test suite with one test.
+ *stream << " <testsuite";
+ OutputXmlAttribute(stream, "testsuite", "name", "NonTestSuiteFailure");
+ OutputXmlAttribute(stream, "testsuite", "tests", "1");
+ OutputXmlAttribute(stream, "testsuite", "failures", "1");
+ OutputXmlAttribute(stream, "testsuite", "disabled", "0");
+ OutputXmlAttribute(stream, "testsuite", "skipped", "0");
+ OutputXmlAttribute(stream, "testsuite", "errors", "0");
+ OutputXmlAttribute(stream, "testsuite", "time",
+ FormatTimeInMillisAsSeconds(result.elapsed_time()));
+ OutputXmlAttribute(
+ stream, "testsuite", "timestamp",
+ FormatEpochTimeInMillisAsIso8601(result.start_timestamp()));
+ *stream << ">";
+
+ // Output the boilerplate for a minimal test case with a single test.
+ *stream << " <testcase";
+ OutputXmlAttribute(stream, "testcase", "name", "");
+ OutputXmlAttribute(stream, "testcase", "status", "run");
+ OutputXmlAttribute(stream, "testcase", "result", "completed");
+ OutputXmlAttribute(stream, "testcase", "classname", "");
+ OutputXmlAttribute(stream, "testcase", "time",
+ FormatTimeInMillisAsSeconds(result.elapsed_time()));
+ OutputXmlAttribute(
+ stream, "testcase", "timestamp",
+ FormatEpochTimeInMillisAsIso8601(result.start_timestamp()));
+
+ // Output the actual test result.
+ OutputXmlTestResult(stream, result);
+
+ // Complete the test suite.
+ *stream << " </testsuite>\n";
+}
+
+// Prints an XML representation of a TestInfo object.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_suite_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ const std::string kTestsuite = "testcase";
+
+ if (test_info.is_in_another_shard()) {
+ return;
+ }
+
+ *stream << " <testcase";
+ OutputXmlAttribute(stream, kTestsuite, "name", test_info.name());
+
+ if (test_info.value_param() != nullptr) {
+ OutputXmlAttribute(stream, kTestsuite, "value_param",
+ test_info.value_param());
+ }
+ if (test_info.type_param() != nullptr) {
+ OutputXmlAttribute(stream, kTestsuite, "type_param",
+ test_info.type_param());
+ }
+ if (GTEST_FLAG(list_tests)) {
+ OutputXmlAttribute(stream, kTestsuite, "file", test_info.file());
+ OutputXmlAttribute(stream, kTestsuite, "line",
+ StreamableToString(test_info.line()));
+ *stream << " />\n";
+ return;
+ }
+
+ OutputXmlAttribute(stream, kTestsuite, "status",
+ test_info.should_run() ? "run" : "notrun");
+ OutputXmlAttribute(stream, kTestsuite, "result",
+ test_info.should_run()
+ ? (result.Skipped() ? "skipped" : "completed")
+ : "suppressed");
+ OutputXmlAttribute(stream, kTestsuite, "time",
+ FormatTimeInMillisAsSeconds(result.elapsed_time()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(result.start_timestamp()));
+ OutputXmlAttribute(stream, kTestsuite, "classname", test_suite_name);
+
+ OutputXmlTestResult(stream, result);
+}
+
+void XmlUnitTestResultPrinter::OutputXmlTestResult(::std::ostream* stream,
+ const TestResult& result) {
+ int failures = 0;
+ int skips = 0;
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
+ if (part.failed()) {
+ if (++failures == 1 && skips == 0) {
+ *stream << ">\n";
+ }
+ const std::string location =
+ internal::FormatCompilerIndependentFileLocation(part.file_name(),
+ part.line_number());
+ const std::string summary = location + "\n" + part.summary();
+ *stream << " <failure message=\""
+ << EscapeXmlAttribute(summary)
+ << "\" type=\"\">";
+ const std::string detail = location + "\n" + part.message();
+ OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+ *stream << "</failure>\n";
+ } else if (part.skipped()) {
+ if (++skips == 1 && failures == 0) {
+ *stream << ">\n";
+ }
+ const std::string location =
+ internal::FormatCompilerIndependentFileLocation(part.file_name(),
+ part.line_number());
+ const std::string summary = location + "\n" + part.summary();
+ *stream << " <skipped message=\""
+ << EscapeXmlAttribute(summary.c_str()) << "\">";
+ const std::string detail = location + "\n" + part.message();
+ OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+ *stream << "</skipped>\n";
+ }
+ }
+
+ if (failures == 0 && skips == 0 && result.test_property_count() == 0) {
+ *stream << " />\n";
+ } else {
+ if (failures == 0 && skips == 0) {
+ *stream << ">\n";
+ }
+ OutputXmlTestProperties(stream, result);
+ *stream << " </testcase>\n";
+ }
+}
+
+// Prints an XML representation of a TestSuite object
+void XmlUnitTestResultPrinter::PrintXmlTestSuite(std::ostream* stream,
+ const TestSuite& test_suite) {
+ const std::string kTestsuite = "testsuite";
+ *stream << " <" << kTestsuite;
+ OutputXmlAttribute(stream, kTestsuite, "name", test_suite.name());
+ OutputXmlAttribute(stream, kTestsuite, "tests",
+ StreamableToString(test_suite.reportable_test_count()));
+ if (!GTEST_FLAG(list_tests)) {
+ OutputXmlAttribute(stream, kTestsuite, "failures",
+ StreamableToString(test_suite.failed_test_count()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "disabled",
+ StreamableToString(test_suite.reportable_disabled_test_count()));
+ OutputXmlAttribute(stream, kTestsuite, "skipped",
+ StreamableToString(test_suite.skipped_test_count()));
+
+ OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+
+ OutputXmlAttribute(stream, kTestsuite, "time",
+ FormatTimeInMillisAsSeconds(test_suite.elapsed_time()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(test_suite.start_timestamp()));
+ *stream << TestPropertiesAsXmlAttributes(test_suite.ad_hoc_test_result());
+ }
+ *stream << ">\n";
+ for (int i = 0; i < test_suite.total_test_count(); ++i) {
+ if (test_suite.GetTestInfo(i)->is_reportable())
+ OutputXmlTestInfo(stream, test_suite.name(), *test_suite.GetTestInfo(i));
+ }
+ *stream << " </" << kTestsuite << ">\n";
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+ const UnitTest& unit_test) {
+ const std::string kTestsuites = "testsuites";
+
+ *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+ *stream << "<" << kTestsuites;
+
+ OutputXmlAttribute(stream, kTestsuites, "tests",
+ StreamableToString(unit_test.reportable_test_count()));
+ OutputXmlAttribute(stream, kTestsuites, "failures",
+ StreamableToString(unit_test.failed_test_count()));
+ OutputXmlAttribute(
+ stream, kTestsuites, "disabled",
+ StreamableToString(unit_test.reportable_disabled_test_count()));
+ OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+ OutputXmlAttribute(stream, kTestsuites, "time",
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+ OutputXmlAttribute(
+ stream, kTestsuites, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+
+ if (GTEST_FLAG(shuffle)) {
+ OutputXmlAttribute(stream, kTestsuites, "random_seed",
+ StreamableToString(unit_test.random_seed()));
+ }
+ *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+ OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+ *stream << ">\n";
+
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ if (unit_test.GetTestSuite(i)->reportable_test_count() > 0)
+ PrintXmlTestSuite(stream, *unit_test.GetTestSuite(i));
+ }
+
+ // If there was a test failure outside of one of the test suites (like in a
+ // test environment) include that in the output.
+ if (unit_test.ad_hoc_test_result().Failed()) {
+ OutputXmlTestSuiteForTestResult(stream, unit_test.ad_hoc_test_result());
+ }
+
+ *stream << "</" << kTestsuites << ">\n";
+}
+
+void XmlUnitTestResultPrinter::PrintXmlTestsList(
+ std::ostream* stream, const std::vector<TestSuite*>& test_suites) {
+ const std::string kTestsuites = "testsuites";
+
+ *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+ *stream << "<" << kTestsuites;
+
+ int total_tests = 0;
+ for (auto test_suite : test_suites) {
+ total_tests += test_suite->total_test_count();
+ }
+ OutputXmlAttribute(stream, kTestsuites, "tests",
+ StreamableToString(total_tests));
+ OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+ *stream << ">\n";
+
+ for (auto test_suite : test_suites) {
+ PrintXmlTestSuite(stream, *test_suite);
+ }
+ *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
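+// For example, a property with key "threads" and value "4" is rendered as
+// ' threads="4"' (note the leading space).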
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+ const TestResult& result) {
+ Message attributes;
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ attributes << " " << property.key() << "="
+ << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+ }
+ return attributes.GetString();
+}
+
+void XmlUnitTestResultPrinter::OutputXmlTestProperties(
+ std::ostream* stream, const TestResult& result) {
+ const std::string kProperties = "properties";
+ const std::string kProperty = "property";
+
+ if (result.test_property_count() <= 0) {
+ return;
+ }
+
+ *stream << "<" << kProperties << ">\n";
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ *stream << "<" << kProperty;
+ *stream << " name=\"" << EscapeXmlAttribute(property.key()) << "\"";
+ *stream << " value=\"" << EscapeXmlAttribute(property.value()) << "\"";
+ *stream << "/>\n";
+ }
+ *stream << "</" << kProperties << ">\n";
+}
+
+// End XmlUnitTestResultPrinter
+
+// This class generates a JSON output file.
+class JsonUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+ explicit JsonUnitTestResultPrinter(const char* output_file);
+
+ void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override;
+
+ // Prints a JSON summary of all unit tests.
+ static void PrintJsonTestList(::std::ostream* stream,
+ const std::vector<TestSuite*>& test_suites);
+
+ private:
+ // Returns a JSON-escaped copy of the input string str.
+ static std::string EscapeJson(const std::string& str);
+
+ // Verifies that the given key belongs to the given element and streams
+ // the key/value pair as JSON.
+ static void OutputJsonKey(std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value,
+ const std::string& indent,
+ bool comma = true);
+ static void OutputJsonKey(std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ int value,
+ const std::string& indent,
+ bool comma = true);
+
+ // Streams a test suite JSON stanza containing the given test result.
+ //
+ // Requires: result.Failed()
+ static void OutputJsonTestSuiteForTestResult(::std::ostream* stream,
+ const TestResult& result);
+
+ // Streams a JSON representation of a TestResult object.
+ static void OutputJsonTestResult(::std::ostream* stream,
+ const TestResult& result);
+
+ // Streams a JSON representation of a TestInfo object.
+ static void OutputJsonTestInfo(::std::ostream* stream,
+ const char* test_suite_name,
+ const TestInfo& test_info);
+
+ // Prints a JSON representation of a TestSuite object
+ static void PrintJsonTestSuite(::std::ostream* stream,
+ const TestSuite& test_suite);
+
+ // Prints a JSON summary of unit_test to output stream out.
+ static void PrintJsonUnitTest(::std::ostream* stream,
+ const UnitTest& unit_test);
+
+ // Produces a string representing the test properties in a result as
+ // a JSON dictionary.
+ static std::string TestPropertiesAsJson(const TestResult& result,
+ const std::string& indent);
+
+ // The output file.
+ const std::string output_file_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(JsonUnitTestResultPrinter);
+};
+
+// Creates a new JsonUnitTestResultPrinter.
+JsonUnitTestResultPrinter::JsonUnitTestResultPrinter(const char* output_file)
+ : output_file_(output_file) {
+ if (output_file_.empty()) {
+ GTEST_LOG_(FATAL) << "JSON output file may not be null";
+ }
+}
+
+void JsonUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ FILE* jsonout = OpenFileForWriting(output_file_);
+ std::stringstream stream;
+ PrintJsonUnitTest(&stream, unit_test);
+ fprintf(jsonout, "%s", StringStreamToString(&stream).c_str());
+ fclose(jsonout);
+}
+
+// Returns a JSON-escaped copy of the input string str.
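+// For example, a double quote becomes \", a newline becomes \n, a forward
+// slash becomes \/, and characters below 0x20 without a named escape become
+// \u00XX escapes.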
+std::string JsonUnitTestResultPrinter::EscapeJson(const std::string& str) {
+ Message m;
+
+ for (size_t i = 0; i < str.size(); ++i) {
+ const char ch = str[i];
+ switch (ch) {
+ case '\\':
+ case '"':
+ case '/':
+ m << '\\' << ch;
+ break;
+ case '\b':
+ m << "\\b";
+ break;
+ case '\t':
+ m << "\\t";
+ break;
+ case '\n':
+ m << "\\n";
+ break;
+ case '\f':
+ m << "\\f";
+ break;
+ case '\r':
+ m << "\\r";
+ break;
+ default:
+ if (ch < ' ') {
+ m << "\\u00" << String::FormatByte(static_cast<unsigned char>(ch));
+ } else {
+ m << ch;
+ }
+ break;
+ }
+ }
+
+ return m.GetString();
+}
+
+// The following routines generate a JSON representation of a UnitTest
+// object.
+
+// Formats the given time in milliseconds as a duration in seconds.
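+// For example, FormatTimeInMillisAsDuration(1500) returns "1.5s".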
+static std::string FormatTimeInMillisAsDuration(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << (static_cast<double>(ms) * 1e-3) << "s";
+ return ss.str();
+}
+
+// Converts the given epoch time in milliseconds to a date string in the
+// RFC3339 format; the time is rendered in the local time zone but suffixed
+// with a literal "Z".
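+// For example, 0 is formatted as "1970-01-01T00:00:00Z" when the local time
+// zone is UTC.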
+static std::string FormatEpochTimeInMillisAsRFC3339(TimeInMillis ms) {
+ struct tm time_struct;
+ if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+ return "";
+ // YYYY-MM-DDThh:mm:ss
+ return StreamableToString(time_struct.tm_year + 1900) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+ String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+ String::FormatIntWidth2(time_struct.tm_min) + ":" +
+ String::FormatIntWidth2(time_struct.tm_sec) + "Z";
+}
+
+static inline std::string Indent(size_t width) {
+ return std::string(width, ' ');
+}
+
+void JsonUnitTestResultPrinter::OutputJsonKey(
+ std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value,
+ const std::string& indent,
+ bool comma) {
+ const std::vector<std::string>& allowed_names =
+ GetReservedOutputAttributesForElement(element_name);
+
+ GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+ allowed_names.end())
+ << "Key \"" << name << "\" is not allowed for value \"" << element_name
+ << "\".";
+
+ *stream << indent << "\"" << name << "\": \"" << EscapeJson(value) << "\"";
+ if (comma)
+ *stream << ",\n";
+}
+
+void JsonUnitTestResultPrinter::OutputJsonKey(
+ std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ int value,
+ const std::string& indent,
+ bool comma) {
+ const std::vector<std::string>& allowed_names =
+ GetReservedOutputAttributesForElement(element_name);
+
+ GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+ allowed_names.end())
+ << "Key \"" << name << "\" is not allowed for value \"" << element_name
+ << "\".";
+
+ *stream << indent << "\"" << name << "\": " << StreamableToString(value);
+ if (comma)
+ *stream << ",\n";
+}
+
+// Streams a test suite JSON stanza containing the given test result.
+void JsonUnitTestResultPrinter::OutputJsonTestSuiteForTestResult(
+ ::std::ostream* stream, const TestResult& result) {
+ // Output the boilerplate for a new test suite.
+ *stream << Indent(4) << "{\n";
+ OutputJsonKey(stream, "testsuite", "name", "NonTestSuiteFailure", Indent(6));
+ OutputJsonKey(stream, "testsuite", "tests", 1, Indent(6));
+ if (!GTEST_FLAG(list_tests)) {
+ OutputJsonKey(stream, "testsuite", "failures", 1, Indent(6));
+ OutputJsonKey(stream, "testsuite", "disabled", 0, Indent(6));
+ OutputJsonKey(stream, "testsuite", "skipped", 0, Indent(6));
+ OutputJsonKey(stream, "testsuite", "errors", 0, Indent(6));
+ OutputJsonKey(stream, "testsuite", "time",
+ FormatTimeInMillisAsDuration(result.elapsed_time()),
+ Indent(6));
+ OutputJsonKey(stream, "testsuite", "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(result.start_timestamp()),
+ Indent(6));
+ }
+ *stream << Indent(6) << "\"testsuite\": [\n";
+
+ // Output the boilerplate for a new test case.
+ *stream << Indent(8) << "{\n";
+ OutputJsonKey(stream, "testcase", "name", "", Indent(10));
+ OutputJsonKey(stream, "testcase", "status", "RUN", Indent(10));
+ OutputJsonKey(stream, "testcase", "result", "COMPLETED", Indent(10));
+ OutputJsonKey(stream, "testcase", "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(result.start_timestamp()),
+ Indent(10));
+ OutputJsonKey(stream, "testcase", "time",
+ FormatTimeInMillisAsDuration(result.elapsed_time()),
+ Indent(10));
+ OutputJsonKey(stream, "testcase", "classname", "", Indent(10), false);
+ *stream << TestPropertiesAsJson(result, Indent(10));
+
+ // Output the actual test result.
+ OutputJsonTestResult(stream, result);
+
+ // Finish the test suite.
+ *stream << "\n" << Indent(6) << "]\n" << Indent(4) << "}";
+}
+
+// Prints a JSON representation of a TestInfo object.
+void JsonUnitTestResultPrinter::OutputJsonTestInfo(::std::ostream* stream,
+ const char* test_suite_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ const std::string kTestsuite = "testcase";
+ const std::string kIndent = Indent(10);
+
+ *stream << Indent(8) << "{\n";
+ OutputJsonKey(stream, kTestsuite, "name", test_info.name(), kIndent);
+
+ if (test_info.value_param() != nullptr) {
+ OutputJsonKey(stream, kTestsuite, "value_param", test_info.value_param(),
+ kIndent);
+ }
+ if (test_info.type_param() != nullptr) {
+ OutputJsonKey(stream, kTestsuite, "type_param", test_info.type_param(),
+ kIndent);
+ }
+ if (GTEST_FLAG(list_tests)) {
+ OutputJsonKey(stream, kTestsuite, "file", test_info.file(), kIndent);
+ OutputJsonKey(stream, kTestsuite, "line", test_info.line(), kIndent, false);
+ *stream << "\n" << Indent(8) << "}";
+ return;
+ }
+
+ OutputJsonKey(stream, kTestsuite, "status",
+ test_info.should_run() ? "RUN" : "NOTRUN", kIndent);
+ OutputJsonKey(stream, kTestsuite, "result",
+ test_info.should_run()
+ ? (result.Skipped() ? "SKIPPED" : "COMPLETED")
+ : "SUPPRESSED",
+ kIndent);
+ OutputJsonKey(stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(result.start_timestamp()),
+ kIndent);
+ OutputJsonKey(stream, kTestsuite, "time",
+ FormatTimeInMillisAsDuration(result.elapsed_time()), kIndent);
+ OutputJsonKey(stream, kTestsuite, "classname", test_suite_name, kIndent,
+ false);
+ *stream << TestPropertiesAsJson(result, kIndent);
+
+ OutputJsonTestResult(stream, result);
+}
+
+void JsonUnitTestResultPrinter::OutputJsonTestResult(::std::ostream* stream,
+ const TestResult& result) {
+ const std::string kIndent = Indent(10);
+
+ int failures = 0;
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
+ if (part.failed()) {
+ *stream << ",\n";
+ if (++failures == 1) {
+ *stream << kIndent << "\"" << "failures" << "\": [\n";
+ }
+ const std::string location =
+ internal::FormatCompilerIndependentFileLocation(part.file_name(),
+ part.line_number());
+ const std::string message = EscapeJson(location + "\n" + part.message());
+ *stream << kIndent << " {\n"
+ << kIndent << " \"failure\": \"" << message << "\",\n"
+ << kIndent << " \"type\": \"\"\n"
+ << kIndent << " }";
+ }
+ }
+
+ if (failures > 0)
+ *stream << "\n" << kIndent << "]";
+ *stream << "\n" << Indent(8) << "}";
+}
+
+// Prints a JSON representation of a TestSuite object
+void JsonUnitTestResultPrinter::PrintJsonTestSuite(
+ std::ostream* stream, const TestSuite& test_suite) {
+ const std::string kTestsuite = "testsuite";
+ const std::string kIndent = Indent(6);
+
+ *stream << Indent(4) << "{\n";
+ OutputJsonKey(stream, kTestsuite, "name", test_suite.name(), kIndent);
+ OutputJsonKey(stream, kTestsuite, "tests", test_suite.reportable_test_count(),
+ kIndent);
+ if (!GTEST_FLAG(list_tests)) {
+ OutputJsonKey(stream, kTestsuite, "failures",
+ test_suite.failed_test_count(), kIndent);
+ OutputJsonKey(stream, kTestsuite, "disabled",
+ test_suite.reportable_disabled_test_count(), kIndent);
+ OutputJsonKey(stream, kTestsuite, "errors", 0, kIndent);
+ OutputJsonKey(
+ stream, kTestsuite, "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(test_suite.start_timestamp()),
+ kIndent);
+ OutputJsonKey(stream, kTestsuite, "time",
+ FormatTimeInMillisAsDuration(test_suite.elapsed_time()),
+ kIndent, false);
+ *stream << TestPropertiesAsJson(test_suite.ad_hoc_test_result(), kIndent)
+ << ",\n";
+ }
+
+ *stream << kIndent << "\"" << kTestsuite << "\": [\n";
+
+ bool comma = false;
+ for (int i = 0; i < test_suite.total_test_count(); ++i) {
+ if (test_suite.GetTestInfo(i)->is_reportable()) {
+ if (comma) {
+ *stream << ",\n";
+ } else {
+ comma = true;
+ }
+ OutputJsonTestInfo(stream, test_suite.name(), *test_suite.GetTestInfo(i));
+ }
+ }
+ *stream << "\n" << kIndent << "]\n" << Indent(4) << "}";
+}
+
+// Prints a JSON summary of unit_test to output stream out.
+void JsonUnitTestResultPrinter::PrintJsonUnitTest(std::ostream* stream,
+ const UnitTest& unit_test) {
+ const std::string kTestsuites = "testsuites";
+ const std::string kIndent = Indent(2);
+ *stream << "{\n";
+
+ OutputJsonKey(stream, kTestsuites, "tests", unit_test.reportable_test_count(),
+ kIndent);
+ OutputJsonKey(stream, kTestsuites, "failures", unit_test.failed_test_count(),
+ kIndent);
+ OutputJsonKey(stream, kTestsuites, "disabled",
+ unit_test.reportable_disabled_test_count(), kIndent);
+ OutputJsonKey(stream, kTestsuites, "errors", 0, kIndent);
+ if (GTEST_FLAG(shuffle)) {
+ OutputJsonKey(stream, kTestsuites, "random_seed", unit_test.random_seed(),
+ kIndent);
+ }
+ OutputJsonKey(stream, kTestsuites, "timestamp",
+ FormatEpochTimeInMillisAsRFC3339(unit_test.start_timestamp()),
+ kIndent);
+ OutputJsonKey(stream, kTestsuites, "time",
+ FormatTimeInMillisAsDuration(unit_test.elapsed_time()), kIndent,
+ false);
+
+ *stream << TestPropertiesAsJson(unit_test.ad_hoc_test_result(), kIndent)
+ << ",\n";
+
+ OutputJsonKey(stream, kTestsuites, "name", "AllTests", kIndent);
+ *stream << kIndent << "\"" << kTestsuites << "\": [\n";
+
+ bool comma = false;
+ for (int i = 0; i < unit_test.total_test_suite_count(); ++i) {
+ if (unit_test.GetTestSuite(i)->reportable_test_count() > 0) {
+ if (comma) {
+ *stream << ",\n";
+ } else {
+ comma = true;
+ }
+ PrintJsonTestSuite(stream, *unit_test.GetTestSuite(i));
+ }
+ }
+
+ // If there was a test failure outside of one of the test suites (like in
+ // a test environment), include that in the output.
+ if (unit_test.ad_hoc_test_result().Failed()) {
+ OutputJsonTestSuiteForTestResult(stream, unit_test.ad_hoc_test_result());
+ }
+
+ *stream << "\n" << kIndent << "]\n" << "}\n";
+}
+
+void JsonUnitTestResultPrinter::PrintJsonTestList(
+ std::ostream* stream, const std::vector<TestSuite*>& test_suites) {
+ const std::string kTestsuites = "testsuites";
+ const std::string kIndent = Indent(2);
+ *stream << "{\n";
+ int total_tests = 0;
+ for (auto test_suite : test_suites) {
+ total_tests += test_suite->total_test_count();
+ }
+ OutputJsonKey(stream, kTestsuites, "tests", total_tests, kIndent);
+
+ OutputJsonKey(stream, kTestsuites, "name", "AllTests", kIndent);
+ *stream << kIndent << "\"" << kTestsuites << "\": [\n";
+
+ for (size_t i = 0; i < test_suites.size(); ++i) {
+ if (i != 0) {
+ *stream << ",\n";
+ }
+ PrintJsonTestSuite(stream, *test_suites[i]);
+ }
+
+ *stream << "\n"
+ << kIndent << "]\n"
+ << "}\n";
+}
+// Produces a string representing the test properties in a result as
+// a JSON dictionary.
+std::string JsonUnitTestResultPrinter::TestPropertiesAsJson(
+ const TestResult& result, const std::string& indent) {
+ Message attributes;
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ attributes << ",\n" << indent << "\"" << property.key() << "\": "
+ << "\"" << EscapeJson(property.value()) << "\"";
+ }
+ return attributes.GetString();
+}
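+// For example, a result carrying RecordProperty("cpu", "x86") contributes
+// `,\n<indent>"cpu": "x86"` to the enclosing JSON object, mirroring the
+// loop above.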
+
+// End JsonUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
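+//
+// For example, "a=b&c" is encoded as "a%3Db%26c".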
+std::string StreamingListener::UrlEncode(const char* str) {
+ std::string result;
+ result.reserve(strlen(str) + 1);
+ for (char ch = *str; ch != '\0'; ch = *++str) {
+ switch (ch) {
+ case '%':
+ case '=':
+ case '&':
+ case '\n':
+ result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
+ break;
+ default:
+ result.push_back(ch);
+ break;
+ }
+ }
+ return result;
+}
+
+void StreamingListener::SocketWriter::MakeConnection() {
+ GTEST_CHECK_(sockfd_ == -1)
+ << "MakeConnection() can't be called when there is already a connection.";
+
+ addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses.
+ hints.ai_socktype = SOCK_STREAM;
+ addrinfo* servinfo = nullptr;
+
+ // Use getaddrinfo() to get a linked list of IP addresses for
+ // the given host name.
+ const int error_num = getaddrinfo(
+ host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+ if (error_num != 0) {
+ GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+ << gai_strerror(error_num);
+ }
+
+ // Loop through all the results and connect to the first we can.
+ for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != nullptr;
+ cur_addr = cur_addr->ai_next) {
+ sockfd_ = socket(
+ cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+ if (sockfd_ != -1) {
+ // Connect the client socket to the server socket.
+ if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+ }
+ }
+
+ freeaddrinfo(servinfo); // all done with this structure
+
+ if (sockfd_ == -1) {
+ GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+ << host_name_ << ":" << port_num_;
+ }
+}
+
+// End of class StreamingListener
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+// class OsStackTraceGetter
+
+const char* const OsStackTraceGetterInterface::kElidedFramesMarker =
+ "... " GTEST_NAME_ " internal frames ...";
+
+std::string OsStackTraceGetter::CurrentStackTrace(int max_depth, int skip_count)
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+#if GTEST_HAS_ABSL
+ std::string result;
+
+ if (max_depth <= 0) {
+ return result;
+ }
+
+ max_depth = std::min(max_depth, kMaxStackTraceDepth);
+
+ std::vector<void*> raw_stack(max_depth);
+ // Skips the frames requested by the caller, plus this function.
+ const int raw_stack_size =
+ absl::GetStackTrace(&raw_stack[0], max_depth, skip_count + 1);
+
+ void* caller_frame = nullptr;
+ {
+ MutexLock lock(&mutex_);
+ caller_frame = caller_frame_;
+ }
+
+ for (int i = 0; i < raw_stack_size; ++i) {
+ if (raw_stack[i] == caller_frame &&
+ !GTEST_FLAG(show_internal_stack_frames)) {
+ // Add a marker to the trace and stop adding frames.
+ absl::StrAppend(&result, kElidedFramesMarker, "\n");
+ break;
+ }
+
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (absl::Symbolize(raw_stack[i], tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+
+ char line[1024];
+ snprintf(line, sizeof(line), " %p: %s\n", raw_stack[i], symbol);
+ result += line;
+ }
+
+ return result;
+
+#else // !GTEST_HAS_ABSL
+ static_cast<void>(max_depth);
+ static_cast<void>(skip_count);
+ return "";
+#endif // GTEST_HAS_ABSL
+}
+
+void OsStackTraceGetter::UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_) {
+#if GTEST_HAS_ABSL
+ void* caller_frame = nullptr;
+ if (absl::GetStackTrace(&caller_frame, 1, 3) <= 0) {
+ caller_frame = nullptr;
+ }
+
+ MutexLock lock(&mutex_);
+ caller_frame_ = caller_frame;
+#endif // GTEST_HAS_ABSL
+}
+
+// A helper class that creates the premature-exit file in its
+// constructor and deletes the file in its destructor.
+class ScopedPrematureExitFile {
+ public:
+ explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
+ : premature_exit_filepath_(premature_exit_filepath ?
+ premature_exit_filepath : "") {
+ // If a path to the premature-exit file is specified...
+ if (!premature_exit_filepath_.empty()) {
+ // create the file with a single "0" character in it. I/O
+ // errors are ignored as there's nothing better we can do and we
+ // don't want to fail the test because of this.
+ FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
+ if (pfile != nullptr) {
+ fwrite("0", 1, 1, pfile);
+ fclose(pfile);
+ }
+ }
+ }
+
+ ~ScopedPrematureExitFile() {
+#if !defined GTEST_OS_ESP8266
+ if (!premature_exit_filepath_.empty()) {
+ int retval = remove(premature_exit_filepath_.c_str());
+ if (retval) {
+ GTEST_LOG_(ERROR) << "Failed to remove premature exit filepath \""
+ << premature_exit_filepath_ << "\" with error "
+ << retval;
+ }
+ }
+#endif
+ }
+
+ private:
+ const std::string premature_exit_filepath_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
+};
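+// Used by UnitTest::Run() below: the premature-exit file named by the
+// TEST_PREMATURE_EXIT_FILE environment variable exists exactly while the
+// test program runs, so a runner that still sees the file afterwards can
+// conclude that the program exited prematurely.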
+
+} // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+ : repeater_(new internal::TestEventRepeater()),
+ default_result_printer_(nullptr),
+ default_xml_generator_(nullptr) {}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Adds a listener to the end of the list. Google Test takes the ownership
+// of the listener (i.e. it will delete the listener when the test program
+// finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+ repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it. It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+ if (listener == default_result_printer_)
+ default_result_printer_ = nullptr;
+ else if (listener == default_xml_generator_)
+ default_xml_generator_ = nullptr;
+ return repeater_->Release(listener);
+}
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener.
+// The listener is also added to the listener list and previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+ if (default_result_printer_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_result_printer_);
+ default_result_printer_ = listener;
+ if (listener != nullptr) Append(listener);
+ }
+}
+
+// Sets the default_xml_generator attribute to the provided listener. The
+// listener is also added to the listener list and previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+ if (default_xml_generator_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_xml_generator_);
+ default_xml_generator_ = listener;
+ if (listener != nullptr) Append(listener);
+ }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+ return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+ repeater_->set_forwarding_enabled(false);
+}
+
+// class UnitTest
+
+// Gets the singleton UnitTest object. The first time this method is
+// called, a UnitTest object is constructed and returned. Consecutive
+// calls will return the same object.
+//
+// We don't protect this under mutex_ as a user is not supposed to
+// call this before main() starts, from which point on the return
+// value will never change.
+UnitTest* UnitTest::GetInstance() {
+ // CodeGear C++Builder insists on a public destructor for the
+ // default implementation. Use this implementation to keep good OO
+ // design with private destructor.
+
+#if defined(__BORLANDC__)
+ static UnitTest* const instance = new UnitTest;
+ return instance;
+#else
+ static UnitTest instance;
+ return &instance;
+#endif // defined(__BORLANDC__)
+}
+
+// Gets the number of successful test suites.
+int UnitTest::successful_test_suite_count() const {
+ return impl()->successful_test_suite_count();
+}
+
+// Gets the number of failed test suites.
+int UnitTest::failed_test_suite_count() const {
+ return impl()->failed_test_suite_count();
+}
+
+// Gets the number of all test suites.
+int UnitTest::total_test_suite_count() const {
+ return impl()->total_test_suite_count();
+}
+
+// Gets the number of all test suites that contain at least one test
+// that should run.
+int UnitTest::test_suite_to_run_count() const {
+ return impl()->test_suite_to_run_count();
+}
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+int UnitTest::successful_test_case_count() const {
+ return impl()->successful_test_suite_count();
+}
+int UnitTest::failed_test_case_count() const {
+ return impl()->failed_test_suite_count();
+}
+int UnitTest::total_test_case_count() const {
+ return impl()->total_test_suite_count();
+}
+int UnitTest::test_case_to_run_count() const {
+ return impl()->test_suite_to_run_count();
+}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+ return impl()->successful_test_count();
+}
+
+// Gets the number of skipped tests.
+int UnitTest::skipped_test_count() const {
+ return impl()->skipped_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTest::reportable_disabled_test_count() const {
+ return impl()->reportable_disabled_test_count();
+}
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+ return impl()->disabled_test_count();
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTest::reportable_test_count() const {
+ return impl()->reportable_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the time of the test program start, in ms from the start of the
+// UNIX epoch.
+internal::TimeInMillis UnitTest::start_timestamp() const {
+ return impl()->start_timestamp();
+}
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+ return impl()->elapsed_time();
+}
+
+// Returns true if and only if the unit test passed (i.e. all test suites
+// passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true if and only if the unit test failed (i.e. some test suite
+// failed or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test suite among all the test suites. i can range from 0 to
+// total_test_suite_count() - 1. If i is not in that range, returns NULL.
+const TestSuite* UnitTest::GetTestSuite(int i) const {
+ return impl()->GetTestSuite(i);
+}
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+const TestCase* UnitTest::GetTestCase(int i) const {
+ return impl()->GetTestCase(i);
+}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+// Returns the TestResult containing information on test failures and
+// properties logged outside of individual test suites.
+const TestResult& UnitTest::ad_hoc_test_result() const {
+ return *impl()->ad_hoc_test_result();
+}
+
+// Gets the i-th test suite among all the test suites. i can range from 0 to
+// total_test_suite_count() - 1. If i is not in that range, returns NULL.
+TestSuite* UnitTest::GetMutableTestSuite(int i) {
+ return impl()->GetMutableSuiteCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+ return *impl()->listeners();
+}
+
+// Registers and returns a global test environment. When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered. After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+ if (env == nullptr) {
+ return nullptr;
+ }
+
+ impl_->environments().push_back(env);
+ return env;
+}
+
+// Adds a TestPartResult to the current TestResult object. All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results. The user code should use the
+// assertion macros instead of calling this directly.
+void UnitTest::AddTestPartResult(
+ TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
+ Message msg;
+ msg << message;
+
+ internal::MutexLock lock(&mutex_);
+ if (impl_->gtest_trace_stack().size() > 0) {
+ msg << "\n" << GTEST_NAME_ << " trace:";
+
+ for (size_t i = impl_->gtest_trace_stack().size(); i > 0; --i) {
+ const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+ msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+ << " " << trace.message;
+ }
+ }
+
+ if (!os_stack_trace.empty()) {
+ msg << internal::kStackTraceMarker << os_stack_trace;
+ }
+
+ const TestPartResult result = TestPartResult(
+ result_type, file_name, line_number, msg.GetString().c_str());
+ impl_->GetTestPartResultReporterForCurrentThread()->
+ ReportTestPartResult(result);
+
+ if (result_type != TestPartResult::kSuccess &&
+ result_type != TestPartResult::kSkip) {
+ // gtest_break_on_failure takes precedence over
+ // gtest_throw_on_failure. This allows a user to set the latter
+ // in the code (perhaps in order to use Google Test assertions
+ // with another testing framework) and specify the former on the
+ // command line for debugging.
+ if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+ // Using DebugBreak on Windows allows gtest to still break into a debugger
+ // when a failure happens and both the --gtest_break_on_failure and
+ // the --gtest_catch_exceptions flags are specified.
+ DebugBreak();
+#elif (!defined(__native_client__)) && \
+ ((defined(__clang__) || defined(__GNUC__)) && \
+ (defined(__x86_64__) || defined(__i386__)))
+ // with clang/gcc we can achieve the same effect on x86 by invoking int3
+ asm("int3");
+#else
+ // Dereference nullptr through a volatile pointer to prevent the compiler
+ // from removing. We use this rather than abort() or __builtin_trap() for
+ // portability: some debuggers don't correctly trap abort().
+ *static_cast<volatile int*>(nullptr) = 1;
+#endif // GTEST_OS_WINDOWS
+ } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+ throw internal::GoogleTestFailureException(result);
+#else
+ // We cannot call abort() as it generates a pop-up in debug mode
+ // that cannot be suppressed in VC 7.1 or below.
+ exit(1);
+#endif
+ }
+ }
+}
+
+// Adds a TestProperty to the current TestResult object when invoked from
+// inside a test, to current TestSuite's ad_hoc_test_result_ when invoked
+// from SetUpTestSuite or TearDownTestSuite, or to the global property set
+// when invoked elsewhere. If the result already contains a property with
+// the same key, the value will be updated.
+void UnitTest::RecordProperty(const std::string& key,
+ const std::string& value) {
+ impl_->RecordProperty(TestProperty(key, value));
+}
+
+// Runs all tests in this UnitTest object and prints the result.
+// Returns 0 if successful, or 1 otherwise.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+int UnitTest::Run() {
+ const bool in_death_test_child_process =
+ internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+ // Google Test implements this protocol for catching that a test
+ // program exits before returning control to Google Test:
+ //
+ // 1. Upon start, Google Test creates a file whose absolute path
+ // is specified by the environment variable
+ // TEST_PREMATURE_EXIT_FILE.
+ // 2. When Google Test has finished its work, it deletes the file.
+ //
+ // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before
+ // running a Google-Test-based test program and check the existence
+ // of the file at the end of the test execution to see if it has
+ // exited prematurely.
+
+ // If we are in the child process of a death test, don't
+ // create/delete the premature exit file, as doing so is unnecessary
+ // and will confuse the parent process. Otherwise, create/delete
+ // the file upon entering/leaving this function. If the program
+ // somehow exits before this function has a chance to return, the
+ // premature-exit file will be left undeleted, causing a test runner
+ // that understands the premature-exit-file protocol to report the
+ // test as having failed.
+ const internal::ScopedPrematureExitFile premature_exit_file(
+ in_death_test_child_process
+ ? nullptr
+ : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE"));
+
+ // Captures the value of GTEST_FLAG(catch_exceptions). This value will be
+ // used for the duration of the program.
+ impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));
+
+#if GTEST_OS_WINDOWS
+ // Either the user wants Google Test to catch exceptions thrown by the
+ // tests or this is executing in the context of death test child
+ // process. In either case the user does not want to see pop-up dialogs
+ // about crashes - they are expected.
+ if (impl()->catch_exceptions() || in_death_test_child_process) {
+# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+ // SetErrorMode doesn't exist on CE.
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
+ SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+# endif // !GTEST_OS_WINDOWS_MOBILE
+
+# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+ // Death test children can be terminated with _abort(). On Windows,
+ // _abort() can show a dialog with a warning message. This forces the
+ // abort message to go to stderr instead.
+ _set_error_mode(_OUT_TO_STDERR);
+# endif
+
+# if defined(_MSC_VER) && !GTEST_OS_WINDOWS_MOBILE
+ // In the debug version, Visual Studio pops up a separate dialog
+ // offering a choice to debug the aborted program. We need to suppress
+ // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+ // executed. Google Test will notify the user of any unexpected
+ // failure via stderr.
+ if (!GTEST_FLAG(break_on_failure))
+ _set_abort_behavior(
+ 0x0, // Clear the following flags:
+ _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump.
+
+ // In debug mode, the Windows CRT can crash with an assertion over invalid
+ // input (e.g. passing an invalid file descriptor). The default handling
+ // for these assertions is to pop up a dialog and wait for user input.
+ // Instead ask the CRT to dump such assertions to stderr non-interactively.
+ if (!IsDebuggerPresent()) {
+ (void)_CrtSetReportMode(_CRT_ASSERT,
+ _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG);
+ (void)_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+ }
+# endif
+ }
+#endif // GTEST_OS_WINDOWS
+
+ return internal::HandleExceptionsInMethodIfSupported(
+ impl(),
+ &internal::UnitTestImpl::RunAllTests,
+ "auxiliary test code (environments or event listeners)") ? 0 : 1;
+}
+
+// Returns the working directory when the first TEST() or TEST_F() was
+// executed.
+const char* UnitTest::original_working_dir() const {
+ return impl_->original_working_dir_.c_str();
+}
+
+// Returns the TestSuite object for the test that's currently running,
+// or NULL if no test is running.
+const TestSuite* UnitTest::current_test_suite() const
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_suite();
+}
+
+// Legacy API is still available but deprecated
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+const TestCase* UnitTest::current_test_case() const
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_suite();
+}
+#endif
+
+// Returns the TestInfo object for the test that's currently running,
+// or NULL if no test is running.
+const TestInfo* UnitTest::current_test_info() const
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_info();
+}
+
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+// Returns ParameterizedTestSuiteRegistry object used to keep track of
+// value-parameterized tests and instantiate and register them.
+internal::ParameterizedTestSuiteRegistry&
+UnitTest::parameterized_test_registry() GTEST_LOCK_EXCLUDED_(mutex_) {
+ return impl_->parameterized_test_registry();
+}
+
+// Creates an empty UnitTest.
+UnitTest::UnitTest() {
+ impl_ = new internal::UnitTestImpl(this);
+}
+
+// Destructor of UnitTest.
+UnitTest::~UnitTest() {
+ delete impl_;
+}
+
+// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+// Google Test trace stack.
+void UnitTest::PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().push_back(trace);
+}
+
+// Pops a trace from the per-thread Google Test trace stack.
+void UnitTest::PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().pop_back();
+}
+
+namespace internal {
+
+UnitTestImpl::UnitTestImpl(UnitTest* parent)
+ : parent_(parent),
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */)
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+ GTEST_DISABLE_MSC_WARNINGS_POP_() global_test_part_result_repoter_(
+ &default_global_test_part_result_reporter_),
+ per_thread_test_part_result_reporter_(
+ &default_per_thread_test_part_result_reporter_),
+ parameterized_test_registry_(),
+ parameterized_tests_registered_(false),
+ last_death_test_suite_(-1),
+ current_test_suite_(nullptr),
+ current_test_info_(nullptr),
+ ad_hoc_test_result_(),
+ os_stack_trace_getter_(nullptr),
+ post_flag_parse_init_performed_(false),
+ random_seed_(0), // Will be overridden by the flag before first use.
+ random_(0), // Will be reseeded before first use.
+ start_timestamp_(0),
+ elapsed_time_(0),
+#if GTEST_HAS_DEATH_TEST
+ death_test_factory_(new DefaultDeathTestFactory),
+#endif
+ // Will be overridden by the flag before first use.
+ catch_exceptions_(false) {
+ listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+ // Deletes every TestSuite.
+ ForEach(test_suites_, internal::Delete<TestSuite>);
+
+ // Deletes every Environment.
+ ForEach(environments_, internal::Delete<Environment>);
+
+ delete os_stack_trace_getter_;
+}
+
+// Adds a TestProperty to the current TestResult object when invoked in the
+// context of a test, to the current test suite's ad_hoc_test_result when
+// invoked from SetUpTestSuite/TearDownTestSuite, or to the global property
+// set otherwise. If the result already contains a property with the same key,
+// the value will be updated.
+void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
+ std::string xml_element;
+ TestResult* test_result; // TestResult appropriate for property recording.
+
+ if (current_test_info_ != nullptr) {
+ xml_element = "testcase";
+ test_result = &(current_test_info_->result_);
+ } else if (current_test_suite_ != nullptr) {
+ xml_element = "testsuite";
+ test_result = &(current_test_suite_->ad_hoc_test_result_);
+ } else {
+ xml_element = "testsuites";
+ test_result = &ad_hoc_test_result_;
+ }
+ test_result->RecordProperty(xml_element, test_property);
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+ if (internal_run_death_test_flag_.get() != nullptr)
+ listeners()->SuppressEventForwarding();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+ const std::string& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml") {
+ listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format == "json") {
+ listeners()->SetDefaultXmlGenerator(new JsonUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format != "") {
+ GTEST_LOG_(WARNING) << "WARNING: unrecognized output format \""
+ << output_format << "\" ignored.";
+ }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
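+// The target comes from the --gtest_stream_result_to flag and must have the
+// form HOST:PORT, e.g. "--gtest_stream_result_to=localhost:9090" (the host
+// and port here are purely illustrative).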
+void UnitTestImpl::ConfigureStreamingOutput() {
+ const std::string& target = GTEST_FLAG(stream_result_to);
+ if (!target.empty()) {
+ const size_t pos = target.find(':');
+ if (pos != std::string::npos) {
+ listeners()->Append(new StreamingListener(target.substr(0, pos),
+ target.substr(pos+1)));
+ } else {
+ GTEST_LOG_(WARNING) << "unrecognized streaming target \"" << target
+ << "\" ignored.";
+ }
+ }
+}
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest,
+// this function is also called from RunAllTests. Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+ // Ensures that this function does not execute more than once.
+ if (!post_flag_parse_init_performed_) {
+ post_flag_parse_init_performed_ = true;
+
+#if defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+ // Register to send notifications about key process state changes.
+ listeners()->Append(new GTEST_CUSTOM_TEST_EVENT_LISTENER_());
+#endif // defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+
+#if GTEST_HAS_DEATH_TEST
+ InitDeathTestSubprocessControlInfo();
+ SuppressTestEventsIfInSubprocess();
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Registers parameterized tests. This makes parameterized tests
+ // available to the UnitTest reflection API without running
+ // RUN_ALL_TESTS.
+ RegisterParameterizedTests();
+
+ // Configures listeners for XML output. This makes it possible for users
+ // to shut down the default XML output before invoking RUN_ALL_TESTS.
+ ConfigureXmlOutput();
+
+ if (GTEST_FLAG(brief)) {
+ listeners()->SetDefaultResultPrinter(new BriefUnitTestResultPrinter);
+ }
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Configures listeners for streaming test results to the specified server.
+ ConfigureStreamingOutput();
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+#if GTEST_HAS_ABSL
+ if (GTEST_FLAG(install_failure_signal_handler)) {
+ absl::FailureSignalHandlerOptions options;
+ absl::InstallFailureSignalHandler(options);
+ }
+#endif // GTEST_HAS_ABSL
+ }
+}
+
+// A predicate that checks the name of a TestSuite against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the internal namespace to prevent polluting the outer
+// namespace.
+//
+// TestSuiteNameIs is copyable.
+class TestSuiteNameIs {
+ public:
+ // Constructor.
+ explicit TestSuiteNameIs(const std::string& name) : name_(name) {}
+
+ // Returns true if and only if the name of test_suite matches name_.
+ bool operator()(const TestSuite* test_suite) const {
+ return test_suite != nullptr &&
+ strcmp(test_suite->name(), name_.c_str()) == 0;
+ }
+
+ private:
+ std::string name_;
+};
+
+// Finds and returns a TestSuite with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+// test_suite_name: name of the test suite
+// type_param: the name of the test suite's type parameter, or NULL if
+// this is not a typed or a type-parameterized test suite.
+// set_up_tc: pointer to the function that sets up the test suite
+// tear_down_tc: pointer to the function that tears down the test suite
+TestSuite* UnitTestImpl::GetTestSuite(
+ const char* test_suite_name, const char* type_param,
+ internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc) {
+ // Can we find a TestSuite with the given name?
+ const auto test_suite =
+ std::find_if(test_suites_.rbegin(), test_suites_.rend(),
+ TestSuiteNameIs(test_suite_name));
+
+ if (test_suite != test_suites_.rend()) return *test_suite;
+
+ // No. Let's create one.
+ auto* const new_test_suite =
+ new TestSuite(test_suite_name, type_param, set_up_tc, tear_down_tc);
+
+ // Is this a death test suite?
+ if (internal::UnitTestOptions::MatchesFilter(test_suite_name,
+ kDeathTestSuiteFilter)) {
+ // Yes. Inserts the test suite after the last death test suite
+ // defined so far. This only works when the test suites haven't
+ // been shuffled. Otherwise we may end up running a death test
+ // after a non-death test.
+ ++last_death_test_suite_;
+ test_suites_.insert(test_suites_.begin() + last_death_test_suite_,
+ new_test_suite);
+ } else {
+ // No. Appends to the end of the list.
+ test_suites_.push_back(new_test_suite);
+ }
+
+ test_suite_indices_.push_back(static_cast<int>(test_suite_indices_.size()));
+ return new_test_suite;
+}
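+// For example, if suites are registered in the order FooTest, BarDeathTest,
+// BazTest, the resulting order is BarDeathTest, FooTest, BazTest: death test
+// suites are kept at the front so they run first.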
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful. If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+bool UnitTestImpl::RunAllTests() {
+ // True if and only if Google Test is initialized before RUN_ALL_TESTS() is
+ // called.
+ const bool gtest_is_initialized_before_run_all_tests = GTestIsInitialized();
+
+ // Do not run any test if the --help flag was specified.
+ if (g_help_flag)
+ return true;
+
+ // Repeats the call to the post-flag parsing initialization in case the
+ // user didn't call InitGoogleTest.
+ PostFlagParsingInit();
+
+ // Even if sharding is not on, test runners may want to use the
+ // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+ // protocol.
+ internal::WriteToShardStatusFileIfNeeded();
+
+ // True if and only if we are in a subprocess for running a thread-safe-style
+ // death test.
+ bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+ in_subprocess_for_death_test =
+ (internal_run_death_test_flag_.get() != nullptr);
+# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+ if (in_subprocess_for_death_test) {
+ GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_();
+ }
+# endif // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+#endif // GTEST_HAS_DEATH_TEST
+
+ const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+ in_subprocess_for_death_test);
+
+ // Compares the full test names with the filter to decide which
+ // tests to run.
+ const bool has_tests_to_run = FilterTests(should_shard
+ ? HONOR_SHARDING_PROTOCOL
+ : IGNORE_SHARDING_PROTOCOL) > 0;
+
+ // Lists the tests and exits if the --gtest_list_tests flag was specified.
+ if (GTEST_FLAG(list_tests)) {
+ // This must be called *after* FilterTests() has been called.
+ ListTestsMatchingFilter();
+ return true;
+ }
+
+ random_seed_ = GTEST_FLAG(shuffle) ?
+ GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+ // True if and only if at least one test has failed.
+ bool failed = false;
+
+ TestEventListener* repeater = listeners()->repeater();
+
+ start_timestamp_ = GetTimeInMillis();
+ repeater->OnTestProgramStart(*parent_);
+
+ // How many times to repeat the tests? We don't want to repeat them
+ // when we are inside the subprocess of a death test.
+ const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+ // Repeats forever if the repeat count is negative.
+ const bool gtest_repeat_forever = repeat < 0;
+ for (int i = 0; gtest_repeat_forever || i != repeat; i++) {
+ // We want to preserve failures generated by ad-hoc test
+ // assertions executed before RUN_ALL_TESTS().
+ ClearNonAdHocTestResult();
+
+ Timer timer;
+
+ // Shuffles test suites and tests if requested.
+ if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+ random()->Reseed(static_cast<uint32_t>(random_seed_));
+ // This should be done before calling OnTestIterationStart(),
+ // such that a test event listener can see the actual test order
+ // in the event.
+ ShuffleTests();
+ }
+
+ // Tells the unit test event listeners that the tests are about to start.
+ repeater->OnTestIterationStart(*parent_, i);
+
+ // Runs each test suite if there is at least one test to run.
+ if (has_tests_to_run) {
+ // Sets up all environments beforehand.
+ repeater->OnEnvironmentsSetUpStart(*parent_);
+ ForEach(environments_, SetUpEnvironment);
+ repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+ // Runs the tests only if there was no fatal failure or skip triggered
+ // during global set-up.
+ if (Test::IsSkipped()) {
+ // Emit diagnostics when global set-up calls skip, as they will not be
+ // emitted by default.
+ TestResult& test_result =
+ *internal::GetUnitTestImpl()->current_test_result();
+ for (int j = 0; j < test_result.total_part_count(); ++j) {
+ const TestPartResult& test_part_result =
+ test_result.GetTestPartResult(j);
+ if (test_part_result.type() == TestPartResult::kSkip) {
+ const std::string& result = test_part_result.message();
+ printf("%s\n", result.c_str());
+ }
+ }
+ fflush(stdout);
+ } else if (!Test::HasFatalFailure()) {
+ for (int test_index = 0; test_index < total_test_suite_count();
+ test_index++) {
+ GetMutableSuiteCase(test_index)->Run();
+ if (GTEST_FLAG(fail_fast) &&
+ GetMutableSuiteCase(test_index)->Failed()) {
+ for (int j = test_index + 1; j < total_test_suite_count(); j++) {
+ GetMutableSuiteCase(j)->Skip();
+ }
+ break;
+ }
+ }
+ } else if (Test::HasFatalFailure()) {
+ // If there was a fatal failure during the global setup then we know we
+ // aren't going to run any tests. Explicitly mark all of the tests as
+ // skipped to make this obvious in the output.
+ for (int test_index = 0; test_index < total_test_suite_count();
+ test_index++) {
+ GetMutableSuiteCase(test_index)->Skip();
+ }
+ }
+
+ // Tears down all environments in reverse order afterwards.
+ repeater->OnEnvironmentsTearDownStart(*parent_);
+ std::for_each(environments_.rbegin(), environments_.rend(),
+ TearDownEnvironment);
+ repeater->OnEnvironmentsTearDownEnd(*parent_);
+ }
+
+ elapsed_time_ = timer.Elapsed();
+
+ // Tells the unit test event listener that the tests have just finished.
+ repeater->OnTestIterationEnd(*parent_, i);
+
+ // Gets the result and clears it.
+ if (!Passed()) {
+ failed = true;
+ }
+
+ // Restores the original test order after the iteration. This
+ // allows the user to quickly repro a failure that happens in the
+ // N-th iteration without repeating the first (N - 1) iterations.
+ // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+ // case the user somehow changes the value of the flag somewhere
+ // (it's always safe to unshuffle the tests).
+ UnshuffleTests();
+
+ if (GTEST_FLAG(shuffle)) {
+ // Picks a new random seed for each iteration.
+ random_seed_ = GetNextRandomSeed(random_seed_);
+ }
+ }
+
+ repeater->OnTestProgramEnd(*parent_);
+
+ if (!gtest_is_initialized_before_run_all_tests) {
+ ColoredPrintf(
+ GTestColor::kRed,
+ "\nIMPORTANT NOTICE - DO NOT IGNORE:\n"
+ "This test program did NOT call " GTEST_INIT_GOOGLE_TEST_NAME_
+ "() before calling RUN_ALL_TESTS(). This is INVALID. Soon " GTEST_NAME_
+ " will start to enforce the valid usage. "
+ "Please fix it ASAP, or IT WILL START TO FAIL.\n"); // NOLINT
+#if GTEST_FOR_GOOGLE_
+ ColoredPrintf(GTestColor::kRed,
+ "For more details, see http://wiki/Main/ValidGUnitMain.\n");
+#endif // GTEST_FOR_GOOGLE_
+ }
+
+ return !failed;
+}
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+ const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+ if (test_shard_file != nullptr) {
+ FILE* const file = posix::FOpen(test_shard_file, "w");
+ if (file == nullptr) {
+ ColoredPrintf(GTestColor::kRed,
+ "Could not write to the test shard status file \"%s\" "
+ "specified by the %s environment variable.\n",
+ test_shard_file, kTestShardStatusFile);
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+ fclose(file);
+ }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+ const char* shard_index_env,
+ bool in_subprocess_for_death_test) {
+ if (in_subprocess_for_death_test) {
+ return false;
+ }
+
+ const int32_t total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+ const int32_t shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+ if (total_shards == -1 && shard_index == -1) {
+ return false;
+ } else if (total_shards == -1 && shard_index != -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestShardIndex << " = " << shard_index
+ << ", but have left " << kTestTotalShards << " unset.\n";
+ ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (total_shards != -1 && shard_index == -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestTotalShards << " = " << total_shards
+ << ", but have left " << kTestShardIndex << " unset.\n";
+ ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (shard_index < 0 || shard_index >= total_shards) {
+ const Message msg = Message()
+ << "Invalid environment variables: we require 0 <= "
+ << kTestShardIndex << " < " << kTestTotalShards
+ << ", but you have " << kTestShardIndex << "=" << shard_index
+ << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+ ColoredPrintf(GTestColor::kRed, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+
+ return total_shards > 1;
+}
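+// For example, GTEST_TOTAL_SHARDS=4 with GTEST_SHARD_INDEX=2 enables
+// sharding, while GTEST_TOTAL_SHARDS=1 with GTEST_SHARD_INDEX=0 is
+// consistent but leaves sharding disabled.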
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+int32_t Int32FromEnvOrDie(const char* var, int32_t default_val) {
+ const char* str_val = posix::GetEnv(var);
+ if (str_val == nullptr) {
+ return default_val;
+ }
+
+ int32_t result;
+ if (!ParseInt32(Message() << "The value of environment variable " << var,
+ str_val, &result)) {
+ exit(EXIT_FAILURE);
+ }
+ return result;
+}
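+// For example, Int32FromEnvOrDie(kTestTotalShards, -1) yields -1 when the
+// variable is unset, and aborts if it is set to a non-integer such as "two".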
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true if and only if the test should be run on this shard. The test id
+// is some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+ return (test_id % total_shards) == shard_index;
+}
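+// For example, with total_shards = 3 and shard_index = 1, the tests with
+// ids 1, 4, 7, ... run on this shard.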
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestSuite and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// https://github.com/google/googletest/blob/master/googletest/docs/advanced.md
+// . Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+ const int32_t total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+ const int32_t shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+ // num_runnable_tests are the number of tests that will
+ // run across all shards (i.e., match filter and are not disabled).
+ // num_selected_tests are the number of tests to be run on
+ // this shard.
+ int num_runnable_tests = 0;
+ int num_selected_tests = 0;
+ for (auto* test_suite : test_suites_) {
+ const std::string& test_suite_name = test_suite->name();
+ test_suite->set_should_run(false);
+
+ for (size_t j = 0; j < test_suite->test_info_list().size(); j++) {
+ TestInfo* const test_info = test_suite->test_info_list()[j];
+ const std::string test_name(test_info->name());
+ // A test is disabled if test suite name or test name matches
+ // kDisableTestFilter.
+ const bool is_disabled = internal::UnitTestOptions::MatchesFilter(
+ test_suite_name, kDisableTestFilter) ||
+ internal::UnitTestOptions::MatchesFilter(
+ test_name, kDisableTestFilter);
+ test_info->is_disabled_ = is_disabled;
+
+ const bool matches_filter = internal::UnitTestOptions::FilterMatchesTest(
+ test_suite_name, test_name);
+ test_info->matches_filter_ = matches_filter;
+
+ const bool is_runnable =
+ (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+ matches_filter;
+
+ const bool is_in_another_shard =
+ shard_tests != IGNORE_SHARDING_PROTOCOL &&
+ !ShouldRunTestOnShard(total_shards, shard_index, num_runnable_tests);
+ test_info->is_in_another_shard_ = is_in_another_shard;
+ const bool is_selected = is_runnable && !is_in_another_shard;
+
+ num_runnable_tests += is_runnable;
+ num_selected_tests += is_selected;
+
+ test_info->should_run_ = is_selected;
+ test_suite->set_should_run(test_suite->should_run() || is_selected);
+ }
+ }
+ return num_selected_tests;
+}
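+// For instance, under GTEST_TOTAL_SHARDS=2 and GTEST_SHARD_INDEX=0, every
+// second runnable test (those with an even runnable index) is selected,
+// regardless of how the tests are distributed across test suites.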
+
+// Prints the given C-string on a single line by replacing all '\n'
+// characters with string "\\n". If the output takes more than
+// max_length characters, only prints the first max_length characters
+// and "...".
+static void PrintOnOneLine(const char* str, int max_length) {
+ if (str != nullptr) {
+ for (int i = 0; *str != '\0'; ++str) {
+ if (i >= max_length) {
+ printf("...");
+ break;
+ }
+ if (*str == '\n') {
+ printf("\\n");
+ i += 2;
+ } else {
+ printf("%c", *str);
+ ++i;
+ }
+ }
+ }
+}
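+// For example, PrintOnOneLine("a\nb", 10) prints `a\nb` (with a literal
+// backslash), while longer strings are truncated and end in "...".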
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+ // Print at most this many characters for each type/value parameter.
+ const int kMaxParamLength = 250;
+
+ for (auto* test_suite : test_suites_) {
+ bool printed_test_suite_name = false;
+
+ for (size_t j = 0; j < test_suite->test_info_list().size(); j++) {
+ const TestInfo* const test_info = test_suite->test_info_list()[j];
+ if (test_info->matches_filter_) {
+ if (!printed_test_suite_name) {
+ printed_test_suite_name = true;
+ printf("%s.", test_suite->name());
+ if (test_suite->type_param() != nullptr) {
+ printf(" # %s = ", kTypeParamLabel);
+ // We print the type parameter on a single line to make
+ // the output easy to parse by a program.
+ PrintOnOneLine(test_suite->type_param(), kMaxParamLength);
+ }
+ printf("\n");
+ }
+ printf(" %s", test_info->name());
+ if (test_info->value_param() != nullptr) {
+ printf(" # %s = ", kValueParamLabel);
+ // We print the value parameter on a single line to make the
+ // output easy to parse by a program.
+ PrintOnOneLine(test_info->value_param(), kMaxParamLength);
+ }
+ printf("\n");
+ }
+ }
+ }
+ fflush(stdout);
+ const std::string& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml" || output_format == "json") {
+ FILE* fileout = OpenFileForWriting(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str());
+ std::stringstream stream;
+ if (output_format == "xml") {
+ XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str())
+ .PrintXmlTestsList(&stream, test_suites_);
+ } else if (output_format == "json") {
+ JsonUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str())
+ .PrintJsonTestList(&stream, test_suites_);
+ }
+ fprintf(fileout, "%s", StringStreamToString(&stream).c_str());
+ fclose(fileout);
+ }
+}
+
+// Sets the OS stack trace getter.
+//
+// Does nothing if the input and the current OS stack trace getter are
+// the same; otherwise, deletes the old getter and makes the input the
+// current getter.
+void UnitTestImpl::set_os_stack_trace_getter(
+ OsStackTraceGetterInterface* getter) {
+ if (os_stack_trace_getter_ != getter) {
+ delete os_stack_trace_getter_;
+ os_stack_trace_getter_ = getter;
+ }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+ if (os_stack_trace_getter_ == nullptr) {
+#ifdef GTEST_OS_STACK_TRACE_GETTER_
+ os_stack_trace_getter_ = new GTEST_OS_STACK_TRACE_GETTER_;
+#else
+ os_stack_trace_getter_ = new OsStackTraceGetter;
+#endif // GTEST_OS_STACK_TRACE_GETTER_
+ }
+
+ return os_stack_trace_getter_;
+}
+
+// Returns the most specific TestResult currently running.
+TestResult* UnitTestImpl::current_test_result() {
+ if (current_test_info_ != nullptr) {
+ return &current_test_info_->result_;
+ }
+ if (current_test_suite_ != nullptr) {
+ return &current_test_suite_->ad_hoc_test_result_;
+ }
+ return &ad_hoc_test_result_;
+}
+
+// Shuffles all test suites, and the tests within each test suite,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+ // Shuffles the death test suites.
+ ShuffleRange(random(), 0, last_death_test_suite_ + 1, &test_suite_indices_);
+
+ // Shuffles the non-death test suites.
+ ShuffleRange(random(), last_death_test_suite_ + 1,
+ static_cast<int>(test_suites_.size()), &test_suite_indices_);
+
+ // Shuffles the tests inside each test suite.
+ for (auto& test_suite : test_suites_) {
+ test_suite->ShuffleTests(random());
+ }
+}
+
+// Restores the test suites and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+ for (size_t i = 0; i < test_suites_.size(); i++) {
+ // Unshuffles the tests in each test suite.
+ test_suites_[i]->UnshuffleTests();
+ // Resets the index of each test suite.
+ test_suite_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+ int skip_count) {
+ // We pass skip_count + 1 to skip this wrapper function in addition
+ // to what the user really wants to skip.
+ return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}  // namespace
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+ // This condition is always false so AlwaysTrue() never actually throws,
+ // but it makes the compiler think that it may throw.
+ if (IsTrue(false))
+ throw ClassUniqueToAlwaysTrue();
+#endif // GTEST_HAS_EXCEPTIONS
+ return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+ const size_t prefix_len = strlen(prefix);
+ if (strncmp(*pstr, prefix, prefix_len) == 0) {
+ *pstr += prefix_len;
+ return true;
+ }
+ return false;
+}
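+// For example, if str points at "--flag", SkipPrefix("--", &str) returns
+// true and leaves str pointing at "flag".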
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+static const char* ParseFlagValue(const char* str, const char* flag,
+ bool def_optional) {
+ // str and flag must not be NULL.
+ if (str == nullptr || flag == nullptr) return nullptr;
+
+ // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+ const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag;
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) {
+ return flag_end;
+ }
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return nullptr;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
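+// For example, with GTEST_FLAG_PREFIX_ defined as "gtest_",
+// ParseFlagValue("--gtest_filter=Foo.*", "filter", false) returns "Foo.*",
+// and ParseFlagValue("--gtest_break_on_failure", "break_on_failure", true)
+// returns the empty string.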
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true as long as it does
+// not start with '0', 'f', or 'F'.
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+static bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Converts the string value to a bool.
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+ return true;
+}
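+// Hence "--gtest_shuffle" and "--gtest_shuffle=1" both set *value to true,
+// while "--gtest_shuffle=0" sets it to false.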
+
+// Parses a string for an int32_t flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(Message() << "The value of flag --" << flag,
+ value_str, value);
+}
+
+// Parses a string for a string flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+template <typename String>
+static bool ParseStringFlag(const char* str, const char* flag, String* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == nullptr) return false;
+
+ // Sets *value to the value of the flag.
+ *value = value_str;
+ return true;
+}
+
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+ return (SkipPrefix("--", &str) ||
+ SkipPrefix("-", &str) ||
+ SkipPrefix("/", &str)) &&
+ !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+ (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+ SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
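+
+// A minimal sketch of the predicate above, assuming the default prefixes
+// "gtest_" and "gtest-" (illustrative only, compiled out).
+#if 0
+static void HasGoogleTestFlagPrefixExample() {
+  HasGoogleTestFlagPrefix("--gtest_foo");           // true: unknown gtest flag.
+  HasGoogleTestFlagPrefix("/gtest_foo");            // true: Windows-style slash.
+  HasGoogleTestFlagPrefix("--gtest_internal_foo");  // false: internal flag.
+  HasGoogleTestFlagPrefix("--other_flag");          // false: foreign prefix.
+}
+#endif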
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+// @@ prints a single '@' character.
+// @R changes the color to red.
+// @G changes the color to green.
+// @Y changes the color to yellow.
+// @D changes to the default terminal text color.
+//
+static void PrintColorEncoded(const char* str) {
+ GTestColor color = GTestColor::kDefault; // The current color.
+
+ // Conceptually, we split the string into segments divided by escape
+ // sequences. Then we print one segment at a time. At the end of
+ // each iteration, the str pointer advances to the beginning of the
+ // next segment.
+ for (;;) {
+ const char* p = strchr(str, '@');
+ if (p == nullptr) {
+ ColoredPrintf(color, "%s", str);
+ return;
+ }
+
+ ColoredPrintf(color, "%s", std::string(str, p).c_str());
+
+ const char ch = p[1];
+ str = p + 2;
+ if (ch == '@') {
+ ColoredPrintf(color, "@");
+ } else if (ch == 'D') {
+ color = GTestColor::kDefault;
+ } else if (ch == 'R') {
+ color = GTestColor::kRed;
+ } else if (ch == 'G') {
+ color = GTestColor::kGreen;
+ } else if (ch == 'Y') {
+ color = GTestColor::kYellow;
+ } else {
+ --str;
+ }
+ }
+}
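+
+// A minimal sketch of the escape encoding, illustrative only: the call below
+// prints "user" in green and "@host" in the default color ("@@" yields a
+// literal '@', "@D" switches back to the default color).
+#if 0
+static void PrintColorEncodedExample() {
+  PrintColorEncoded("@Guser@D@@host\n");
+}
+#endif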
+
+static const char kColorEncodedHelpMessage[] =
+ "This program contains tests written using " GTEST_NAME_
+ ". You can use the\n"
+ "following command line flags to control its behavior:\n"
+ "\n"
+ "Test Selection:\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "list_tests@D\n"
+ " List the names of all tests instead of running them. The name of\n"
+ " TEST(Foo, Bar) is \"Foo.Bar\".\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "filter=@YPOSITIVE_PATTERNS"
+ "[@G-@YNEGATIVE_PATTERNS]@D\n"
+ " Run only the tests whose name matches one of the positive patterns "
+ "but\n"
+ " none of the negative patterns. '?' matches any single character; "
+ "'*'\n"
+ " matches any substring; ':' separates two patterns.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "also_run_disabled_tests@D\n"
+ " Run all disabled tests too.\n"
+ "\n"
+ "Test Execution:\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "repeat=@Y[COUNT]@D\n"
+ " Run the tests repeatedly; use a negative count to repeat forever.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "shuffle@D\n"
+ " Randomize tests' orders on every iteration.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "random_seed=@Y[NUMBER]@D\n"
+ " Random number seed to use for shuffling test orders (between 1 and\n"
+ " 99999, or 0 to use a seed based on the current time).\n"
+ "\n"
+ "Test Output:\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+ " Enable/disable colored output. The default is @Gauto@D.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "brief=1@D\n"
+ " Only print test failures.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "print_time=0@D\n"
+ " Don't print the elapsed time of each test.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "output=@Y(@Gjson@Y|@Gxml@Y)[@G:@YDIRECTORY_PATH@G" GTEST_PATH_SEP_
+ "@Y|@G:@YFILE_PATH]@D\n"
+ " Generate a JSON or XML report in the given directory or with the "
+ "given\n"
+ " file name. @YFILE_PATH@D defaults to @Gtest_detail.xml@D.\n"
+# if GTEST_CAN_STREAM_RESULTS_
+ " @G--" GTEST_FLAG_PREFIX_
+ "stream_result_to=@YHOST@G:@YPORT@D\n"
+ " Stream test results to the given server.\n"
+# endif // GTEST_CAN_STREAM_RESULTS_
+ "\n"
+ "Assertion Behavior:\n"
+# if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+ " @G--" GTEST_FLAG_PREFIX_
+ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+ " Set the default death test style.\n"
+# endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+ " @G--" GTEST_FLAG_PREFIX_
+ "break_on_failure@D\n"
+ " Turn assertion failures into debugger break-points.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "throw_on_failure@D\n"
+ " Turn assertion failures into C++ exceptions for use by an external\n"
+ " test framework.\n"
+ " @G--" GTEST_FLAG_PREFIX_
+ "catch_exceptions=0@D\n"
+ " Do not report exceptions as test failures. Instead, allow them\n"
+ " to crash the program or throw a pop-up (on Windows).\n"
+ "\n"
+ "Except for @G--" GTEST_FLAG_PREFIX_
+ "list_tests@D, you can alternatively set "
+ "the corresponding\n"
+ "environment variable of a flag (all letters in upper-case). For example, "
+ "to\n"
+ "disable colored text output, you can either specify "
+ "@G--" GTEST_FLAG_PREFIX_
+ "color=no@D or set\n"
+ "the @G" GTEST_FLAG_PREFIX_UPPER_
+ "COLOR@D environment variable to @Gno@D.\n"
+ "\n"
+ "For more information, please read the " GTEST_NAME_
+ " documentation at\n"
+ "@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_
+ "\n"
+ "(not one in your own code or tests), please report it to\n"
+ "@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+static bool ParseGoogleTestFlag(const char* const arg) {
+ return ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+ &GTEST_FLAG(also_run_disabled_tests)) ||
+ ParseBoolFlag(arg, kBreakOnFailureFlag,
+ &GTEST_FLAG(break_on_failure)) ||
+ ParseBoolFlag(arg, kCatchExceptionsFlag,
+ &GTEST_FLAG(catch_exceptions)) ||
+ ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+ ParseStringFlag(arg, kDeathTestStyleFlag,
+ &GTEST_FLAG(death_test_style)) ||
+ ParseBoolFlag(arg, kDeathTestUseFork,
+ &GTEST_FLAG(death_test_use_fork)) ||
+ ParseBoolFlag(arg, kFailFast, &GTEST_FLAG(fail_fast)) ||
+ ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+ ParseStringFlag(arg, kInternalRunDeathTestFlag,
+ &GTEST_FLAG(internal_run_death_test)) ||
+ ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+ ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+ ParseBoolFlag(arg, kBriefFlag, &GTEST_FLAG(brief)) ||
+ ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+ ParseBoolFlag(arg, kPrintUTF8Flag, &GTEST_FLAG(print_utf8)) ||
+ ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+ ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+ ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+ ParseInt32Flag(arg, kStackTraceDepthFlag,
+ &GTEST_FLAG(stack_trace_depth)) ||
+ ParseStringFlag(arg, kStreamResultToFlag,
+ &GTEST_FLAG(stream_result_to)) ||
+ ParseBoolFlag(arg, kThrowOnFailureFlag, &GTEST_FLAG(throw_on_failure));
+}
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+static void LoadFlagsFromFile(const std::string& path) {
+ FILE* flagfile = posix::FOpen(path.c_str(), "r");
+ if (!flagfile) {
+ GTEST_LOG_(FATAL) << "Unable to open file \"" << GTEST_FLAG(flagfile)
+ << "\"";
+ }
+ std::string contents(ReadEntireFile(flagfile));
+ posix::FClose(flagfile);
+ std::vector<std::string> lines;
+ SplitString(contents, '\n', &lines);
+ for (size_t i = 0; i < lines.size(); ++i) {
+ if (lines[i].empty())
+ continue;
+ if (!ParseGoogleTestFlag(lines[i].c_str()))
+ g_help_flag = true;
+ }
+}
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
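+
+// A sketch of the flagfile format consumed above (illustrative only): one
+// flag per line, e.g. a file containing
+//
+//   --gtest_filter=FooTest.*
+//   --gtest_repeat=2
+//   --gtest_shuffle
+//
+// Any non-empty line that does not parse as a recognized flag sets
+// g_help_flag, so the help text is printed rather than the line being
+// silently ignored.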
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+ for (int i = 1; i < *argc; i++) {
+ const std::string arg_string = StreamableToString(argv[i]);
+ const char* const arg = arg_string.c_str();
+
+ using internal::ParseBoolFlag;
+ using internal::ParseInt32Flag;
+ using internal::ParseStringFlag;
+
+ bool remove_flag = false;
+ if (ParseGoogleTestFlag(arg)) {
+ remove_flag = true;
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+ } else if (ParseStringFlag(arg, kFlagfileFlag, &GTEST_FLAG(flagfile))) {
+ LoadFlagsFromFile(GTEST_FLAG(flagfile));
+ remove_flag = true;
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
+ } else if (arg_string == "--help" || arg_string == "-h" ||
+ arg_string == "-?" || arg_string == "/?" ||
+ HasGoogleTestFlagPrefix(arg)) {
+ // Both help flag and unrecognized Google Test flags (excluding
+ // internal ones) trigger help display.
+ g_help_flag = true;
+ }
+
+ if (remove_flag) {
+ // Shift the remainder of the argv list left by one. Note
+ // that argv has (*argc + 1) elements, the last one always being
+ // NULL. The following loop moves the trailing NULL element as
+ // well.
+ for (int j = i; j != *argc; j++) {
+ argv[j] = argv[j + 1];
+ }
+
+ // Decrements the argument count.
+ (*argc)--;
+
+ // We also need to decrement the iterator as we just removed
+ // an element.
+ i--;
+ }
+ }
+
+ if (g_help_flag) {
+ // We print the help here instead of in RUN_ALL_TESTS(), as the
+ // latter may not be called at all if the user is using Google
+ // Test with another testing framework.
+ PrintColorEncoded(kColorEncodedHelpMessage);
+ }
+}
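+
+// A minimal sketch of the in-place argv compaction performed above
+// (illustrative only, compiled out).
+#if 0
+static void ParseFlagsOnlyExample() {
+  char a0[] = "prog", a1[] = "--gtest_repeat=2", a2[] = "--foo";
+  char* argv[] = {a0, a1, a2, nullptr};
+  int argc = 3;
+  ParseGoogleTestFlagsOnlyImpl(&argc, argv);
+  // Now argc == 2 and argv holds {"prog", "--foo", nullptr}: the recognized
+  // flag was removed and the trailing NULL shifted left along with it.
+}
+#endif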
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+
+ // Fix the value of *_NSGetArgc() on macOS, but only if
+ // *_NSGetArgv() == argv. This is only applicable to the char** version
+ // of argv.
+#if GTEST_OS_MAC
+#ifndef GTEST_OS_IOS
+ if (*_NSGetArgv() == argv) {
+ *_NSGetArgc() = *argc;
+ }
+#endif
+#endif
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+ // We don't want to run the initialization code twice.
+ if (GTestIsInitialized()) return;
+
+ if (*argc <= 0) return;
+
+ g_argvs.clear();
+ for (int i = 0; i != *argc; i++) {
+ g_argvs.push_back(StreamableToString(argv[i]));
+ }
+
+#if GTEST_HAS_ABSL
+ absl::InitializeSymbolizer(g_argvs[0].c_str());
+#endif // GTEST_HAS_ABSL
+
+ ParseGoogleTestFlagsOnly(argc, argv);
+ GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+} // namespace internal
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ internal::InitGoogleTestImpl(argc, argv);
+#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ internal::InitGoogleTestImpl(argc, argv);
+#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+// This overloaded version can be used on Arduino/embedded platforms where
+// there is no argc/argv.
+void InitGoogleTest() {
+ // Since Arduino doesn't have a command line, fake out the argc/argv arguments
+ int argc = 1;
+ const auto arg0 = "dummy";
+ char* argv0 = const_cast<char*>(arg0);
+ char** argv = &argv0;
+
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(&argc, argv);
+#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ internal::InitGoogleTestImpl(&argc, argv);
+#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+std::string TempDir() {
+#if defined(GTEST_CUSTOM_TEMPDIR_FUNCTION_)
+ return GTEST_CUSTOM_TEMPDIR_FUNCTION_();
+#elif GTEST_OS_WINDOWS_MOBILE
+ return "\\temp\\";
+#elif GTEST_OS_WINDOWS
+ const char* temp_dir = internal::posix::GetEnv("TEMP");
+ if (temp_dir == nullptr || temp_dir[0] == '\0') {
+ return "\\temp\\";
+ } else if (temp_dir[strlen(temp_dir) - 1] == '\\') {
+ return temp_dir;
+ } else {
+ return std::string(temp_dir) + "\\";
+ }
+#elif GTEST_OS_LINUX_ANDROID
+ const char* temp_dir = internal::posix::GetEnv("TEST_TMPDIR");
+ if (temp_dir == nullptr || temp_dir[0] == '\0') {
+ return "/data/local/tmp/";
+ } else {
+ return temp_dir;
+ }
+#elif GTEST_OS_LINUX
+ const char* temp_dir = internal::posix::GetEnv("TEST_TMPDIR");
+ if (temp_dir == nullptr || temp_dir[0] == '\0') {
+ return "/tmp/";
+ } else {
+ return temp_dir;
+ }
+#else
+ return "/tmp/";
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
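+
+// Note on the branches above: on Windows a missing trailing separator is
+// appended, but when TEST_TMPDIR is set on Linux/Android the environment
+// value is returned verbatim, so callers concatenating file names should
+// not rely on a trailing separator being present in every configuration.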
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+void ScopedTrace::PushTrace(const char* file, int line, std::string message) {
+ internal::TraceInfo trace;
+ trace.file = file;
+ trace.line = line;
+ trace.message.swap(message);
+
+ UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+ScopedTrace::~ScopedTrace()
+ GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+ UnitTest::GetInstance()->PopGTestTrace();
+}
+
+} // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// This file implements death tests.
+
+
+#include <functional>
+#include <utility>
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+# include <crt_externs.h>
+# endif // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+
+# if GTEST_OS_LINUX
+# include <signal.h>
+# endif // GTEST_OS_LINUX
+
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+# include <windows.h>
+# else
+# include <sys/mman.h>
+# include <sys/wait.h>
+# endif // GTEST_OS_WINDOWS
+
+# if GTEST_OS_QNX
+# include <spawn.h>
+# endif // GTEST_OS_QNX
+
+# if GTEST_OS_FUCHSIA
+# include <lib/fdio/fd.h>
+# include <lib/fdio/io.h>
+# include <lib/fdio/spawn.h>
+# include <lib/zx/channel.h>
+# include <lib/zx/port.h>
+# include <lib/zx/process.h>
+# include <lib/zx/socket.h>
+# include <zircon/processargs.h>
+# include <zircon/syscalls.h>
+# include <zircon/syscalls/policy.h>
+# include <zircon/syscalls/port.h>
+# endif // GTEST_OS_FUCHSIA
+
+#endif // GTEST_HAS_DEATH_TEST
+
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+//
+// This is defined in internal/gtest-port.h as "fast", but can be overridden by
+// a definition in internal/custom/gtest-port.h. The recommended value, which is
+// used internally at Google, is "threadsafe".
+static const char kDefaultDeathTestStyle[] = GTEST_DEFAULT_DEATH_TEST_STYLE;
+
+GTEST_DEFINE_string_(
+ death_test_style,
+ internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+ "Indicates how to run a death test in a forked child process: "
+ "\"threadsafe\" (child process re-executes the test binary "
+ "from the beginning, running only the specific death test) or "
+ "\"fast\" (child process runs the death test immediately "
+ "after forking).");
+
+GTEST_DEFINE_bool_(
+ death_test_use_fork,
+ internal::BoolFromGTestEnv("death_test_use_fork", false),
+ "Instructs to use fork()/_exit() instead of clone() in death tests. "
+ "Ignored and always uses fork() on POSIX systems where clone() is not "
+ "implemented. Useful when running under valgrind or similar tools if "
+ "those do not support clone(). Valgrind 3.3.1 will just fail if "
+ "it sees an unsupported combination of clone() flags. "
+ "It is not recommended to use this flag w/o valgrind though it will "
+ "work in 99% of the cases. Once valgrind is fixed, this flag will "
+ "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+ internal_run_death_test, "",
+ "Indicates the file, line number, temporal index of "
+ "the single death test to run, and a file descriptor to "
+ "which a success code may be sent, all separated by "
+ "the '|' characters. This flag is specified if and only if the "
+ "current process is a sub-process launched for running a thread-safe "
+ "death test. FOR INTERNAL USE ONLY.");
+} // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Valid only for fast death tests. Indicates the code is running in the
+// child process of a fast style death test.
+# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+static bool g_in_fast_death_test_child = false;
+# endif
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+bool InDeathTestChild() {
+# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA
+
+ // On Windows and Fuchsia, death tests are thread-safe regardless of the value
+ // of the death_test_style flag.
+ return !GTEST_FLAG(internal_run_death_test).empty();
+
+# else
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe")
+ return !GTEST_FLAG(internal_run_death_test).empty();
+ else
+ return g_in_fast_death_test_child;
+# endif
+}
+
+} // namespace internal
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA
+
+ return exit_status == exit_code_;
+
+# else
+
+ return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+
+# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA
+}
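+
+// Typical use of this predicate via the public death test API, e.g.
+//
+//   EXPECT_EXIT(DoWork(), testing::ExitedWithCode(0), "Success");
+//
+// (DoWork is a placeholder for the tested statement.) On POSIX the raw
+// wait(2) status is inspected with WIFEXITED/WEXITSTATUS, so a child
+// terminated by a signal never satisfies ExitedWithCode, whatever the code.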
+
+# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+# if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+ {
+ bool result;
+ if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) {
+ return result;
+ }
+ }
+# endif // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+ return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static std::string ExitSummary(int exit_code) {
+ Message m;
+
+# if GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA
+
+ m << "Exited with exit status " << exit_code;
+
+# else
+
+ if (WIFEXITED(exit_code)) {
+ m << "Exited with exit status " << WEXITSTATUS(exit_code);
+ } else if (WIFSIGNALED(exit_code)) {
+ m << "Terminated by signal " << WTERMSIG(exit_code);
+ }
+# ifdef WCOREDUMP
+ if (WCOREDUMP(exit_code)) {
+ m << " (core dumped)";
+ }
+# endif
+# endif // GTEST_OS_WINDOWS || GTEST_OS_FUCHSIA
+
+ return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+ return !ExitedWithCode(0)(exit_status);
+}
+
+# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement. It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static std::string DeathTestThreadWarning(size_t thread_count) {
+ Message msg;
+ msg << "Death tests use fork(), which is unsafe particularly"
+ << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+ if (thread_count == 0) {
+ msg << "couldn't detect the number of threads.";
+ } else {
+ msg << "detected " << thread_count << " threads.";
+ }
+ msg << " See "
+ "https://github.com/google/googletest/blob/master/docs/"
+ "advanced.md#death-tests-and-threads"
+ << " for more explanation and suggested solutions, especially if"
+ << " this is the last message you see before your test times out.";
+ return msg.GetString();
+}
+# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestThrew = 'T';
+static const char kDeathTestInternalError = 'I';
+
+#if GTEST_OS_FUCHSIA
+
+// File descriptor used for the pipe in the child process.
+static const int kFuchsiaReadPipeFd = 3;
+
+#endif
+
+// An enumeration describing all of the possible ways that a death test can
+// conclude. DIED means that the process died while executing the test
+// code; LIVED means that process lived beyond the end of the test code;
+// RETURNED means that the test statement attempted to execute a return
+// statement, which is not allowed; THREW means that the test statement
+// returned control by throwing an exception. IN_PROGRESS means the test
+// has not yet concluded.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process. Otherwise, the
+// message is simply printed to stderr. In either case, the program
+// then exits with status 1.
+static void DeathTestAbort(const std::string& message) {
+ // On a POSIX system, this function may be called from a threadsafe-style
+ // death test child process, which operates on a very small stack. Use
+ // the heap for any additional non-minuscule memory requirements.
+ const InternalRunDeathTestFlag* const flag =
+ GetUnitTestImpl()->internal_run_death_test_flag();
+ if (flag != nullptr) {
+ FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+ fputc(kDeathTestInternalError, parent);
+ fprintf(parent, "%s", message.c_str());
+ fflush(parent);
+ _exit(1);
+ } else {
+ fprintf(stderr, "%s", message.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+# define GTEST_DEATH_TEST_CHECK_(expression) \
+ do { \
+ if (!::testing::internal::IsTrue(expression)) { \
+ DeathTestAbort( \
+ ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+ + ::testing::internal::StreamableToString(__LINE__) + ": " \
+ + #expression); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again. The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR. If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+ do { \
+ int gtest_retval; \
+ do { \
+ gtest_retval = (expression); \
+ } while (gtest_retval == -1 && errno == EINTR); \
+ if (gtest_retval == -1) { \
+ DeathTestAbort( \
+ ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+ + ::testing::internal::StreamableToString(__LINE__) + ": " \
+ + #expression + " != -1"); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
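+
+// Conceptually, a use such as GTEST_DEATH_TEST_CHECK_SYSCALL_(close(fd))
+// expands to the retry loop sketched below (illustrative only, compiled
+// out): the call is retried while it fails with EINTR, and any other
+// failure aborts the death test with file/line context.
+#if 0
+int gtest_retval;
+do {
+  gtest_retval = close(fd);  // Retried as long as it is interrupted.
+} while (gtest_retval == -1 && errno == EINTR);
+if (gtest_retval == -1) {
+  DeathTestAbort("CHECK failed: ... close(fd) != -1");
+}
+#endif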
+
+// Returns the message describing the last system error in errno.
+std::string GetLastErrnoDescription() {
+ return errno == 0 ? "" : posix::StrError(errno);
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+ Message error;
+ char buffer[256];
+ int num_read;
+
+ do {
+ while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+ buffer[num_read] = '\0';
+ error << buffer;
+ }
+ } while (num_read == -1 && errno == EINTR);
+
+ if (num_read == 0) {
+ GTEST_LOG_(FATAL) << error.GetString();
+ } else {
+ const int last_error = errno;
+ GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+ << GetLastErrnoDescription() << " [" << last_error << "]";
+ }
+}
+
+// Death test constructor. Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+ TestInfo* const info = GetUnitTestImpl()->current_test_info();
+ if (info == nullptr) {
+ DeathTestAbort("Cannot run a death test outside of a TEST or "
+ "TEST_F construct");
+ }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement,
+ Matcher<const std::string&> matcher, const char* file,
+ int line, DeathTest** test) {
+ return GetUnitTestImpl()->death_test_factory()->Create(
+ statement, std::move(matcher), file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+ return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const std::string& message) {
+ last_death_test_message_ = message;
+}
+
+std::string DeathTest::last_death_test_message_;
+
+// Provides a cross-platform implementation of some death test functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+ DeathTestImpl(const char* a_statement, Matcher<const std::string&> matcher)
+ : statement_(a_statement),
+ matcher_(std::move(matcher)),
+ spawned_(false),
+ status_(-1),
+ outcome_(IN_PROGRESS),
+ read_fd_(-1),
+ write_fd_(-1) {}
+
+ // read_fd_ is expected to be closed and cleared by a derived class.
+ ~DeathTestImpl() override { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+ void Abort(AbortReason reason) override;
+ bool Passed(bool status_ok) override;
+
+ const char* statement() const { return statement_; }
+ bool spawned() const { return spawned_; }
+ void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+ int status() const { return status_; }
+ void set_status(int a_status) { status_ = a_status; }
+ DeathTestOutcome outcome() const { return outcome_; }
+ void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+ int read_fd() const { return read_fd_; }
+ void set_read_fd(int fd) { read_fd_ = fd; }
+ int write_fd() const { return write_fd_; }
+ void set_write_fd(int fd) { write_fd_ = fd; }
+
+ // Called in the parent process only. Reads the result code of the death
+ // test child process via a pipe, interprets it to set the outcome_
+ // member, and closes read_fd_. Outputs diagnostics and terminates in
+ // case of unexpected codes.
+ void ReadAndInterpretStatusByte();
+
+ // Returns stderr output from the child process.
+ virtual std::string GetErrorLogs();
+
+ private:
+ // The textual content of the code this object is testing. This class
+ // doesn't own this string and should not attempt to delete it.
+ const char* const statement_;
+ // A matcher that's expected to match the stderr output by the child process.
+ Matcher<const std::string&> matcher_;
+ // True if the death test child process has been successfully spawned.
+ bool spawned_;
+ // The exit status of the child process.
+ int status_;
+ // How the death test concluded.
+ DeathTestOutcome outcome_;
+ // Descriptor to the read end of the pipe to the child process. It is
+ // always -1 in the child process. The child keeps its write end of the
+ // pipe in write_fd_.
+ int read_fd_;
+ // Descriptor to the child's write end of the pipe to the parent process.
+ // It is always -1 in the parent process. The parent keeps its end of the
+ // pipe in read_fd_.
+ int write_fd_;
+};
+
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_. Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+ char flag;
+ int bytes_read;
+
+ // The read() here blocks until data is available (signifying the
+ // failure of the death test) or until the pipe is closed (signifying
+ // its success), so it's okay to call this in the parent before
+ // the child process has exited.
+ do {
+ bytes_read = posix::Read(read_fd(), &flag, 1);
+ } while (bytes_read == -1 && errno == EINTR);
+
+ if (bytes_read == 0) {
+ set_outcome(DIED);
+ } else if (bytes_read == 1) {
+ switch (flag) {
+ case kDeathTestReturned:
+ set_outcome(RETURNED);
+ break;
+ case kDeathTestThrew:
+ set_outcome(THREW);
+ break;
+ case kDeathTestLived:
+ set_outcome(LIVED);
+ break;
+ case kDeathTestInternalError:
+ FailFromInternalError(read_fd()); // Does not return.
+ break;
+ default:
+ GTEST_LOG_(FATAL) << "Death test child process reported "
+ << "unexpected status byte ("
+ << static_cast<unsigned int>(flag) << ")";
+ }
+ } else {
+ GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+ << GetLastErrnoDescription();
+ }
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+ set_read_fd(-1);
+}
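+
+// Summary of the one-byte child-to-parent protocol interpreted above:
+//
+//   pipe closed, 0 bytes read      -> DIED (the expected outcome)
+//   'L' (kDeathTestLived)          -> LIVED
+//   'R' (kDeathTestReturned)       -> RETURNED
+//   'T' (kDeathTestThrew)          -> THREW
+//   'I' (kDeathTestInternalError)  -> fatal; an error message follows on
+//                                     the pipe and is logged by
+//                                     FailFromInternalError().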
+
+std::string DeathTestImpl::GetErrorLogs() {
+ return GetCapturedStderr();
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+ // The parent process considers the death test to be a failure if
+ // it finds any data in our pipe. So, here we write a single flag byte
+ // to the pipe, then exit.
+ const char status_ch =
+ reason == TEST_DID_NOT_DIE ? kDeathTestLived :
+ reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
+
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+ // We are leaking the descriptor here because on some platforms (e.g.,
+ // when built as a Windows DLL), destructors of global objects will still
+ // run after calling _exit(). On such systems, write_fd_ will be
+ // indirectly closed from the destructor of UnitTestImpl, causing double
+ // close if it is also closed here. On debug configurations, double close
+ // may assert. As there are no in-process buffers to flush here, we are
+ // relying on the OS to close the descriptor after the process terminates
+ // when the destructors are not run.
+ _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Returns an indented copy of stderr output for a death test.
+// This makes distinguishing death test output lines from regular log lines
+// much easier.
+static ::std::string FormatDeathTestOutput(const ::std::string& output) {
+ ::std::string ret;
+ for (size_t at = 0; ; ) {
+ const size_t line_end = output.find('\n', at);
+ ret += "[ DEATH ] ";
+ if (line_end == ::std::string::npos) {
+ ret += output.substr(at);
+ break;
+ }
+ ret += output.substr(at, line_end + 1 - at);
+ at = line_end + 1;
+ }
+ return ret;
+}
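+
+// For example, FormatDeathTestOutput("first\nsecond\n") yields
+//
+//   "[ DEATH ] first\n[ DEATH ] second\n[ DEATH ] "
+//
+// including a trailing prefix for the empty remainder after the final
+// newline.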
+
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+// outcome: An enumeration describing how the death test
+// concluded: DIED, LIVED, THREW, or RETURNED. The death test
+// fails in the latter three cases.
+ // status: The exit status of the child process. On *nix, it is in the
+ // format specified by wait(2). On Windows, this is the
+// value supplied to the ExitProcess() API or a numeric code
+// of the exception that terminated the program.
+// matcher_: A matcher that's expected to match the stderr output by the child
+// process.
+//
+// Argument:
+// status_ok: true if exit_status is acceptable in the context of
+// this particular death test, which fails if it is false
+//
+// Returns true if and only if all of the above conditions are met. Otherwise,
+// the first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+ if (!spawned())
+ return false;
+
+ const std::string error_message = GetErrorLogs();
+
+ bool success = false;
+ Message buffer;
+
+ buffer << "Death test: " << statement() << "\n";
+ switch (outcome()) {
+ case LIVED:
+ buffer << " Result: failed to die.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case THREW:
+ buffer << " Result: threw an exception.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case RETURNED:
+ buffer << " Result: illegal return in test statement.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case DIED:
+ if (status_ok) {
+ if (matcher_.Matches(error_message)) {
+ success = true;
+ } else {
+ std::ostringstream stream;
+ matcher_.DescribeTo(&stream);
+ buffer << " Result: died but not with expected error.\n"
+ << " Expected: " << stream.str() << "\n"
+ << "Actual msg:\n"
+ << FormatDeathTestOutput(error_message);
+ }
+ } else {
+ buffer << " Result: died but not with expected exit code:\n"
+ << " " << ExitSummary(status()) << "\n"
+ << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+ }
+ break;
+ case IN_PROGRESS:
+ default:
+ GTEST_LOG_(FATAL)
+ << "DeathTest::Passed somehow called before conclusion of test";
+ }
+
+ DeathTest::set_last_death_test_message(buffer.GetString());
+ return success;
+}
+
+# if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+// ends of it.
+// 2. The parent starts the child and provides it with the information
+// necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+// using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+// this is done before step 3, the object's reference count goes down to
+// 0 and it is destroyed, preventing the child from acquiring it. The
+// parent now has to release it, or read operations on the read end of
+// the pipe will not return when the child terminates.
+ // 5. The parent reads the child's output (outcome code and any possible
+ // error messages) from the pipe and the child's stderr, and then
+ // determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+ WindowsDeathTest(const char* a_statement, Matcher<const std::string&> matcher,
+ const char* file, int line)
+ : DeathTestImpl(a_statement, std::move(matcher)),
+ file_(file),
+ line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+ virtual TestRole AssumeRole();
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // Handle to the write end of the pipe to the child process.
+ AutoHandle write_handle_;
+ // Child process handle.
+ AutoHandle child_handle_;
+ // Event the child process uses to signal the parent that it has
+ // acquired the handle to the write end of the pipe. After seeing this
+ // event the parent can release its own handles to make sure its
+ // ReadFile() calls return when the child terminates.
+ AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ // Wait until the child either signals that it has acquired the write end
+ // of the pipe or it dies.
+ const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+ switch (::WaitForMultipleObjects(2,
+ wait_handles,
+ FALSE, // Waits for any of the handles.
+ INFINITE)) {
+ case WAIT_OBJECT_0:
+ case WAIT_OBJECT_0 + 1:
+ break;
+ default:
+ GTEST_DEATH_TEST_CHECK_(false); // Should not get here.
+ }
+
+ // The child has acquired the write end of the pipe or exited.
+ // We release the handle on our side and continue.
+ write_handle_.Reset();
+ event_handle_.Reset();
+
+ ReadAndInterpretStatusByte();
+
+ // Waits for the child process to exit if it hasn't already. This
+ // returns immediately if the child has already exited, regardless of
+ // whether previous calls to WaitForMultipleObjects synchronized on this
+ // handle or not.
+ GTEST_DEATH_TEST_CHECK_(
+ WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+ INFINITE));
+ DWORD status_code;
+ GTEST_DEATH_TEST_CHECK_(
+ ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+ child_handle_.Reset();
+ set_status(static_cast<int>(status_code));
+ return status();
+}
+
+// The AssumeRole process for a Windows death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != nullptr) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ // WindowsDeathTest uses an anonymous pipe to communicate results of
+ // a death test.
+ SECURITY_ATTRIBUTES handles_are_inheritable = {sizeof(SECURITY_ATTRIBUTES),
+ nullptr, TRUE};
+ HANDLE read_handle, write_handle;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+ 0) // Default buffer size.
+ != FALSE);
+ set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+ O_RDONLY));
+ write_handle_.Reset(write_handle);
+ event_handle_.Reset(::CreateEvent(
+ &handles_are_inheritable,
+ TRUE, // The event is manual-reset: it stays signaled until reset.
+ FALSE, // The initial state is non-signaled.
+ nullptr)); // The event is unnamed.
+ GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != nullptr);
+ const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ +
+ kFilterFlag + "=" + info->test_suite_name() +
+ "." + info->name();
+ const std::string internal_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
+ "=" + file_ + "|" + StreamableToString(line_) + "|" +
+ StreamableToString(death_test_index) + "|" +
+ StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
+ // size_t has the same width as pointers on both 32-bit and 64-bit
+ // Windows platforms.
+ // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+ "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
+ "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
+
+ char executable_path[_MAX_PATH + 1]; // NOLINT
+ GTEST_DEATH_TEST_CHECK_(_MAX_PATH + 1 != ::GetModuleFileNameA(nullptr,
+ executable_path,
+ _MAX_PATH));
+
+ std::string command_line =
+ std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
+ internal_flag + "\"";
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // The child process will share the standard handles with the parent.
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(startup_info));
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ PROCESS_INFORMATION process_info;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreateProcessA(
+ executable_path, const_cast<char*>(command_line.c_str()),
+ nullptr, // Returned process handle is not inheritable.
+ nullptr, // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_).
+ 0x0, // Default creation flags.
+ nullptr, // Inherit the parent's environment.
+ UnitTest::GetInstance()->original_working_dir(), &startup_info,
+ &process_info) != FALSE);
+ child_handle_.Reset(process_info.hProcess);
+ ::CloseHandle(process_info.hThread);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+# elif GTEST_OS_FUCHSIA
+
+class FuchsiaDeathTest : public DeathTestImpl {
+ public:
+ FuchsiaDeathTest(const char* a_statement, Matcher<const std::string&> matcher,
+ const char* file, int line)
+ : DeathTestImpl(a_statement, std::move(matcher)),
+ file_(file),
+ line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ int Wait() override;
+ TestRole AssumeRole() override;
+ std::string GetErrorLogs() override;
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // The stderr data captured by the child process.
+ std::string captured_stderr_;
+
+ zx::process child_process_;
+ zx::channel exception_channel_;
+ zx::socket stderr_socket_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+ Arguments() { args_.push_back(nullptr); }
+
+ ~Arguments() {
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+ ++i) {
+ free(*i);
+ }
+ }
+ void AddArgument(const char* argument) {
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
+ }
+
+ template <typename Str>
+ void AddArguments(const ::std::vector<Str>& arguments) {
+ for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+ i != arguments.end();
+ ++i) {
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+ }
+ }
+ char* const* Argv() {
+ return &args_[0];
+ }
+
+ int size() {
+ return static_cast<int>(args_.size()) - 1;
+ }
+
+ private:
+ std::vector<char*> args_;
+};
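+
+// A minimal sketch of the Arguments helper (illustrative only, compiled
+// out): the vector is kept NULL-terminated by inserting before the
+// sentinel, so Argv() is always directly usable as an exec-style argv.
+#if 0
+static void ArgumentsExample() {
+  Arguments args;
+  args.AddArgument("prog");
+  args.AddArgument("--gtest_filter=Foo.*");
+  // args.Argv() is {"prog", "--gtest_filter=Foo.*", nullptr} and
+  // args.size() is 2; the NULL sentinel is not counted.
+}
+#endif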
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int FuchsiaDeathTest::Wait() {
+ const int kProcessKey = 0;
+ const int kSocketKey = 1;
+ const int kExceptionKey = 2;
+
+ if (!spawned())
+ return 0;
+
+ // Create a port to wait for socket/task/exception events.
+ zx_status_t status_zx;
+ zx::port port;
+ status_zx = zx::port::create(0, &port);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ // Register to wait for the child process to terminate.
+ status_zx = child_process_.wait_async(
+ port, kProcessKey, ZX_PROCESS_TERMINATED, 0);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ // Register to wait for the socket to be readable or closed.
+ status_zx = stderr_socket_.wait_async(
+ port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED, 0);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ // Register to wait for an exception.
+ status_zx = exception_channel_.wait_async(
+ port, kExceptionKey, ZX_CHANNEL_READABLE, 0);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ bool process_terminated = false;
+ bool socket_closed = false;
+ do {
+ zx_port_packet_t packet = {};
+ status_zx = port.wait(zx::time::infinite(), &packet);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ if (packet.key == kExceptionKey) {
+ // Process encountered an exception. Kill it directly rather than
+ // letting other handlers process the event. We will get a kProcessKey
+ // event when the process actually terminates.
+ status_zx = child_process_.kill();
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+ } else if (packet.key == kProcessKey) {
+ // Process terminated.
+ GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type));
+ GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_PROCESS_TERMINATED);
+ process_terminated = true;
+ } else if (packet.key == kSocketKey) {
+ GTEST_DEATH_TEST_CHECK_(ZX_PKT_IS_SIGNAL_ONE(packet.type));
+ if (packet.signal.observed & ZX_SOCKET_READABLE) {
+ // Read data from the socket.
+ constexpr size_t kBufferSize = 1024;
+ do {
+ size_t old_length = captured_stderr_.length();
+ size_t bytes_read = 0;
+ captured_stderr_.resize(old_length + kBufferSize);
+ status_zx = stderr_socket_.read(
+ 0, &captured_stderr_.front() + old_length, kBufferSize,
+ &bytes_read);
+ captured_stderr_.resize(old_length + bytes_read);
+ } while (status_zx == ZX_OK);
+ if (status_zx == ZX_ERR_PEER_CLOSED) {
+ socket_closed = true;
+ } else {
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_ERR_SHOULD_WAIT);
+ status_zx = stderr_socket_.wait_async(
+ port, kSocketKey, ZX_SOCKET_READABLE | ZX_SOCKET_PEER_CLOSED, 0);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+ }
+ } else {
+ GTEST_DEATH_TEST_CHECK_(packet.signal.observed & ZX_SOCKET_PEER_CLOSED);
+ socket_closed = true;
+ }
+ }
+ } while (!process_terminated && !socket_closed);
+
+ ReadAndInterpretStatusByte();
+
+ zx_info_process_t buffer;
+ status_zx = child_process_.get_info(ZX_INFO_PROCESS, &buffer, sizeof(buffer),
+ nullptr, nullptr);
+ GTEST_DEATH_TEST_CHECK_(status_zx == ZX_OK);
+
+ GTEST_DEATH_TEST_CHECK_(buffer.flags & ZX_INFO_PROCESS_FLAG_EXITED);
+ set_status(static_cast<int>(buffer.return_code));
+ return status();
+}
+
+// The AssumeRole process for a Fuchsia death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole FuchsiaDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != nullptr) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(kFuchsiaReadPipeFd);
+ return EXECUTE_TEST;
+ }
+
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // Build the child process command line.
+ const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ +
+ kFilterFlag + "=" + info->test_suite_name() +
+ "." + info->name();
+ const std::string internal_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+ + file_ + "|"
+ + StreamableToString(line_) + "|"
+ + StreamableToString(death_test_index);
+ Arguments args;
+ args.AddArguments(GetInjectableArgvs());
+ args.AddArgument(filter_flag.c_str());
+ args.AddArgument(internal_flag.c_str());
+
+ // Build the pipe for communication with the child.
+ zx_status_t status;
+ zx_handle_t child_pipe_handle;
+ int child_pipe_fd;
+ status = fdio_pipe_half(&child_pipe_fd, &child_pipe_handle);
+ GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
+ set_read_fd(child_pipe_fd);
+
+ // Set the pipe handle for the child.
+ fdio_spawn_action_t spawn_actions[2] = {};
+ fdio_spawn_action_t* add_handle_action = &spawn_actions[0];
+ add_handle_action->action = FDIO_SPAWN_ACTION_ADD_HANDLE;
+ add_handle_action->h.id = PA_HND(PA_FD, kFuchsiaReadPipeFd);
+ add_handle_action->h.handle = child_pipe_handle;
+
+ // Create a socket pair that will be used to receive the child process' stderr.
+ zx::socket stderr_producer_socket;
+ status =
+ zx::socket::create(0, &stderr_producer_socket, &stderr_socket_);
+ GTEST_DEATH_TEST_CHECK_(status >= 0);
+ int stderr_producer_fd = -1;
+ status =
+ fdio_fd_create(stderr_producer_socket.release(), &stderr_producer_fd);
+ GTEST_DEATH_TEST_CHECK_(status >= 0);
+
+ // Make the stderr socket nonblocking.
+ GTEST_DEATH_TEST_CHECK_(fcntl(stderr_producer_fd, F_SETFL, 0) == 0);
+
+ fdio_spawn_action_t* add_stderr_action = &spawn_actions[1];
+ add_stderr_action->action = FDIO_SPAWN_ACTION_CLONE_FD;
+ add_stderr_action->fd.local_fd = stderr_producer_fd;
+ add_stderr_action->fd.target_fd = STDERR_FILENO;
+
+ // Create a child job.
+ zx_handle_t child_job = ZX_HANDLE_INVALID;
+ status = zx_job_create(zx_job_default(), 0, &child_job);
+ GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
+ zx_policy_basic_t policy;
+ policy.condition = ZX_POL_NEW_ANY;
+ policy.policy = ZX_POL_ACTION_ALLOW;
+ status = zx_job_set_policy(
+ child_job, ZX_JOB_POL_RELATIVE, ZX_JOB_POL_BASIC, &policy, 1);
+ GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
+
+ // Create an exception channel attached to the |child_job|, to allow
+ // us to suppress the system default exception handler from firing.
+ status =
+ zx_task_create_exception_channel(
+ child_job, 0, exception_channel_.reset_and_get_address());
+ GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
+
+ // Spawn the child process.
+ status = fdio_spawn_etc(
+ child_job, FDIO_SPAWN_CLONE_ALL, args.Argv()[0], args.Argv(), nullptr,
+ 2, spawn_actions, child_process_.reset_and_get_address(), nullptr);
+ GTEST_DEATH_TEST_CHECK_(status == ZX_OK);
+
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+std::string FuchsiaDeathTest::GetErrorLogs() {
+ return captured_stderr_;
+}
+
+# else // We are neither on Windows nor on Fuchsia.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface. Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+ ForkingDeathTest(const char* statement, Matcher<const std::string&> matcher);
+
+ // All of these virtual functions are inherited from DeathTest.
+ int Wait() override;
+
+ protected:
+ void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+ // PID of child process during death test; 0 in the child process itself.
+ pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement,
+ Matcher<const std::string&> matcher)
+ : DeathTestImpl(a_statement, std::move(matcher)), child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ ReadAndInterpretStatusByte();
+
+ int status_value;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+ set_status(status_value);
+ return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+ NoExecDeathTest(const char* a_statement, Matcher<const std::string&> matcher)
+ : ForkingDeathTest(a_statement, std::move(matcher)) {}
+ TestRole AssumeRole() override;
+};
+
+// The AssumeRole process for a fork-and-run death test. It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+ const size_t thread_count = GetThreadCount();
+ if (thread_count != 1) {
+ GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+ DeathTest::set_last_death_test_message("");
+ CaptureStderr();
+ // When we fork the process below, the log file buffers are copied, but the
+ // file descriptors are shared. We flush all log files here so that closing
+ // the file descriptors in the child process doesn't throw off the
+ // synchronization between descriptors and buffers in the parent process.
+ // This is as close to the fork as possible to avoid a race condition in case
+ // there are multiple threads running before the death test, and another
+ // thread writes to the log file.
+ FlushInfoLog();
+
+ const pid_t child_pid = fork();
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ set_child_pid(child_pid);
+ if (child_pid == 0) {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+ set_write_fd(pipe_fd[1]);
+ // Redirects all logging to stderr in the child process to prevent
+ // concurrent writes to the log files. We capture stderr in the parent
+ // process and append the child process' output to a log.
+ LogToStderr();
+ // Event forwarding to the listeners of the event listener API must be
+ // shut down in death test subprocesses.
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+ g_in_fast_death_test_child = true;
+ return EXECUTE_TEST;
+ } else {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+ }
+}
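+
+// A minimal sketch of the fork-and-pipe skeleton that AssumeRole builds on,
+// with the gtest bookkeeping stripped away (illustrative only, compiled
+// out).
+#if 0
+static bool RunStatementInChild() {
+  int pipe_fd[2];
+  if (pipe(pipe_fd) == -1) return false;
+  const pid_t pid = fork();
+  if (pid == 0) {        // Child: keep the write end and run the statement.
+    close(pipe_fd[0]);
+    // ... execute the test statement; on an abnormal outcome, write a
+    // single status byte to pipe_fd[1] before exiting ...
+    _exit(1);
+  }
+  close(pipe_fd[1]);     // Parent: keep the read end and oversee the child.
+  // ... read the status byte, then waitpid(pid, ...) for the exit status ...
+  return true;
+}
+#endif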
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+ ExecDeathTest(const char* a_statement, Matcher<const std::string&> matcher,
+ const char* file, int line)
+ : ForkingDeathTest(a_statement, std::move(matcher)),
+ file_(file),
+ line_(line) {}
+ TestRole AssumeRole() override;
+
+ private:
+ static ::std::vector<std::string> GetArgvsForDeathTestChildProcess() {
+ ::std::vector<std::string> args = GetInjectableArgvs();
+# if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+ ::std::vector<std::string> extra_args =
+ GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_();
+ args.insert(args.end(), extra_args.begin(), extra_args.end());
+# endif // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+ return args;
+ }
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+ Arguments() { args_.push_back(nullptr); }
+
+ ~Arguments() {
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+ ++i) {
+ free(*i);
+ }
+ }
+ void AddArgument(const char* argument) {
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
+ }
+
+ template <typename Str>
+ void AddArguments(const ::std::vector<Str>& arguments) {
+ for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+ i != arguments.end();
+ ++i) {
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+ }
+ }
+ char* const* Argv() {
+ return &args_[0];
+ }
+
+ private:
+ std::vector<char*> args_;
+};
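+
+// Example usage (hypothetical values), mirroring ExecDeathTest::AssumeRole()
+// below:
+//
+//   Arguments args;
+//   args.AddArguments(GetArgvsForDeathTestChildProcess());
+//   args.AddArgument("--gtest_filter=FooDeathTest.Bar");
+//   execv(args.Argv()[0], args.Argv());  // argv stays NULL-terminated.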
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+ char* const* argv; // Command-line arguments for the child's call to exec
+ int close_fd; // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_QNX
+extern "C" char** environ;
+# else // GTEST_OS_QNX
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+ ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+ }
+
+ // We can safely call execv() as it's almost a direct system call. We
+ // cannot use execvp() as it's a libc function and thus potentially
+ // unsafe. Since execv() doesn't search the PATH, the user must
+ // invoke the test program via a valid path that contains at least
+ // one path separator.
+ execv(args->argv[0], args->argv);
+ DeathTestAbort(std::string("execv(") + args->argv[0] + ", ...) in " +
+ original_dir + " failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+}
+# endif // GTEST_OS_QNX
+
+# if GTEST_HAS_CLONE
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give the
+// correct answer.
+static void StackLowerThanAddress(const void* ptr,
+ bool* result) GTEST_NO_INLINE_;
+// Make sure sanitizers do not tamper with the stack here.
+// Ideally, we want to use `__builtin_frame_address` instead of a local variable
+// address with sanitizer disabled, but it does not work when the
+// compiler optimizes the stack frame out, which happens on PowerPC targets.
+// HWAddressSanitizer adds a random tag to the MSB of the local variable
+// address, making the comparison result unpredictable.
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+static void StackLowerThanAddress(const void* ptr, bool* result) {
+ int dummy = 0;
+ *result = std::less<const void*>()(&dummy, ptr);
+}
+
+// Make sure AddressSanitizer does not tamper with the stack here.
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+static bool StackGrowsDown() {
+ int dummy = 0;
+ bool result;
+ StackLowerThanAddress(&dummy, &result);
+ return result;
+}
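+
+// Note: on mainstream targets (e.g. x86-64 and ARM Linux) the stack grows
+// down, so StackGrowsDown() is expected to return true there; the runtime
+// probe keeps this correct on the rare ABIs with an upward-growing stack
+// (such as PA-RISC).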
+# endif // GTEST_HAS_CLONE
+
+// Spawns a child process with the same executable as the current process in
+// a thread-safe manner and instructs it to run the death test. The
+// implementation uses fork(2) + exec. On systems where clone(2) is
+// available, it is used instead, being slightly more thread-safe. On QNX,
+// fork supports only single-threaded environments, so this function uses
+// spawn(2) there instead. The function dies with an error message if
+// anything goes wrong.
+static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {
+ ExecDeathTestArgs args = { argv, close_fd };
+ pid_t child_pid = -1;
+
+# if GTEST_OS_QNX
+ // Obtains the current directory and sets it to be closed in the child
+ // process.
+ const int cwd_fd = open(".", O_RDONLY);
+ GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+ }
+
+ int fd_flags;
+ // Set close_fd to be closed after spawn.
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,
+ fd_flags | FD_CLOEXEC));
+ struct inheritance inherit = {0};
+ // spawn is a system call.
+ child_pid = spawn(args.argv[0], 0, nullptr, &inherit, args.argv, environ);
+ // Restores the current working directory.
+ GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));
+
+# else // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+ // When a SIGPROF signal is received while fork() or clone() are executing,
+ // the process may hang. To avoid this, we ignore SIGPROF here and re-enable
+ // it after the call to fork()/clone() is complete.
+ struct sigaction saved_sigprof_action;
+ struct sigaction ignore_sigprof_action;
+ memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));
+ sigemptyset(&ignore_sigprof_action.sa_mask);
+ ignore_sigprof_action.sa_handler = SIG_IGN;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(
+ SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));
+# endif // GTEST_OS_LINUX
+
+# if GTEST_HAS_CLONE
+ const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+ if (!use_fork) {
+ static const bool stack_grows_down = StackGrowsDown();
+ const auto stack_size = static_cast<size_t>(getpagesize() * 2);
+ // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+ void* const stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+
+ // Maximum stack alignment in bytes: For a downward-growing stack, this
+ // amount is subtracted from size of the stack space to get an address
+ // that is within the stack space and is aligned on all systems we care
+ // about. As far as I know there is no ABI with stack alignment greater
+ // than 64. We assume stack and stack_size already have alignment of
+ // kMaxStackAlignment.
+ const size_t kMaxStackAlignment = 64;
+ void* const stack_top =
+ static_cast<char*>(stack) +
+ (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
+ GTEST_DEATH_TEST_CHECK_(
+ static_cast<size_t>(stack_size) > kMaxStackAlignment &&
+ reinterpret_cast<uintptr_t>(stack_top) % kMaxStackAlignment == 0);
+
+ child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+ GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+ }
+# else
+ const bool use_fork = true;
+# endif // GTEST_HAS_CLONE
+
+ if (use_fork && (child_pid = fork()) == 0) {
+ ExecDeathTestChildMain(&args);
+ _exit(0);
+ }
+# endif // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(
+ sigaction(SIGPROF, &saved_sigprof_action, nullptr));
+# endif // GTEST_OS_LINUX
+
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test. It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != nullptr) {
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+ // Clear the close-on-exec flag on the write end of the pipe, lest
+ // it be closed when the child process does an exec:
+ GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+ const std::string filter_flag = std::string("--") + GTEST_FLAG_PREFIX_ +
+ kFilterFlag + "=" + info->test_suite_name() +
+ "." + info->name();
+ const std::string internal_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+ + file_ + "|" + StreamableToString(line_) + "|"
+ + StreamableToString(death_test_index) + "|"
+ + StreamableToString(pipe_fd[1]);
+ Arguments args;
+ args.AddArguments(GetArgvsForDeathTestChildProcess());
+ args.AddArgument(filter_flag.c_str());
+ args.AddArgument(internal_flag.c_str());
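+ // The child's command line then looks roughly like this (hypothetical
+ // values):
+ //   ./foo_test --gtest_filter=FooDeathTest.Bar \
+ //     --gtest_internal_run_death_test=foo_test.cc|42|0|7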
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // See the comment in NoExecDeathTest::AssumeRole for why the next line
+ // is necessary.
+ FlushInfoLog();
+
+ const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_child_pid(child_pid);
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+# endif // !GTEST_OS_WINDOWS
+
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address. If the test should be
+// skipped, sets that pointer to NULL. Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement,
+ Matcher<const std::string&> matcher,
+ const char* file, int line,
+ DeathTest** test) {
+ UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const int death_test_index = impl->current_test_info()
+ ->increment_death_test_count();
+
+ if (flag != nullptr) {
+ if (death_test_index > flag->index()) {
+ DeathTest::set_last_death_test_message(
+ "Death test count (" + StreamableToString(death_test_index)
+ + ") somehow exceeded expected maximum ("
+ + StreamableToString(flag->index()) + ")");
+ return false;
+ }
+
+ if (!(flag->file() == file && flag->line() == line &&
+ flag->index() == death_test_index)) {
+ *test = nullptr;
+ return true;
+ }
+ }
+
+# if GTEST_OS_WINDOWS
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new WindowsDeathTest(statement, std::move(matcher), file, line);
+ }
+
+# elif GTEST_OS_FUCHSIA
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new FuchsiaDeathTest(statement, std::move(matcher), file, line);
+ }
+
+# else
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe") {
+ *test = new ExecDeathTest(statement, std::move(matcher), file, line);
+ } else if (GTEST_FLAG(death_test_style) == "fast") {
+ *test = new NoExecDeathTest(statement, std::move(matcher));
+ }
+
+# endif // GTEST_OS_WINDOWS
+
+ else { // NOLINT - this is more readable than unbalanced brackets inside #if.
+ DeathTest::set_last_death_test_message(
+ "Unknown death test style \"" + GTEST_FLAG(death_test_style)
+ + "\" encountered");
+ return false;
+ }
+
+ return true;
+}
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
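+// (The parent encodes the HANDLE values as size_t fields on the child's
+// command line; see ParseInternalRunDeathTestFlag() below.)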
+static int GetStatusFileDescriptor(unsigned int parent_process_id,
+ size_t write_handle_as_size_t,
+ size_t event_handle_as_size_t) {
+ AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+ FALSE, // Non-inheritable.
+ parent_process_id));
+ if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+ DeathTestAbort("Unable to open parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+ const HANDLE write_handle =
+ reinterpret_cast<HANDLE>(write_handle_as_size_t);
+ HANDLE dup_write_handle;
+
+ // The newly initialized handle is accessible only in the parent
+ // process. To obtain one accessible within the child, we need to use
+ // DuplicateHandle.
+ if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+ ::GetCurrentProcess(), &dup_write_handle,
+ 0x0, // Requested privileges ignored since
+ // DUPLICATE_SAME_ACCESS is used.
+ FALSE, // Request a non-inheritable handle.
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort("Unable to duplicate the pipe handle " +
+ StreamableToString(write_handle_as_size_t) +
+ " from the parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+ HANDLE dup_event_handle;
+
+ if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+ ::GetCurrentProcess(), &dup_event_handle,
+ 0x0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort("Unable to duplicate the event handle " +
+ StreamableToString(event_handle_as_size_t) +
+ " from the parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ const int write_fd =
+ ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+ if (write_fd == -1) {
+ DeathTestAbort("Unable to convert pipe handle " +
+ StreamableToString(write_handle_as_size_t) +
+ " to a file descriptor");
+ }
+
+ // Signals the parent that the write end of the pipe has been acquired
+ // so the parent can release its own write end.
+ ::SetEvent(dup_event_handle);
+
+ return write_fd;
+}
+# endif // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+ if (GTEST_FLAG(internal_run_death_test) == "") return nullptr;
+
+ // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+ // can use it here.
+ int line = -1;
+ int index = -1;
+ ::std::vector< ::std::string> fields;
+ SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+ int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+ unsigned int parent_process_id = 0;
+ size_t write_handle_as_size_t = 0;
+ size_t event_handle_as_size_t = 0;
+
+ if (fields.size() != 6
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &parent_process_id)
+ || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+ || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+ DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
+ GTEST_FLAG(internal_run_death_test));
+ }
+ write_fd = GetStatusFileDescriptor(parent_process_id,
+ write_handle_as_size_t,
+ event_handle_as_size_t);
+
+# elif GTEST_OS_FUCHSIA
+
+ if (fields.size() != 3
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)) {
+ DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+ + GTEST_FLAG(internal_run_death_test));
+ }
+
+# else
+
+ if (fields.size() != 4
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &write_fd)) {
+ DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+ + GTEST_FLAG(internal_run_death_test));
+ }
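+ // E.g. a (hypothetical) flag value "foo_test.cc|42|0|7" yields file
+ // "foo_test.cc", line 42, death test index 0, and write_fd 7.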
+
+# endif // GTEST_OS_WINDOWS
+
+ return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
+
+} // namespace internal
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#else
+# include <limits.h>
+# include <climits> // Some Linux distributions define PATH_MAX here.
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif // GTEST_OS_WINDOWS
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kCurrentDirectoryString[] = "./";
+#endif // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+ return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+ return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \
+ GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 || GTEST_OS_ESP32 || \
+ GTEST_OS_XTENSA
+ // These platforms do not have a current directory, so we just return
+ // something reasonable.
+ return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(_getcwd(cwd, sizeof(cwd)) == nullptr ? "" : cwd);
+#else
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ char* result = getcwd(cwd, sizeof(cwd));
+# if GTEST_OS_NACL
+ // getcwd will likely fail in NaCl due to the sandbox, so return something
+ // reasonable. The user may have provided a shim implementation for getcwd,
+ // however, so fallback only when failure is detected.
+ return FilePath(result == nullptr ? kCurrentDirectoryString : cwd);
+# endif // GTEST_OS_NACL
+ return FilePath(result == nullptr ? "" : cwd);
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+ const std::string dot_extension = std::string(".") + extension;
+ if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
+ return FilePath(pathname_.substr(
+ 0, pathname_.length() - dot_extension.length()));
+ }
+ return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+ const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+ const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+ // Comparing two pointers of which only one is NULL is undefined.
+ if (last_alt_sep != nullptr &&
+ (last_sep == nullptr || last_alt_sep > last_sep)) {
+ return last_alt_sep;
+ }
+#endif
+ return last_sep;
+}
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ return last_sep ? FilePath(last_sep + 1) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ std::string dir;
+ if (last_sep) {
+ dir = std::string(c_str(), static_cast<size_t>(last_sep + 1 - c_str()));
+ } else {
+ dir = kCurrentDirectoryString;
+ }
+ return FilePath(dir);
+}
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension) {
+ std::string file;
+ if (number == 0) {
+ file = base_name.string() + "." + extension;
+ } else {
+ file = base_name.string() + "_" + StreamableToString(number)
+ + "." + extension;
+ }
+ return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path) {
+ if (directory.IsEmpty())
+ return relative_path;
+ const FilePath dir(directory.RemoveTrailingPathSeparator());
+ return FilePath(dir.string() + kPathSeparator + relative_path.string());
+}
+
+// Returns true if pathname describes something findable in the file-system,
+// either a file, directory, or whatever.
+bool FilePath::FileOrDirectoryExists() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ return attributes != kInvalidFileAttributes;
+#else
+ posix::StatStruct file_stat{};
+ return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns true if pathname describes a directory in the file-system
+// that exists.
+bool FilePath::DirectoryExists() const {
+ bool result = false;
+#if GTEST_OS_WINDOWS
+ // Don't strip off trailing separator if path is a root directory on
+ // Windows (like "C:\\").
+ const FilePath& path(IsRootDirectory() ? *this :
+ RemoveTrailingPathSeparator());
+#else
+ const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ if ((attributes != kInvalidFileAttributes) &&
+ (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ result = true;
+ }
+#else
+ posix::StatStruct file_stat{};
+ result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+ posix::IsDir(file_stat);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ return result;
+}
+
+// Returns true if pathname describes a root directory. (Windows has one
+// root directory per disk drive.)
+bool FilePath::IsRootDirectory() const {
+#if GTEST_OS_WINDOWS
+ return pathname_.length() == 3 && IsAbsolutePath();
+#else
+ return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
+ const char* const name = pathname_.c_str();
+#if GTEST_OS_WINDOWS
+ return pathname_.length() >= 3 &&
+ ((name[0] >= 'a' && name[0] <= 'z') ||
+ (name[0] >= 'A' && name[0] <= 'Z')) &&
+ name[1] == ':' &&
+ IsPathSeparator(name[2]);
+#else
+ return IsPathSeparator(name[0]);
+#endif
+}
+
+// Returns a pathname for a file that does not currently exist. The pathname
+// will be directory/base_name.extension or
+// directory/base_name_<number>.extension if directory/base_name.extension
+// already exists. The number will be incremented until a pathname is found
+// that does not already exist.
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+// There could be a race condition if two or more processes are calling this
+// function at the same time -- they could both pick the same filename.
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension) {
+ FilePath full_pathname;
+ int number = 0;
+ do {
+ full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
+ } while (full_pathname.FileOrDirectoryExists());
+ return full_pathname;
+}
+
+// Returns true if FilePath ends with a path separator, which indicates that
+// it is intended to represent a directory. Returns false otherwise.
+// This does NOT check that a directory (or file) actually exists.
+bool FilePath::IsDirectory() const {
+ return !pathname_.empty() &&
+ IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
+}
+
+// Create directories so that path exists. Returns true if successful or if
+// the directories already exist; returns false if unable to create directories
+// for any reason.
+bool FilePath::CreateDirectoriesRecursively() const {
+ if (!this->IsDirectory()) {
+ return false;
+ }
+
+ if (pathname_.length() == 0 || this->DirectoryExists()) {
+ return true;
+ }
+
+ const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
+ return parent.CreateDirectoriesRecursively() && this->CreateFolder();
+}
+
+// Create the directory so that path exists. Returns true if successful or
+// if the directory already exists; returns false if unable to create the
+// directory for any reason, including if the parent directory does not
+// exist. Not named "CreateDirectory" because that's a macro on Windows.
+bool FilePath::CreateFolder() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ FilePath removed_sep(this->RemoveTrailingPathSeparator());
+ LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
+ int result = CreateDirectory(unicode, nullptr) ? 0 : -1;
+ delete [] unicode;
+#elif GTEST_OS_WINDOWS
+ int result = _mkdir(pathname_.c_str());
+#elif GTEST_OS_ESP8266 || GTEST_OS_XTENSA
+ // do nothing
+ int result = 0;
+#else
+ int result = mkdir(pathname_.c_str(), 0777);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ if (result == -1) {
+ return this->DirectoryExists(); // An error is OK if the directory exists.
+ }
+ return true; // No error.
+}
+
+// If input name has a trailing separator character, remove it and return the
+// name, otherwise return the name string unmodified.
+// On Windows platform, uses \ as the separator, other platforms use /.
+FilePath FilePath::RemoveTrailingPathSeparator() const {
+ return IsDirectory()
+ ? FilePath(pathname_.substr(0, pathname_.length() - 1))
+ : *this;
+}
+
+// Removes any redundant separators that might be in the pathname.
+// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+// redundancies that might be in a pathname involving "." or "..".
+void FilePath::Normalize() {
+ auto out = pathname_.begin();
+
+ for (const char character : pathname_) {
+ if (!IsPathSeparator(character)) {
+ *(out++) = character;
+ } else if (out == pathname_.begin() || *std::prev(out) != kPathSeparator) {
+ *(out++) = kPathSeparator;
+ } else {
+ continue;
+ }
+ }
+
+ pathname_.erase(out, pathname_.end());
+}
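+
+// For example, on Windows (where GTEST_HAS_ALT_PATH_SEP_ is set) Normalize()
+// turns "foo//bar" and "foo/\bar" into "foo\bar": every run of separators
+// collapses to a single kPathSeparator.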
+
+} // namespace internal
+} // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This file implements just enough of the matcher interface to allow
+// EXPECT_DEATH and friends to accept a matcher argument.
+
+
+#include <string>
+
+namespace testing {
+
+// Constructs a matcher that matches a const std::string& whose value is
+// equal to s.
+Matcher<const std::string&>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a const std::string& whose value is
+// equal to s.
+Matcher<const std::string&>::Matcher(const char* s) {
+ *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a std::string whose value is equal to
+// s.
+Matcher<std::string>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a std::string whose value is equal to
+// s.
+Matcher<std::string>::Matcher(const char* s) { *this = Eq(std::string(s)); }
+
+#if GTEST_INTERNAL_HAS_STRING_VIEW
+// Constructs a matcher that matches a const StringView& whose value is
+// equal to s.
+Matcher<const internal::StringView&>::Matcher(const std::string& s) {
+ *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const StringView& whose value is
+// equal to s.
+Matcher<const internal::StringView&>::Matcher(const char* s) {
+ *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a const StringView& whose value is
+// equal to s.
+Matcher<const internal::StringView&>::Matcher(internal::StringView s) {
+ *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a StringView whose value is equal to
+// s.
+Matcher<internal::StringView>::Matcher(const std::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a StringView whose value is equal to
+// s.
+Matcher<internal::StringView>::Matcher(const char* s) {
+ *this = Eq(std::string(s));
+}
+
+// Constructs a matcher that matches a StringView whose value is equal to
+// s.
+Matcher<internal::StringView>::Matcher(internal::StringView s) {
+ *this = Eq(std::string(s));
+}
+#endif // GTEST_INTERNAL_HAS_STRING_VIEW
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <cstdint>
+#include <fstream>
+#include <memory>
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>
+# include <io.h>
+# include <sys/stat.h>
+# include <map> // Used in ThreadLocal.
+# ifdef _MSC_VER
+# include <crtdbg.h>
+# endif // _MSC_VER
+#else
+# include <unistd.h>
+#endif // GTEST_OS_WINDOWS
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif // GTEST_OS_MAC
+
+#if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \
+ GTEST_OS_NETBSD || GTEST_OS_OPENBSD
+# include <sys/sysctl.h>
+# if GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD
+# include <sys/user.h>
+# endif
+#endif
+
+#if GTEST_OS_QNX
+# include <devctl.h>
+# include <fcntl.h>
+# include <sys/procfs.h>
+#endif // GTEST_OS_QNX
+
+#if GTEST_OS_AIX
+# include <procinfo.h>
+# include <sys/types.h>
+#endif // GTEST_OS_AIX
+
+#if GTEST_OS_FUCHSIA
+# include <zircon/process.h>
+# include <zircon/syscalls.h>
+#endif // GTEST_OS_FUCHSIA
+
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide definitions of STDOUT_FILENO or
+// STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif // _MSC_VER
+
+#if GTEST_OS_LINUX
+
+namespace {
+template <typename T>
+T ReadProcFileField(const std::string& filename, int field) {
+ std::string dummy;
+ std::ifstream file(filename.c_str());
+ while (field-- > 0) {
+ file >> dummy;
+ }
+ T output = 0;
+ file >> output;
+ return output;
+}
+} // namespace
+
+// Returns the number of active threads, or 0 when there is an error.
+size_t GetThreadCount() {
+ const std::string filename =
+ (Message() << "/proc/" << getpid() << "/stat").GetString();
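+ // Field 19 (0-based) of /proc/<pid>/stat is num_threads; see proc(5).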
+ return ReadProcFileField<size_t>(filename, 19);
+}
+
+#elif GTEST_OS_MAC
+
+size_t GetThreadCount() {
+ const task_t task = mach_task_self();
+ mach_msg_type_number_t thread_count;
+ thread_act_array_t thread_list;
+ const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+ if (status == KERN_SUCCESS) {
+ // task_threads allocates resources in thread_list and we need to free them
+ // to avoid leaks.
+ vm_deallocate(task,
+ reinterpret_cast<vm_address_t>(thread_list),
+ sizeof(thread_t) * thread_count);
+ return static_cast<size_t>(thread_count);
+ } else {
+ return 0;
+ }
+}
+
+#elif GTEST_OS_DRAGONFLY || GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD || \
+ GTEST_OS_NETBSD
+
+#if GTEST_OS_NETBSD
+#undef KERN_PROC
+#define KERN_PROC KERN_PROC2
+#define kinfo_proc kinfo_proc2
+#endif
+
+#if GTEST_OS_DRAGONFLY
+#define KP_NLWP(kp) (kp.kp_nthreads)
+#elif GTEST_OS_FREEBSD || GTEST_OS_GNU_KFREEBSD
+#define KP_NLWP(kp) (kp.ki_numthreads)
+#elif GTEST_OS_NETBSD
+#define KP_NLWP(kp) (kp.p_nlwps)
+#endif
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ int mib[] = {
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_PID,
+ getpid(),
+#if GTEST_OS_NETBSD
+ sizeof(struct kinfo_proc),
+ 1,
+#endif
+ };
+ u_int miblen = sizeof(mib) / sizeof(mib[0]);
+ struct kinfo_proc info;
+ size_t size = sizeof(info);
+ if (sysctl(mib, miblen, &info, &size, NULL, 0)) {
+ return 0;
+ }
+ return static_cast<size_t>(KP_NLWP(info));
+}
+#elif GTEST_OS_OPENBSD
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ int mib[] = {
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_PID | KERN_PROC_SHOW_THREADS,
+ getpid(),
+ sizeof(struct kinfo_proc),
+ 0,
+ };
+ u_int miblen = sizeof(mib) / sizeof(mib[0]);
+
+ // get number of structs
+ size_t size;
+ if (sysctl(mib, miblen, NULL, &size, NULL, 0)) {
+ return 0;
+ }
+
+ mib[5] = static_cast<int>(size / static_cast<size_t>(mib[4]));
+
+ // populate array of structs
+ struct kinfo_proc info[mib[5]];
+ if (sysctl(mib, miblen, &info, &size, NULL, 0)) {
+ return 0;
+ }
+
+ // exclude empty members
+ size_t nthreads = 0;
+ for (size_t i = 0; i < size / static_cast<size_t>(mib[4]); i++) {
+ if (info[i].p_tid != -1)
+ nthreads++;
+ }
+ return nthreads;
+}
+
+#elif GTEST_OS_QNX
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ const int fd = open("/proc/self/as", O_RDONLY);
+ if (fd < 0) {
+ return 0;
+ }
+ procfs_info process_info;
+ const int status =
+ devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), nullptr);
+ close(fd);
+ if (status == EOK) {
+ return static_cast<size_t>(process_info.num_threads);
+ } else {
+ return 0;
+ }
+}
+
+#elif GTEST_OS_AIX
+
+size_t GetThreadCount() {
+ struct procentry64 entry;
+ pid_t pid = getpid();
+ int status = getprocs64(&entry, sizeof(entry), nullptr, 0, &pid, 1);
+ if (status == 1) {
+ return entry.pi_thcount;
+ } else {
+ return 0;
+ }
+}
+
+#elif GTEST_OS_FUCHSIA
+
+size_t GetThreadCount() {
+ int dummy_buffer;
+ size_t avail;
+ zx_status_t status = zx_object_get_info(
+ zx_process_self(),
+ ZX_INFO_PROCESS_THREADS,
+ &dummy_buffer,
+ 0,
+ nullptr,
+ &avail);
+ if (status == ZX_OK) {
+ return avail;
+ } else {
+ return 0;
+ }
+}
+
+#else
+
+size_t GetThreadCount() {
+ // There's no portable way to detect the number of threads, so we just
+ // return 0 to indicate that we cannot detect it.
+ return 0;
+}
+
+#endif // GTEST_OS_LINUX
+
+#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+void SleepMilliseconds(int n) {
+ ::Sleep(static_cast<DWORD>(n));
+}
+
+AutoHandle::AutoHandle()
+ : handle_(INVALID_HANDLE_VALUE) {}
+
+AutoHandle::AutoHandle(Handle handle)
+ : handle_(handle) {}
+
+AutoHandle::~AutoHandle() {
+ Reset();
+}
+
+AutoHandle::Handle AutoHandle::Get() const {
+ return handle_;
+}
+
+void AutoHandle::Reset() {
+ Reset(INVALID_HANDLE_VALUE);
+}
+
+void AutoHandle::Reset(HANDLE handle) {
+ // Resetting with the same handle we already own is invalid.
+ if (handle_ != handle) {
+ if (IsCloseable()) {
+ ::CloseHandle(handle_);
+ }
+ handle_ = handle;
+ } else {
+ GTEST_CHECK_(!IsCloseable())
+ << "Resetting a valid handle to itself is likely a programmer error "
+ "and thus not allowed.";
+ }
+}
+
+bool AutoHandle::IsCloseable() const {
+ // Different Windows APIs may use either of these values to represent an
+ // invalid handle.
+ return handle_ != nullptr && handle_ != INVALID_HANDLE_VALUE;
+}
+
+Notification::Notification()
+ : event_(::CreateEvent(nullptr, // Default security attributes.
+ TRUE, // Do not reset automatically.
+ FALSE, // Initially unset.
+ nullptr)) { // Anonymous event.
+ GTEST_CHECK_(event_.Get() != nullptr);
+}
+
+void Notification::Notify() {
+ GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
+}
+
+void Notification::WaitForNotification() {
+ GTEST_CHECK_(
+ ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
+}
+
+Mutex::Mutex()
+ : owner_thread_id_(0),
+ type_(kDynamic),
+ critical_section_init_phase_(0),
+ critical_section_(new CRITICAL_SECTION) {
+ ::InitializeCriticalSection(critical_section_);
+}
+
+Mutex::~Mutex() {
+ // Static mutexes are leaked intentionally. It is not thread-safe to try
+ // to clean them up.
+ if (type_ == kDynamic) {
+ ::DeleteCriticalSection(critical_section_);
+ delete critical_section_;
+ critical_section_ = nullptr;
+ }
+}
+
+void Mutex::Lock() {
+ ThreadSafeLazyInit();
+ ::EnterCriticalSection(critical_section_);
+ owner_thread_id_ = ::GetCurrentThreadId();
+}
+
+void Mutex::Unlock() {
+ ThreadSafeLazyInit();
+ // We don't protect writing to owner_thread_id_ here, as it's the
+ // caller's responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ owner_thread_id_ = 0;
+ ::LeaveCriticalSection(critical_section_);
+}
+
+// Does nothing if the current thread holds the mutex. Otherwise, crashes
+// with high probability.
+void Mutex::AssertHeld() {
+ ThreadSafeLazyInit();
+ GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())
+ << "The current thread is not holding the mutex @" << this;
+}
+
+namespace {
+
+#ifdef _MSC_VER
+// Use the RAII idiom to flag mem allocs that are intentionally never
+// deallocated. The motivation is to silence the false positive mem leaks
+// that are reported by the debug version of MS's CRT which can only detect
+// if an alloc is missing a matching deallocation.
+// Example:
+// MemoryIsNotDeallocated memory_is_not_deallocated;
+// critical_section_ = new CRITICAL_SECTION;
+//
+class MemoryIsNotDeallocated
+{
+ public:
+ MemoryIsNotDeallocated() : old_crtdbg_flag_(0) {
+ old_crtdbg_flag_ = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG);
+ // Clear _CRTDBG_ALLOC_MEM_DF so that blocks allocated from here on are
+ // not tracked by the MS debug CRT and thus not reported as leaks.
+ _CrtSetDbgFlag(old_crtdbg_flag_ & ~_CRTDBG_ALLOC_MEM_DF);
+ }
+
+ ~MemoryIsNotDeallocated() {
+ // Restore the original _CRTDBG_ALLOC_MEM_DF flag
+ _CrtSetDbgFlag(old_crtdbg_flag_);
+ }
+
+ private:
+ int old_crtdbg_flag_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MemoryIsNotDeallocated);
+};
+#endif // _MSC_VER
+
+} // namespace
+
+// Initializes owner_thread_id_ and critical_section_ in static mutexes.
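+// critical_section_init_phase_ acts as a small state machine:
+//   0 - not yet initialized, 1 - initialization in progress, 2 - ready.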
+void Mutex::ThreadSafeLazyInit() {
+ // Dynamic mutexes are initialized in the constructor.
+ if (type_ == kStatic) {
+ switch (
+ ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {
+ case 0:
+ // If critical_section_init_phase_ was 0 before the exchange, we
+ // are the first to test it and need to perform the initialization.
+ owner_thread_id_ = 0;
+ {
+ // Use RAII to flag that following mem alloc is never deallocated.
+#ifdef _MSC_VER
+ MemoryIsNotDeallocated memory_is_not_deallocated;
+#endif // _MSC_VER
+ critical_section_ = new CRITICAL_SECTION;
+ }
+ ::InitializeCriticalSection(critical_section_);
+ // Updates the critical_section_init_phase_ to 2 to signal
+ // initialization complete.
+ GTEST_CHECK_(::InterlockedCompareExchange(
+ &critical_section_init_phase_, 2L, 1L) ==
+ 1L);
+ break;
+ case 1:
+ // Somebody else is already initializing the mutex; spin until they
+ // are done.
+ while (::InterlockedCompareExchange(&critical_section_init_phase_,
+ 2L,
+ 2L) != 2L) {
+ // Possibly yields the rest of the thread's time slice to other
+ // threads.
+ ::Sleep(0);
+ }
+ break;
+
+ case 2:
+ break; // The mutex is already initialized and ready for use.
+
+ default:
+ GTEST_CHECK_(false)
+ << "Unexpected value of critical_section_init_phase_ "
+ << "while initializing a static mutex.";
+ }
+ }
+}
+
+namespace {
+
+class ThreadWithParamSupport : public ThreadWithParamBase {
+ public:
+ static HANDLE CreateThread(Runnable* runnable,
+ Notification* thread_can_start) {
+ ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);
+ DWORD thread_id;
+ HANDLE thread_handle = ::CreateThread(
+ nullptr, // Default security.
+ 0, // Default stack size.
+ &ThreadWithParamSupport::ThreadMain,
+ param, // Parameter to ThreadMainStatic
+ 0x0, // Default creation flags.
+ &thread_id); // Need a valid pointer for the call to work under Win98.
+ GTEST_CHECK_(thread_handle != nullptr)
+ << "CreateThread failed with error " << ::GetLastError() << ".";
+ if (thread_handle == nullptr) {
+ delete param;
+ }
+ return thread_handle;
+ }
+
+ private:
+ struct ThreadMainParam {
+ ThreadMainParam(Runnable* runnable, Notification* thread_can_start)
+ : runnable_(runnable),
+ thread_can_start_(thread_can_start) {
+ }
+ std::unique_ptr<Runnable> runnable_;
+ // Does not own.
+ Notification* thread_can_start_;
+ };
+
+ static DWORD WINAPI ThreadMain(void* ptr) {
+ // Transfers ownership.
+ std::unique_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));
+ if (param->thread_can_start_ != nullptr)
+ param->thread_can_start_->WaitForNotification();
+ param->runnable_->Run();
+ return 0;
+ }
+
+ // Prohibit instantiation.
+ ThreadWithParamSupport();
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);
+};
+
+} // namespace
+
+ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,
+ Notification* thread_can_start)
+ : thread_(ThreadWithParamSupport::CreateThread(runnable,
+ thread_can_start)) {
+}
+
+ThreadWithParamBase::~ThreadWithParamBase() {
+ Join();
+}
+
+void ThreadWithParamBase::Join() {
+ GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0)
+ << "Failed to join the thread with error " << ::GetLastError() << ".";
+}
+
+// Maps a thread to a set of ThreadIdToThreadLocals that have values
+// instantiated on that thread and notifies them when the thread exits. A
+// ThreadLocal instance is expected to persist until all threads it has
+// values on have terminated.
+class ThreadLocalRegistryImpl {
+ public:
+ // Registers thread_local_instance as having value on the current thread.
+ // Returns the holder of that instance's value for the current thread,
+ // creating the value (and a watcher thread) on first use.
+ static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance) {
+#ifdef _MSC_VER
+ MemoryIsNotDeallocated memory_is_not_deallocated;
+#endif // _MSC_VER
+ DWORD current_thread = ::GetCurrentThreadId();
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ ThreadIdToThreadLocals::iterator thread_local_pos =
+ thread_to_thread_locals->find(current_thread);
+ if (thread_local_pos == thread_to_thread_locals->end()) {
+ thread_local_pos = thread_to_thread_locals->insert(
+ std::make_pair(current_thread, ThreadLocalValues())).first;
+ StartWatcherThreadFor(current_thread);
+ }
+ ThreadLocalValues& thread_local_values = thread_local_pos->second;
+ ThreadLocalValues::iterator value_pos =
+ thread_local_values.find(thread_local_instance);
+ if (value_pos == thread_local_values.end()) {
+ value_pos =
+ thread_local_values
+ .insert(std::make_pair(
+ thread_local_instance,
+ std::shared_ptr<ThreadLocalValueHolderBase>(
+ thread_local_instance->NewValueForCurrentThread())))
+ .first;
+ }
+ return value_pos->second.get();
+ }
+
+ static void OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance) {
+ std::vector<std::shared_ptr<ThreadLocalValueHolderBase> > value_holders;
+ // Clean up the ThreadLocalValues data structure while holding the lock, but
+ // defer the destruction of the ThreadLocalValueHolderBases.
+ {
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ for (ThreadIdToThreadLocals::iterator it =
+ thread_to_thread_locals->begin();
+ it != thread_to_thread_locals->end();
+ ++it) {
+ ThreadLocalValues& thread_local_values = it->second;
+ ThreadLocalValues::iterator value_pos =
+ thread_local_values.find(thread_local_instance);
+ if (value_pos != thread_local_values.end()) {
+ value_holders.push_back(value_pos->second);
+ thread_local_values.erase(value_pos);
+ // This 'if' can succeed at most once, so theoretically we could
+ // break out of the loop here, but we don't bother doing so.
+ }
+ }
+ }
+ // Outside the lock, let the destructor for 'value_holders' deallocate the
+ // ThreadLocalValueHolderBases.
+ }
+
+ static void OnThreadExit(DWORD thread_id) {
+ GTEST_CHECK_(thread_id != 0) << ::GetLastError();
+ std::vector<std::shared_ptr<ThreadLocalValueHolderBase> > value_holders;
+ // Clean up the ThreadIdToThreadLocals data structure while holding the
+ // lock, but defer the destruction of the ThreadLocalValueHolderBases.
+ {
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ ThreadIdToThreadLocals::iterator thread_local_pos =
+ thread_to_thread_locals->find(thread_id);
+ if (thread_local_pos != thread_to_thread_locals->end()) {
+ ThreadLocalValues& thread_local_values = thread_local_pos->second;
+ for (ThreadLocalValues::iterator value_pos =
+ thread_local_values.begin();
+ value_pos != thread_local_values.end();
+ ++value_pos) {
+ value_holders.push_back(value_pos->second);
+ }
+ thread_to_thread_locals->erase(thread_local_pos);
+ }
+ }
+ // Outside the lock, let the destructor for 'value_holders' deallocate the
+ // ThreadLocalValueHolderBases.
+ }
+
+ private:
+ // In a particular thread, maps a ThreadLocal object to its value.
+ typedef std::map<const ThreadLocalBase*,
+ std::shared_ptr<ThreadLocalValueHolderBase> >
+ ThreadLocalValues;
+ // Maps a thread's ID to the ThreadLocalValues instantiated on that
+ // thread.
+ typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;
+
+ // Holds the thread id and thread handle that we pass from
+ // StartWatcherThreadFor to WatcherThreadFunc.
+ typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;
+
+ static void StartWatcherThreadFor(DWORD thread_id) {
+ // The returned handle will be kept in thread_map and closed by
+ // watcher_thread in WatcherThreadFunc.
+ HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
+ FALSE,
+ thread_id);
+ GTEST_CHECK_(thread != nullptr);
+ // We need to pass a valid thread ID pointer into CreateThread for it
+ // to work correctly under Win98.
+ DWORD watcher_thread_id;
+ HANDLE watcher_thread = ::CreateThread(
+ nullptr, // Default security.
+ 0, // Default stack size
+ &ThreadLocalRegistryImpl::WatcherThreadFunc,
+ reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
+ CREATE_SUSPENDED, &watcher_thread_id);
+ GTEST_CHECK_(watcher_thread != nullptr);
+ // Give the watcher thread the same priority as ours to avoid being
+ // blocked by it.
+ ::SetThreadPriority(watcher_thread,
+ ::GetThreadPriority(::GetCurrentThread()));
+ ::ResumeThread(watcher_thread);
+ ::CloseHandle(watcher_thread);
+ }
+
+ // Monitors exit from a given thread and notifies those
+ // ThreadIdToThreadLocals about thread termination.
+ static DWORD WINAPI WatcherThreadFunc(LPVOID param) {
+ const ThreadIdAndHandle* tah =
+ reinterpret_cast<const ThreadIdAndHandle*>(param);
+ GTEST_CHECK_(
+ ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);
+ OnThreadExit(tah->first);
+ ::CloseHandle(tah->second);
+ delete tah;
+ return 0;
+ }
+
+ // Returns map of thread local instances.
+ static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {
+ mutex_.AssertHeld();
+#ifdef _MSC_VER
+ MemoryIsNotDeallocated memory_is_not_deallocated;
+#endif // _MSC_VER
+ static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals();
+ return map;
+ }
+
+ // Protects access to GetThreadLocalsMapLocked() and its return value.
+ static Mutex mutex_;
+ // Protects access to GetThreadMapLocked() and its return value.
+ static Mutex thread_map_mutex_;
+};
+
+Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex); // NOLINT
+Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex); // NOLINT
+
+ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance) {
+ return ThreadLocalRegistryImpl::GetValueOnCurrentThread(
+ thread_local_instance);
+}
+
+void ThreadLocalRegistry::OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance) {
+ ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);
+}
+
+#endif // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE. Currently only needed for death tests.
+
+RE::~RE() {
+ if (is_valid_) {
+ // regfree'ing an invalid regex might crash because the content
+ // of the regex is undefined. Since the regexes are essentially
+ // the same, one cannot be valid (or invalid) without the other
+ // being so too.
+ regfree(&partial_regex_);
+ regfree(&full_regex_);
+ }
+ free(const_cast<char*>(pattern_));
+}
+
+// Returns true if and only if regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true if and only if regular expression re matches a substring of
+// str (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = posix::StrDup(regex);
+
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match.
+ const size_t full_regex_len = strlen(regex) + 10;
+ char* const full_pattern = new char[full_regex_len];
+
+ snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
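+ // For example, the regex "a.c" becomes the full-match pattern "^(a.c)$".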
+ is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+ // We want to call regcomp(&partial_regex_, ...) even if the
+ // previous expression returns false. Otherwise partial_regex_ may
+ // not be properly initialized and may cause trouble when it's
+ // freed.
+ //
+ // Some implementations of POSIX regex (e.g. on at least some
+ // versions of Cygwin) don't accept the empty string as a valid
+ // regex. We change it to an equivalent form "()" to be safe.
+ if (is_valid_) {
+ const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+ is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+ }
+ EXPECT_TRUE(is_valid_)
+ << "Regular expression \"" << regex
+ << "\" is not a valid POSIX Extended regular expression.";
+
+ delete[] full_pattern;
+}
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true if and only if ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+ return ch != '\0' && strchr(str, ch) != nullptr;
+}
+
+// Returns true if and only if ch belongs to the given classification.
+// Unlike similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+ return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+ return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true if and only if "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+ return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true if and only if the given atom (specified by escaped and
+// pattern) matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+ if (escaped) { // "\\p" where p is pattern_char.
+ switch (pattern_char) {
+ case 'd': return IsAsciiDigit(ch);
+ case 'D': return !IsAsciiDigit(ch);
+ case 'f': return ch == '\f';
+ case 'n': return ch == '\n';
+ case 'r': return ch == '\r';
+ case 's': return IsAsciiWhiteSpace(ch);
+ case 'S': return !IsAsciiWhiteSpace(ch);
+ case 't': return ch == '\t';
+ case 'v': return ch == '\v';
+ case 'w': return IsAsciiWordChar(ch);
+ case 'W': return !IsAsciiWordChar(ch);
+ }
+ return IsAsciiPunct(pattern_char) && pattern_char == ch;
+ }
+
+ return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
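+
+// A few illustrative calls (derived from the cases above, not part of
+// the upstream source):
+//   AtomMatchesChar(true,  'd', '5')  -> true   ("\d" matches a digit)
+//   AtomMatchesChar(true,  'w', '-')  -> false  ('-' is not a word char)
+//   AtomMatchesChar(false, '.', 'x')  -> true   ('.' matches any non-'\n')
+//   AtomMatchesChar(false, '.', '\n') -> false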
+
+// Helper function used by ValidateRegex() to format error messages.
+static std::string FormatRegexSyntaxError(const char* regex, int index) {
+ return (Message() << "Syntax error at index " << index
+ << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+ if (regex == nullptr) {
+ ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+ return false;
+ }
+
+ bool is_valid = true;
+
+ // True if and only if ?, *, or + can follow the previous atom.
+ bool prev_repeatable = false;
+ for (int i = 0; regex[i]; i++) {
+ if (regex[i] == '\\') { // An escape sequence
+ i++;
+ if (regex[i] == '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "'\\' cannot appear at the end.";
+ return false;
+ }
+
+ if (!IsValidEscape(regex[i])) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "invalid escape sequence \"\\" << regex[i] << "\".";
+ is_valid = false;
+ }
+ prev_repeatable = true;
+ } else { // Not an escape sequence.
+ const char ch = regex[i];
+
+ if (ch == '^' && i > 0) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'^' can only appear at the beginning.";
+ is_valid = false;
+ } else if (ch == '$' && regex[i + 1] != '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'$' can only appear at the end.";
+ is_valid = false;
+ } else if (IsInSet(ch, "()[]{}|")) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' is unsupported.";
+ is_valid = false;
+ } else if (IsRepeat(ch) && !prev_repeatable) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' can only follow a repeatable token.";
+ is_valid = false;
+ }
+
+ prev_repeatable = !IsInSet(ch, "^$?*+");
+ }
+ }
+
+ return is_valid;
+}
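+
+// For example (a sketch of the rules above, not exhaustive):
+//   ValidateRegex("ab?c") -> true   ('?' follows the repeatable atom 'b')
+//   ValidateRegex("a**")  -> false  ('*' cannot follow another repetition)
+//   ValidateRegex("(ab)") -> false  (grouping is unsupported in this dialect)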
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char c, char repeat, const char* regex,
+ const char* str) {
+ const size_t min_count = (repeat == '+') ? 1 : 0;
+ const size_t max_count = (repeat == '?') ? 1 :
+ static_cast<size_t>(-1) - 1;
+ // We cannot call numeric_limits::max() as it conflicts with the
+ // max() macro on Windows.
+
+ for (size_t i = 0; i <= max_count; ++i) {
+ // We know that the atom matches each of the first i characters in str.
+ if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+ // We have enough matches at the head, and the tail matches too.
+ // Since we only care about *whether* the pattern matches str
+ // (as opposed to *how* it matches), there is no need to find a
+ // greedy match.
+ return true;
+ }
+ if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+ return false;
+ }
+ return false;
+}
+
+// Returns true if and only if regex matches a prefix of str. regex must
+// be a valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+ if (*regex == '\0') // An empty regex matches a prefix of anything.
+ return true;
+
+ // "$" only matches the end of a string. Note that regex being
+ // valid guarantees that there's nothing after "$" in it.
+ if (*regex == '$')
+ return *str == '\0';
+
+ // Is the first thing in regex an escape sequence?
+ const bool escaped = *regex == '\\';
+ if (escaped)
+ ++regex;
+ if (IsRepeat(regex[1])) {
+ // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+ // here's an indirect recursion. It terminates as the regex gets
+ // shorter in each recursion.
+ return MatchRepetitionAndRegexAtHead(
+ escaped, regex[0], regex[1], regex + 2, str);
+ } else {
+ // regex isn't empty, isn't "$", and doesn't start with a
+ // repetition. We match the first atom of regex with the first
+ // character of str and recurse.
+ return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+ MatchRegexAtHead(regex + 1, str + 1);
+ }
+}
+
+// Returns true if and only if regex matches any substring of str. regex must
+// be a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+ if (regex == nullptr || str == nullptr) return false;
+
+ if (*regex == '^')
+ return MatchRegexAtHead(regex + 1, str);
+
+ // A successful match can be anywhere in str.
+ do {
+ if (MatchRegexAtHead(regex, str))
+ return true;
+ } while (*str++ != '\0');
+ return false;
+}
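+
+// Usage sketch (hypothetical inputs):
+//   MatchRegexAnywhere("b.d", "abcde") -> true   (matches the "bcd" slice)
+//   MatchRegexAnywhere("^b",  "abc")   -> false  (anchored at the start)
+//   MatchRegexAnywhere("c$",  "abc")   -> true   (anchored at the end)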
+
+// Implements the RE class.
+
+RE::~RE() {
+ free(const_cast<char*>(pattern_));
+ free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true if and only if regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true if and only if regular expression re matches a substring of
+// str (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = full_pattern_ = nullptr;
+ if (regex != nullptr) {
+ pattern_ = posix::StrDup(regex);
+ }
+
+ is_valid_ = ValidateRegex(regex);
+ if (!is_valid_) {
+ // No need to calculate the full pattern when the regex is invalid.
+ return;
+ }
+
+ const size_t len = strlen(regex);
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match: we need space to prepend a '^', append a '$', and
+ // terminate the string with '\0'.
+ char* buffer = static_cast<char*>(malloc(len + 3));
+ full_pattern_ = buffer;
+
+ if (*regex != '^')
+ *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'.
+
+ // We don't use snprintf or strncpy, as they trigger a warning when
+ // compiled with VC++ 8.0.
+ memcpy(buffer, regex, len);
+ buffer += len;
+
+ if (len == 0 || regex[len - 1] != '$')
+ *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'.
+
+ *buffer = '\0';
+}
+
+#endif // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+ const std::string file_name(file == nullptr ? kUnknownFile : file);
+
+ if (line < 0) {
+ return file_name + ":";
+ }
+#ifdef _MSC_VER
+ return file_name + "(" + StreamableToString(line) + "):";
+#else
+ return file_name + ":" + StreamableToString(line) + ":";
+#endif // _MSC_VER
+}
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append a colon
+// to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+ const char* file, int line) {
+ const std::string file_name(file == nullptr ? kUnknownFile : file);
+
+ if (line < 0)
+ return file_name;
+ else
+ return file_name + ":" + StreamableToString(line);
+}
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+ : severity_(severity) {
+ const char* const marker =
+ severity == GTEST_INFO ? "[ INFO ]" :
+ severity == GTEST_WARNING ? "[WARNING]" :
+ severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
+ GetStream() << ::std::endl << marker << " "
+ << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+ GetStream() << ::std::endl;
+ if (severity_ == GTEST_FATAL) {
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+GTEST_DISABLE_MSC_DEPRECATED_PUSH_()
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+ // The ctor redirects the stream to a temporary file.
+ explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+# if GTEST_OS_WINDOWS
+ char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+ char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+
+ ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+ const UINT success = ::GetTempFileNameA(temp_dir_path,
+ "gtest_redir",
+ 0, // Generate unique file name.
+ temp_file_path);
+ GTEST_CHECK_(success != 0)
+ << "Unable to create a temporary file in " << temp_dir_path;
+ const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+ GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+ << temp_file_path;
+ filename_ = temp_file_path;
+# else
+ // There's no guarantee that a test has write access to the current
+ // directory, so we create the temporary file in a temporary directory.
+ std::string name_template;
+
+# if GTEST_OS_LINUX_ANDROID
+ // Note: Android applications are expected to call the framework's
+ // Context.getExternalStorageDirectory() method through JNI to get
+ // the location of the world-writable SD Card directory. However,
+ // this requires a Context handle, which cannot be retrieved
+ // globally from native code. Doing so also precludes running the
+ // code as part of a regular standalone executable, which doesn't
+ // run in a Dalvik process (e.g. when running it through 'adb shell').
+ //
+ // The location /data/local/tmp is directly accessible from native code.
+ // '/sdcard' and other variants cannot be relied on, as they are not
+ // guaranteed to be mounted, or may have a delay in mounting.
+ name_template = "/data/local/tmp/";
+# elif GTEST_OS_IOS
+ char user_temp_dir[PATH_MAX + 1];
+
+    // Documented alternative to NSTemporaryDirectory() (for obtaining a
+    // temporary directory) at
+ // https://developer.apple.com/library/archive/documentation/Security/Conceptual/SecureCodingGuide/Articles/RaceConditions.html#//apple_ref/doc/uid/TP40002585-SW10
+ //
+ // _CS_DARWIN_USER_TEMP_DIR (as well as _CS_DARWIN_USER_CACHE_DIR) is not
+ // documented in the confstr() man page at
+ // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/confstr.3.html#//apple_ref/doc/man/3/confstr
+    // but is still available, according to the WebKit patches at
+ // https://trac.webkit.org/changeset/262004/webkit
+ // https://trac.webkit.org/changeset/263705/webkit
+ //
+ // The confstr() implementation falls back to getenv("TMPDIR"). See
+ // https://opensource.apple.com/source/Libc/Libc-1439.100.3/gen/confstr.c.auto.html
+ ::confstr(_CS_DARWIN_USER_TEMP_DIR, user_temp_dir, sizeof(user_temp_dir));
+
+ name_template = user_temp_dir;
+ if (name_template.back() != GTEST_PATH_SEP_[0])
+ name_template.push_back(GTEST_PATH_SEP_[0]);
+# else
+ name_template = "/tmp/";
+# endif
+ name_template.append("gtest_captured_stream.XXXXXX");
+
+ // mkstemp() modifies the string bytes in place, and does not go beyond the
+ // string's length. This results in well-defined behavior in C++17.
+ //
+ // The const_cast is needed below C++17. The constraints on std::string
+    // implementations in C++11 and above make the assumption behind the
+    // const_cast fairly safe.
+ const int captured_fd = ::mkstemp(const_cast<char*>(name_template.data()));
+ if (captured_fd == -1) {
+ GTEST_LOG_(WARNING)
+ << "Failed to create tmp file " << name_template
+ << " for test; does the test have access to the /tmp directory?";
+ }
+ filename_ = std::move(name_template);
+# endif // GTEST_OS_WINDOWS
+ fflush(nullptr);
+ dup2(captured_fd, fd_);
+ close(captured_fd);
+ }
+
+ ~CapturedStream() {
+ remove(filename_.c_str());
+ }
+
+ std::string GetCapturedString() {
+ if (uncaptured_fd_ != -1) {
+ // Restores the original stream.
+ fflush(nullptr);
+ dup2(uncaptured_fd_, fd_);
+ close(uncaptured_fd_);
+ uncaptured_fd_ = -1;
+ }
+
+ FILE* const file = posix::FOpen(filename_.c_str(), "r");
+ if (file == nullptr) {
+ GTEST_LOG_(FATAL) << "Failed to open tmp file " << filename_
+ << " for capturing stream.";
+ }
+ const std::string content = ReadEntireFile(file);
+ posix::FClose(file);
+ return content;
+ }
+
+ private:
+ const int fd_; // A stream to capture.
+ int uncaptured_fd_;
+  // Name of the temporary file holding the captured stream's output.
+ ::std::string filename_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
+
+GTEST_DISABLE_MSC_DEPRECATED_POP_()
+
+static CapturedStream* g_captured_stderr = nullptr;
+static CapturedStream* g_captured_stdout = nullptr;
+
+// Starts capturing an output stream (stdout/stderr).
+static void CaptureStream(int fd, const char* stream_name,
+ CapturedStream** stream) {
+ if (*stream != nullptr) {
+ GTEST_LOG_(FATAL) << "Only one " << stream_name
+ << " capturer can exist at a time.";
+ }
+ *stream = new CapturedStream(fd);
+}
+
+// Stops capturing the output stream and returns the captured string.
+static std::string GetCapturedStream(CapturedStream** captured_stream) {
+ const std::string content = (*captured_stream)->GetCapturedString();
+
+ delete *captured_stream;
+ *captured_stream = nullptr;
+
+ return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+ CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+ CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+std::string GetCapturedStdout() {
+ return GetCapturedStream(&g_captured_stdout);
+}
+
+// Stops capturing stderr and returns the captured string.
+std::string GetCapturedStderr() {
+ return GetCapturedStream(&g_captured_stderr);
+}
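+
+// A minimal usage sketch of the capture API above (hypothetical test
+// code):
+//
+//   testing::internal::CaptureStdout();
+//   printf("hello\n");
+//   const std::string out = testing::internal::GetCapturedStdout();
+//   // out == "hello\n"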
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+size_t GetFileSize(FILE* file) {
+ fseek(file, 0, SEEK_END);
+ return static_cast<size_t>(ftell(file));
+}
+
+std::string ReadEntireFile(FILE* file) {
+ const size_t file_size = GetFileSize(file);
+ char* const buffer = new char[file_size];
+
+ size_t bytes_last_read = 0; // # of bytes read in the last fread()
+ size_t bytes_read = 0; // # of bytes read so far
+
+ fseek(file, 0, SEEK_SET);
+
+ // Keeps reading the file until we cannot read further or the
+ // pre-determined file size is reached.
+ do {
+ bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
+ bytes_read += bytes_last_read;
+ } while (bytes_last_read > 0 && bytes_read < file_size);
+
+ const std::string content(buffer, bytes_read);
+ delete[] buffer;
+
+ return content;
+}
+
+#if GTEST_HAS_DEATH_TEST
+static const std::vector<std::string>* g_injected_test_argvs =
+ nullptr; // Owned.
+
+std::vector<std::string> GetInjectableArgvs() {
+ if (g_injected_test_argvs != nullptr) {
+ return *g_injected_test_argvs;
+ }
+ return GetArgvs();
+}
+
+void SetInjectableArgvs(const std::vector<std::string>* new_argvs) {
+ if (g_injected_test_argvs != new_argvs) delete g_injected_test_argvs;
+ g_injected_test_argvs = new_argvs;
+}
+
+void SetInjectableArgvs(const std::vector<std::string>& new_argvs) {
+ SetInjectableArgvs(
+ new std::vector<std::string>(new_argvs.begin(), new_argvs.end()));
+}
+
+void ClearInjectableArgvs() {
+ delete g_injected_test_argvs;
+ g_injected_test_argvs = nullptr;
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+ DebugBreak();
+ TerminateProcess(GetCurrentProcess(), 1);
+}
+} // namespace posix
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+ const std::string full_flag =
+ (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+ Message env_var;
+ for (size_t i = 0; i != full_flag.length(); i++) {
+ env_var << ToUpper(full_flag.c_str()[i]);
+ }
+
+ return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, int32_t* value) {
+ // Parses the environment variable as a decimal integer.
+ char* end = nullptr;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value \"" << str << "\".\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ // Is the parsed value in the range of an int32_t?
+ const auto result = static_cast<int32_t>(long_value);
+ if (long_value == LONG_MAX || long_value == LONG_MIN ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an int32_t.
+ ) {
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value " << str << ", which overflows.\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
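+
+// Illustrative outcomes (a sketch; the exact src_text is immaterial):
+//   ParseInt32(..., "123",        &v) -> true, with v == 123
+//   ParseInt32(..., "12x",        &v) -> false (trailing garbage)
+//   ParseInt32(..., "2147483648", &v) -> false (overflows int32_t)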
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true if and only if it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+#if defined(GTEST_GET_BOOL_FROM_ENV_)
+ return GTEST_GET_BOOL_FROM_ENV_(flag, default_value);
+#else
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ return string_value == nullptr ? default_value
+ : strcmp(string_value, "0") != 0;
+#endif // defined(GTEST_GET_BOOL_FROM_ENV_)
+}
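+
+// For example (hypothetical environment; flag "foo" maps to GTEST_FOO):
+//   GTEST_FOO unset    -> BoolFromGTestEnv("foo", true)  == true
+//   GTEST_FOO == "0"   -> BoolFromGTestEnv("foo", true)  == false
+//   GTEST_FOO == "yes" -> BoolFromGTestEnv("foo", false) == true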
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+int32_t Int32FromGTestEnv(const char* flag, int32_t default_value) {
+#if defined(GTEST_GET_INT32_FROM_ENV_)
+ return GTEST_GET_INT32_FROM_ENV_(flag, default_value);
+#else
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ if (string_value == nullptr) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ int32_t result = default_value;
+ if (!ParseInt32(Message() << "Environment variable " << env_var,
+ string_value, &result)) {
+ printf("The default value %s is used.\n",
+ (Message() << default_value).GetString().c_str());
+ fflush(stdout);
+ return default_value;
+ }
+
+ return result;
+#endif // defined(GTEST_GET_INT32_FROM_ENV_)
+}
+
+// As a special case for the 'output' flag, if GTEST_OUTPUT is not
+// set, we look for XML_OUTPUT_FILE, which is set by the Bazel build
+// system. The value of XML_OUTPUT_FILE is a filename without the
+// "xml:" prefix of GTEST_OUTPUT.
+// Note that this is meant to be called at the call site, so it does
+// not check that the flag is 'output'.
+// In essence this checks the environment variable XML_OUTPUT_FILE;
+// if it is set, we prepend "xml:" to its value; if it is not set, we
+// return "".
+std::string OutputFlagAlsoCheckEnvVar() {
+ std::string default_value_for_output_flag = "";
+ const char* xml_output_file_env = posix::GetEnv("XML_OUTPUT_FILE");
+ if (nullptr != xml_output_file_env) {
+ default_value_for_output_flag = std::string("xml:") + xml_output_file_env;
+ }
+ return default_value_for_output_flag;
+}
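+
+// For example, under Bazel (hypothetical path):
+//   XML_OUTPUT_FILE=/tmp/test.xml -> returns "xml:/tmp/test.xml"
+//   XML_OUTPUT_FILE unset         -> returns ""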
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromGTestEnv(const char* flag, const char* default_value) {
+#if defined(GTEST_GET_STRING_FROM_ENV_)
+ return GTEST_GET_STRING_FROM_ENV_(flag, default_value);
+#else
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const value = posix::GetEnv(env_var.c_str());
+ return value == nullptr ? default_value : value;
+#endif // defined(GTEST_GET_STRING_FROM_ENV_)
+}
+
+} // namespace internal
+} // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Google Test - The Google C++ Testing and Mocking Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// It uses the << operator when possible, and prints the bytes in the
+// object otherwise. A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+
+#include <stdio.h>
+
+#include <cctype>
+#include <cstdint>
+#include <cwchar>
+#include <ostream> // NOLINT
+#include <string>
+#include <type_traits>
+
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+// Prints a segment of bytes in the given object.
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+ size_t count, ostream* os) {
+ char text[5] = "";
+ for (size_t i = 0; i != count; i++) {
+ const size_t j = start + i;
+ if (i != 0) {
+      // Organizes the bytes into groups of 2 for easy parsing by
+      // a human reader.
+ if ((j % 2) == 0)
+ *os << ' ';
+ else
+ *os << '-';
+ }
+ GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
+ *os << text;
+ }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ // Tells the user how big the object is.
+ *os << count << "-byte object <";
+
+ const size_t kThreshold = 132;
+ const size_t kChunkSize = 64;
+ // If the object size is bigger than kThreshold, we'll have to omit
+ // some details by printing only the first and the last kChunkSize
+ // bytes.
+ if (count < kThreshold) {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+ } else {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+ *os << " ... ";
+ // Rounds up to 2-byte boundary.
+ const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+ PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+ }
+ *os << ">";
+}
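+
+// For instance, a 4-byte object with bytes DE AD BE EF prints as
+//   4-byte object <DE-AD BE-EF>
+// (a sketch of the format above: pairs are joined by '-' and separated
+// by spaces).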
+
+// Helpers for widening a character to char32_t. Since the standard does not
+// specify whether char / wchar_t is signed or unsigned, it is important to
+// first convert it to the unsigned type of the same width before widening it
+// to char32_t.
+template <typename CharType>
+char32_t ToChar32(CharType in) {
+ return static_cast<char32_t>(
+ static_cast<typename std::make_unsigned<CharType>::type>(in));
+}
+
+} // namespace
+
+namespace internal {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object. The delegation simplifies the implementation, which
+// uses the << operator and is thus easier to do outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+// - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+// - as a hexadecimal escape sequence (e.g. '\x7F'), or
+// - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+ kAsIs,
+ kHexEscape,
+ kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character. We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(char32_t c) { return 0x20 <= c && c <= 0x7E; }
+
+// Prints c (of type char, char8_t, char16_t, char32_t, or wchar_t) as a
+// character literal without the quotes, escaping it when necessary; returns how
+// c was formatted.
+template <typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+ const char32_t u_c = ToChar32(c);
+ switch (u_c) {
+ case L'\0':
+ *os << "\\0";
+ break;
+ case L'\'':
+ *os << "\\'";
+ break;
+ case L'\\':
+ *os << "\\\\";
+ break;
+ case L'\a':
+ *os << "\\a";
+ break;
+ case L'\b':
+ *os << "\\b";
+ break;
+ case L'\f':
+ *os << "\\f";
+ break;
+ case L'\n':
+ *os << "\\n";
+ break;
+ case L'\r':
+ *os << "\\r";
+ break;
+ case L'\t':
+ *os << "\\t";
+ break;
+ case L'\v':
+ *os << "\\v";
+ break;
+ default:
+ if (IsPrintableAscii(u_c)) {
+ *os << static_cast<char>(c);
+ return kAsIs;
+ } else {
+ ostream::fmtflags flags = os->flags();
+ *os << "\\x" << std::hex << std::uppercase << static_cast<int>(u_c);
+ os->flags(flags);
+ return kHexEscape;
+ }
+ }
+ return kSpecialEscape;
+}
+
+// Prints a char32_t c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char32_t c, ostream* os) {
+ switch (c) {
+ case L'\'':
+ *os << "'";
+ return kAsIs;
+ case L'"':
+ *os << "\\\"";
+ return kSpecialEscape;
+ default:
+ return PrintAsCharLiteralTo(c, os);
+ }
+}
+
+static const char* GetCharWidthPrefix(char) {
+ return "";
+}
+
+static const char* GetCharWidthPrefix(signed char) {
+ return "";
+}
+
+static const char* GetCharWidthPrefix(unsigned char) {
+ return "";
+}
+
+#ifdef __cpp_lib_char8_t
+static const char* GetCharWidthPrefix(char8_t) {
+ return "u8";
+}
+#endif
+
+static const char* GetCharWidthPrefix(char16_t) {
+ return "u";
+}
+
+static const char* GetCharWidthPrefix(char32_t) {
+ return "U";
+}
+
+static const char* GetCharWidthPrefix(wchar_t) {
+ return "L";
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
+ return PrintAsStringLiteralTo(ToChar32(c), os);
+}
+
+#ifdef __cpp_lib_char8_t
+static CharFormat PrintAsStringLiteralTo(char8_t c, ostream* os) {
+ return PrintAsStringLiteralTo(ToChar32(c), os);
+}
+#endif
+
+static CharFormat PrintAsStringLiteralTo(char16_t c, ostream* os) {
+ return PrintAsStringLiteralTo(ToChar32(c), os);
+}
+
+static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
+ return PrintAsStringLiteralTo(ToChar32(c), os);
+}
+
+// Prints a character c (of type char, char8_t, char16_t, char32_t, or wchar_t)
+// and its code. '\0' is printed as "'\\0'"; other unprintable characters are
+// likewise escaped using the standard C++ escape sequences.
+template <typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+ // First, print c as a literal in the most readable form we can find.
+ *os << GetCharWidthPrefix(c) << "'";
+ const CharFormat format = PrintAsCharLiteralTo(c, os);
+ *os << "'";
+
+ // To aid user debugging, we also print c's code in decimal, unless
+ // it's 0 (in which case c was printed as '\\0', making the code
+ // obvious).
+ if (c == 0)
+ return;
+ *os << " (" << static_cast<int>(c);
+
+ // For more convenience, we print c's code again in hexadecimal,
+ // unless c was already printed in the form '\x##' or the code is in
+ // [1, 9].
+ if (format == kHexEscape || (1 <= c && c <= 9)) {
+ // Do nothing.
+ } else {
+ *os << ", 0x" << String::FormatHexInt(static_cast<int>(c));
+ }
+ *os << ")";
+}
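+
+// Sample renderings (illustrative, not from the upstream source):
+//   PrintCharAndCodeTo<char>('a',  os)  ->  'a' (97, 0x61)
+//   PrintCharAndCodeTo<char>('\n', os)  ->  '\n' (10, 0xA)
+//   PrintCharAndCodeTo<char>('\0', os)  ->  '\0'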
+
+void PrintTo(unsigned char c, ::std::ostream* os) { PrintCharAndCodeTo(c, os); }
+void PrintTo(signed char c, ::std::ostream* os) { PrintCharAndCodeTo(c, os); }
+
+// Prints a wchar_t as a symbol if it is printable, or in escaped form
+// otherwise, and also prints its numeric code. L'\0' is printed as "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) { PrintCharAndCodeTo(wc, os); }
+
+// TODO(dcheng): Consider making this delegate to PrintCharAndCodeTo() as well.
+void PrintTo(char32_t c, ::std::ostream* os) {
+ *os << std::hex << "U+" << std::uppercase << std::setfill('0') << std::setw(4)
+ << static_cast<uint32_t>(c);
+}
+
+// Prints the given array of characters to the ostream. CharType must be either
+// char, char8_t, char16_t, char32_t, or wchar_t.
+// The array starts at begin, has length len, may include '\0' characters,
+// and may not be NUL-terminated.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static CharFormat PrintCharsAsStringTo(
+ const CharType* begin, size_t len, ostream* os) {
+ const char* const quote_prefix = GetCharWidthPrefix(*begin);
+ *os << quote_prefix << "\"";
+ bool is_previous_hex = false;
+ CharFormat print_format = kAsIs;
+ for (size_t index = 0; index < len; ++index) {
+ const CharType cur = begin[index];
+ if (is_previous_hex && IsXDigit(cur)) {
+ // Previous character is of '\x..' form and this character can be
+ // interpreted as another hexadecimal digit in its number. Break string to
+ // disambiguate.
+ *os << "\" " << quote_prefix << "\"";
+ }
+ is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;
+ // Remember if any characters required hex escaping.
+ if (is_previous_hex) {
+ print_format = kHexEscape;
+ }
+ }
+ *os << "\"";
+ return print_format;
+}
+
+// Prints a (const) char/wchar_t array of 'len' elements, starting at address
+// 'begin'. CharType must be either char or wchar_t.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void UniversalPrintCharArray(
+ const CharType* begin, size_t len, ostream* os) {
+ // The code
+ // const char kFoo[] = "foo";
+ // generates an array of 4, not 3, elements, with the last one being '\0'.
+ //
+ // Therefore when printing a char array, we don't print the last element if
+ // it's '\0', such that the output matches the string literal as it's
+ // written in the source code.
+ if (len > 0 && begin[len - 1] == '\0') {
+ PrintCharsAsStringTo(begin, len - 1, os);
+ return;
+ }
+
+ // If, however, the last element in the array is not '\0', e.g.
+ // const char kFoo[] = { 'f', 'o', 'o' };
+ // we must print the entire array. We also print a message to indicate
+ // that the array is not NUL-terminated.
+ PrintCharsAsStringTo(begin, len, os);
+ *os << " (no terminating NUL)";
+}
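+
+// For example (a sketch):
+//   const char kFoo[] = "foo";              // 4 elements, prints: "foo"
+//   const char kBar[] = { 'b', 'a', 'r' };  // 3 elements, prints:
+//                                           // "bar" (no terminating NUL)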
+
+// Prints a (const) char array of 'len' elements, starting at address 'begin'.
+void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+#ifdef __cpp_lib_char8_t
+// Prints a (const) char8_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const char8_t* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+#endif
+
+// Prints a (const) char16_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const char16_t* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+// Prints a (const) char32_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const char32_t* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+// Prints a (const) wchar_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+namespace {
+
+// Prints a null-terminated C-style string to the ostream.
+template <typename Char>
+void PrintCStringTo(const Char* s, ostream* os) {
+ if (s == nullptr) {
+ *os << "NULL";
+ } else {
+ *os << ImplicitCast_<const void*>(s) << " pointing to ";
+ PrintCharsAsStringTo(s, std::char_traits<Char>::length(s), os);
+ }
+}
+
+} // anonymous namespace
+
+void PrintTo(const char* s, ostream* os) { PrintCStringTo(s, os); }
+
+#ifdef __cpp_lib_char8_t
+void PrintTo(const char8_t* s, ostream* os) { PrintCStringTo(s, os); }
+#endif
+
+void PrintTo(const char16_t* s, ostream* os) { PrintCStringTo(s, os); }
+
+void PrintTo(const char32_t* s, ostream* os) { PrintCStringTo(s, os); }
+
+// The MSVC compiler can be configured to define wchar_t as a typedef
+// of unsigned short. Defining an overload for const wchar_t* in that case
+// would cause pointers to unsigned shorts to be printed as wide strings,
+// possibly accessing more memory than intended and causing invalid
+// memory accesses. MSVC defines the _NATIVE_WCHAR_T_DEFINED symbol when
+// wchar_t is implemented as a native type.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Prints the given wide C string to the ostream.
+void PrintTo(const wchar_t* s, ostream* os) { PrintCStringTo(s, os); }
+#endif // wchar_t is native
+
+namespace {
+
+bool ContainsUnprintableControlCodes(const char* str, size_t length) {
+ const unsigned char *s = reinterpret_cast<const unsigned char *>(str);
+
+ for (size_t i = 0; i < length; i++) {
+ unsigned char ch = *s++;
+ if (std::iscntrl(ch)) {
+ switch (ch) {
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ default:
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool IsUTF8TrailByte(unsigned char t) { return 0x80 <= t && t <= 0xbf; }
+
+bool IsValidUTF8(const char* str, size_t length) {
+ const unsigned char *s = reinterpret_cast<const unsigned char *>(str);
+
+ for (size_t i = 0; i < length;) {
+ unsigned char lead = s[i++];
+
+ if (lead <= 0x7f) {
+ continue; // single-byte character (ASCII) 0..7F
+ }
+ if (lead < 0xc2) {
+ return false; // trail byte or non-shortest form
+ } else if (lead <= 0xdf && (i + 1) <= length && IsUTF8TrailByte(s[i])) {
+ ++i; // 2-byte character
+ } else if (0xe0 <= lead && lead <= 0xef && (i + 2) <= length &&
+ IsUTF8TrailByte(s[i]) &&
+ IsUTF8TrailByte(s[i + 1]) &&
+ // check for non-shortest form and surrogate
+ (lead != 0xe0 || s[i] >= 0xa0) &&
+ (lead != 0xed || s[i] < 0xa0)) {
+ i += 2; // 3-byte character
+ } else if (0xf0 <= lead && lead <= 0xf4 && (i + 3) <= length &&
+ IsUTF8TrailByte(s[i]) &&
+ IsUTF8TrailByte(s[i + 1]) &&
+ IsUTF8TrailByte(s[i + 2]) &&
+ // check for non-shortest form
+ (lead != 0xf0 || s[i] >= 0x90) &&
+ (lead != 0xf4 || s[i] < 0x90)) {
+ i += 3; // 4-byte character
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
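+
+// A few byte sequences and their verdicts (illustrative):
+//   41       -> valid   (ASCII 'A')
+//   C3 A9    -> valid   (2-byte encoding of U+00E9)
+//   80       -> invalid (lone trail byte)
+//   C0 AF    -> invalid (overlong form; leads below C2 are rejected)
+//   ED A0 80 -> invalid (UTF-16 surrogate, excluded by the 0xED check)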
+
+void ConditionalPrintAsText(const char* str, size_t length, ostream* os) {
+ if (!ContainsUnprintableControlCodes(str, length) &&
+ IsValidUTF8(str, length)) {
+ *os << "\n As Text: \"" << str << "\"";
+ }
+}
+
+} // anonymous namespace
+
+void PrintStringTo(const ::std::string& s, ostream* os) {
+ if (PrintCharsAsStringTo(s.data(), s.size(), os) == kHexEscape) {
+ if (GTEST_FLAG(print_utf8)) {
+ ConditionalPrintAsText(s.data(), s.size(), os);
+ }
+ }
+}
+
+#ifdef __cpp_lib_char8_t
+void PrintU8StringTo(const ::std::u8string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif
+
+void PrintU16StringTo(const ::std::u16string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+void PrintU32StringTo(const ::std::u32string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+#if GTEST_HAS_STD_WSTRING
+void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+} // namespace internal
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by omitting the stack trace
+// in it.
+std::string TestPartResult::ExtractSummary(const char* message) {
+ const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+ return stack_trace == nullptr ? message : std::string(message, stack_trace);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+ return os << internal::FormatFileLocation(result.file_name(),
+ result.line_number())
+ << " "
+ << (result.type() == TestPartResult::kSuccess
+ ? "Success"
+ : result.type() == TestPartResult::kSkip
+ ? "Skipped"
+ : result.type() == TestPartResult::kFatalFailure
+ ? "Fatal failure"
+ : "Non-fatal failure")
+ << ":\n"
+ << result.message() << std::endl;
+}
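+
+// A formatted result might look like this (hypothetical file and line):
+//   foo_test.cc:42: Non-fatal failure:
+//   Expected equality of these values...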
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+ array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+ if (index < 0 || index >= size()) {
+ printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+ internal::posix::Abort();
+ }
+
+ return array_[static_cast<size_t>(index)];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+ return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+ : has_new_fatal_failure_(false),
+ original_reporter_(GetUnitTestImpl()->
+ GetTestPartResultReporterForCurrentThread()) {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+ original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+ const TestPartResult& result) {
+ if (result.fatally_failed())
+ has_new_fatal_failure_ = true;
+ original_reporter_->ReportTestPartResult(result);
+}
+
+} // namespace internal
+
+} // namespace testing
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+namespace testing {
+namespace internal {
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+ while (IsSpace(*str))
+ str++;
+ return str;
+}
+
+static std::vector<std::string> SplitIntoTestNames(const char* src) {
+ std::vector<std::string> name_vec;
+ src = SkipSpaces(src);
+ for (; src != nullptr; src = SkipComma(src)) {
+ name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
+ }
+ return name_vec;
+}
+
+// Verifies that registered_tests match the test names in
+// registered_tests_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestSuitePState::VerifyRegisteredTestNames(
+ const char* test_suite_name, const char* file, int line,
+ const char* registered_tests) {
+ RegisterTypeParameterizedTestSuite(test_suite_name, CodeLocation(file, line));
+
+ typedef RegisteredTestsMap::const_iterator RegisteredTestIter;
+ registered_ = true;
+
+ std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
+
+ Message errors;
+
+ std::set<std::string> tests;
+ for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
+ name_it != name_vec.end(); ++name_it) {
+ const std::string& name = *name_it;
+ if (tests.count(name) != 0) {
+ errors << "Test " << name << " is listed more than once.\n";
+ continue;
+ }
+
+ if (registered_tests_.count(name) != 0) {
+ tests.insert(name);
+ } else {
+ errors << "No test named " << name
+ << " can be found in this test suite.\n";
+ }
+ }
+
+ for (RegisteredTestIter it = registered_tests_.begin();
+ it != registered_tests_.end();
+ ++it) {
+ if (tests.count(it->first) == 0) {
+ errors << "You forgot to list test " << it->first << ".\n";
+ }
+ }
+
+ const std::string& errors_str = errors.GetString();
+ if (errors_str != "") {
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors_str.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+
+ return registered_tests;
+}
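+
+// This is the check behind REGISTER_TYPED_TEST_SUITE_P. A minimal sketch
+// of the usage being verified (hypothetical suite and test names):
+//
+//   TYPED_TEST_SUITE_P(FooTest);
+//   TYPED_TEST_P(FooTest, DoesBlah) { /* ... */ }
+//   REGISTER_TYPED_TEST_SUITE_P(FooTest, DoesBlah);
+//
+// Listing a name twice, or listing a name with no matching TYPED_TEST_P
+// definition, aborts with the errors accumulated above.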
+
+} // namespace internal
+} // namespace testing
--- /dev/null
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file defines the public API for Google Test. It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to a limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_H_
+
+#include <cstddef>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms. All macros ending with _ and symbols defined in an
+// internal namespace are subject to change without notice. Code
+// outside Google Test MUST NOT USE THEM DIRECTLY. Macros that don't
+// end with _ are part of Google Test's public API and can be used by
+// code outside Google Test.
+//
+// This file is fundamental to Google Test. All other Google Test source
+// files are expected to #include this. Therefore, it cannot #include
+// any other Google Test header.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// Environment-describing macros
+// -----------------------------
+//
+// Google Test can be used in many different environments. Macros in
+// this section tell Google Test what kind of environment it is being
+// used in, such that Google Test can provide environment-specific
+// features and implementations.
+//
+// Google Test tries to automatically detect the properties of its
+// environment, so users usually don't need to worry about these
+// macros. However, the automatic detection is not perfect.
+// Sometimes it's necessary for a user to define some of the following
+// macros in the build script to override Google Test's decisions.
+//
+// If the user doesn't define a macro in the list, Google Test will
+// provide a default definition. After this header is #included, all
+// macros in this list will be defined to either 1 or 0.
+//
+// Notes to maintainers:
+// - Each macro here is a user-tweakable knob; do not grow the list
+// lightly.
+// - Use #if to key off these macros. Don't use #ifdef or "#if
+// defined(...)", which will not work as these macros are ALWAYS
+// defined.
+//
+// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
+// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
+// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular
+// expressions are/aren't available.
+// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
+// is/isn't available.
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
+// enabled.
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
+// std::wstring does/doesn't work (Google Test can
+// be used where std::wstring is unavailable).
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_HAS_STREAM_REDIRECTION
+// - Define it to 1/0 to indicate whether the
+// platform supports I/O stream redirection using
+// dup() and dup2().
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
+// GTEST_DEFAULT_DEATH_TEST_STYLE
+// - The default value of --gtest_death_test_style.
+// The legacy default has been "fast" in the open
+// source version since 2008. The recommended value
+// is "threadsafe", and can be set in
+// custom/gtest-port.h.
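+//
+// For illustration, a build script might pin some of these knobs
+// explicitly (hypothetical command line):
+//
+//   g++ -DGTEST_HAS_PTHREAD=0 -DGTEST_HAS_RTTI=0 ... gtest-all.cc
+//
+// after which Google Test skips its pthread- and RTTI-dependent
+// features instead of auto-detecting them.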
+
+// Platform-indicating macros
+// --------------------------
+//
+// Macros indicating the platform on which Google Test is being used
+// (a macro is defined to 1 if compiled on the given platform;
+// otherwise UNDEFINED -- it's never defined to 0).  Google Test
+// defines these macros automatically. Code outside Google Test MUST
+// NOT define them.
+//
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_DRAGONFLY - DragonFlyBSD
+// GTEST_OS_FREEBSD - FreeBSD
+// GTEST_OS_FUCHSIA - Fuchsia
+// GTEST_OS_GNU_KFREEBSD - GNU/kFreeBSD
+// GTEST_OS_HAIKU - Haiku
+// GTEST_OS_HPUX - HP-UX
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_LINUX_ANDROID - Google Android
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_IOS - iOS
+// GTEST_OS_NACL - Google Native Client (NaCl)
+// GTEST_OS_NETBSD - NetBSD
+// GTEST_OS_OPENBSD - OpenBSD
+// GTEST_OS_OS2 - OS/2
+// GTEST_OS_QNX - QNX
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_WINDOWS_PHONE - Windows Phone
+// GTEST_OS_WINDOWS_RT - Windows Store App/WinRT
+// GTEST_OS_WINDOWS_TV_TITLE - Windows TV title
+// GTEST_OS_XTENSA - Xtensa-based targets
+// GTEST_OS_ZOS - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support. Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable. If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// It is possible that none of the GTEST_OS_* macros are defined.
+
+// Feature-indicating macros
+// -------------------------
+//
+// Macros indicating which Google Test features are available (a macro
+// is defined to 1 if the corresponding feature is supported;
+// otherwise it is left UNDEFINED -- it is never defined to 0). Google Test
+// defines these macros automatically. Code outside Google Test MUST
+// NOT define them.
+//
+// These macros are public so that portable tests can be written.
+// Such tests typically surround code using a feature with an #if
+// which controls that code. For example:
+//
+// #if GTEST_HAS_DEATH_TEST
+// EXPECT_DEATH(DoSomethingDeadly(), ".*");
+// #endif
+//
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_IS_THREADSAFE - Google Test is thread-safe.
+// GOOGLETEST_CM0007 DO NOT DELETE
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with
+// GTEST_HAS_POSIX_RE (see above) which users can
+// define themselves.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used; this macro and
+// GTEST_USES_POSIX_RE above are mutually exclusive.
+
+// Misc public macros
+// ------------------
+//
+// GTEST_FLAG(flag_name) - references the variable corresponding to
+// the given Google Test flag.
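+//
+// For example (a sketch; the macro expands to the flag's variable name):
+//
+//   ::testing::GTEST_FLAG(death_test_style) = "threadsafe";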
+
+// Internal utilities
+// ------------------
+//
+// The following macros and utilities are for Google Test's INTERNAL
+// use only. Code outside Google Test MUST NOT USE THEM DIRECTLY.
+//
+// Macros for basic C++ coding:
+// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables copy operator=.
+// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+// GTEST_DISALLOW_MOVE_ASSIGN_ - disables move operator=.
+// GTEST_DISALLOW_MOVE_AND_ASSIGN_ - disables move ctor and operator=.
+// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
+// GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is
+// suppressed (constant conditional).
+// GTEST_INTENTIONAL_CONST_COND_POP_ - finish code section where MSVC C4127
+// is suppressed.
+// GTEST_INTERNAL_HAS_ANY - for enabling UniversalPrinter<std::any> or
+// UniversalPrinter<absl::any> specializations.
+// GTEST_INTERNAL_HAS_OPTIONAL - for enabling UniversalPrinter<std::optional>
+// or
+// UniversalPrinter<absl::optional>
+// specializations.
+// GTEST_INTERNAL_HAS_STRING_VIEW - for enabling Matcher<std::string_view> or
+// Matcher<absl::string_view>
+// specializations.
+// GTEST_INTERNAL_HAS_VARIANT - for enabling UniversalPrinter<std::variant> or
+// UniversalPrinter<absl::variant>
+// specializations.
+//
+// Synchronization:
+// Mutex, MutexLock, ThreadLocal, GetThreadCount()
+// - synchronization primitives.
+//
+// Regular expressions:
+// RE - a simple regular expression class using the POSIX
+// Extended Regular Expression syntax on UNIX-like platforms
+// GOOGLETEST_CM0008 DO NOT DELETE
+// or a reduced regular expression syntax on other
+// platforms, including Windows.
+// Logging:
+// GTEST_LOG_() - logs messages at the specified severity level.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
+// CaptureStderr() - starts capturing stderr.
+// GetCapturedStderr() - stops capturing stderr and returns the captured
+// string.
+//
+// Integer types:
+// TypeWithSize - maps a size in bytes to an integer type of that width.
+// TimeInMillis - a time duration in milliseconds (a 64-bit integer type).
+// BiggestInt - the biggest signed integer type.
+//
+// Command-line utilities:
+// GTEST_DECLARE_*() - declares a flag.
+// GTEST_DEFINE_*() - defines a flag.
+// GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv() - gets the value of an environment variable.
+// BoolFromGTestEnv() - parses a bool environment variable.
+// Int32FromGTestEnv() - parses an int32_t environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+//
+// Deprecation warnings:
+// GTEST_INTERNAL_DEPRECATED(message) - attribute marking a function as
+// deprecated; calling a marked function
+// should generate a compiler warning
+
+#include <ctype.h> // for isspace, etc
+#include <stddef.h> // for ptrdiff_t
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cerrno>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <iostream> // NOLINT
+#include <locale>
+#include <memory>
+#include <string> // NOLINT
+#include <tuple>
+#include <vector> // NOLINT
+
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Injection point for custom user configurations. See README for details
+//
+// ** Custom implementation starts here **
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use are permitted under the BSD 3-clause license
+// reproduced in full above.
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file defines the GTEST_OS_* macro.
+// It is separate from gtest-port.h so that custom/gtest-port.h can include it.
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)
+# define GTEST_OS_WINDOWS_MINGW 1
+# define GTEST_OS_WINDOWS 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+# define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(WINAPI_FAMILY)
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
+# define GTEST_OS_WINDOWS_PHONE 1
+# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+# define GTEST_OS_WINDOWS_RT 1
+# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_TV_TITLE)
+# define GTEST_OS_WINDOWS_PHONE 1
+# define GTEST_OS_WINDOWS_TV_TITLE 1
+# else
+ // WINAPI_FAMILY defined but no known partition matched.
+ // Default to desktop.
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif
+# else
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif // _WIN32_WCE
+#elif defined __OS2__
+# define GTEST_OS_OS2 1
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+# include <TargetConditionals.h>
+# if TARGET_OS_IPHONE
+# define GTEST_OS_IOS 1
+# endif
+#elif defined __DragonFly__
+# define GTEST_OS_DRAGONFLY 1
+#elif defined __FreeBSD__
+# define GTEST_OS_FREEBSD 1
+#elif defined __Fuchsia__
+# define GTEST_OS_FUCHSIA 1
+#elif defined(__GLIBC__) && defined(__FreeBSD_kernel__)
+# define GTEST_OS_GNU_KFREEBSD 1
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+# define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#elif defined __NetBSD__
+# define GTEST_OS_NETBSD 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
+#elif defined(__HAIKU__)
+# define GTEST_OS_HAIKU 1
+#elif defined ESP8266
+# define GTEST_OS_ESP8266 1
+#elif defined ESP32
+# define GTEST_OS_ESP32 1
+#elif defined(__XTENSA__)
+# define GTEST_OS_XTENSA 1
+#endif // __CYGWIN__
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+
+#if !defined(GTEST_DEV_EMAIL_)
+# define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+# define GTEST_FLAG_PREFIX_ "gtest_"
+# define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+# define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+# define GTEST_NAME_ "Google Test"
+# define GTEST_PROJECT_URL_ "https://github.com/google/googletest/"
+#endif // !defined(GTEST_DEV_EMAIL_)
+
+#if !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+# define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest"
+#endif // !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+ (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
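+
+// A sketch of how code could key off the encoded version (illustrative
+// guard only):
+//
+//   #if defined(GTEST_GCC_VER_) && GTEST_GCC_VER_ >= 40302
+//   // gcc 4.3.2 or newer.
+//   #endif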
+
+// Macros for disabling Microsoft Visual C++ warnings.
+//
+// GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385)
+// /* code that triggers warnings C4800 and C4385 */
+// GTEST_DISABLE_MSC_WARNINGS_POP_()
+#if defined(_MSC_VER)
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: warnings))
+# define GTEST_DISABLE_MSC_WARNINGS_POP_() \
+ __pragma(warning(pop))
+#else
+// Not all compilers are MSVC
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings)
+# define GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif
+
+// Clang on Windows does not understand MSVC's pragma warning.
+// We need a clang-specific way to disable the function deprecation warning.
+#ifdef __clang__
+# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") \
+ _Pragma("clang diagnostic ignored \"-Wdeprecated-implementations\"")
+#define GTEST_DISABLE_MSC_DEPRECATED_POP_() \
+ _Pragma("clang diagnostic pop")
+#else
+# define GTEST_DISABLE_MSC_DEPRECATED_PUSH_() \
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
+# define GTEST_DISABLE_MSC_DEPRECATED_POP_() \
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif
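+
+// Usage sketch (CallDeprecatedApi is a hypothetical function):
+//
+//   GTEST_DISABLE_MSC_DEPRECATED_PUSH_()
+//   CallDeprecatedApi();
+//   GTEST_DISABLE_MSC_DEPRECATED_POP_()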
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if GTEST_OS_WINDOWS
+# if !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+# endif
+// In order to avoid having to include <windows.h>, use forward declaration
+#if GTEST_OS_WINDOWS_MINGW && !defined(__MINGW64_VERSION_MAJOR)
+// MinGW defines _CRITICAL_SECTION and _RTL_CRITICAL_SECTION as two
+// separate (equivalent) structs instead of using a typedef.
+typedef struct _CRITICAL_SECTION GTEST_CRITICAL_SECTION;
+#else
+// Assume CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION.
+// This assumption is verified by
+// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION.
+typedef struct _RTL_CRITICAL_SECTION GTEST_CRITICAL_SECTION;
+#endif
+#elif GTEST_OS_XTENSA
+#include <unistd.h>
+// Xtensa toolchains define strcasecmp in the string.h header instead of
+// strings.h. string.h is already included.
+#else
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#endif // GTEST_OS_WINDOWS
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h> // NOLINT
+#endif
+
+// Defines GTEST_HAS_POSIX_RE to 1 if and only if Google Test can use
+// POSIX regular expressions (unless the user has defined it already).
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS && !GTEST_OS_XTENSA)
+# endif
+#endif
+
+#if GTEST_USES_PCRE
+// The appropriate headers have already been included.
+
+#elif GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h> // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif // GTEST_USES_PCRE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) && defined(_CPPUNWIND)
+// MSVC defines _CPPUNWIND to 1 if and only if exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__BORLANDC__)
+// C++Builder's implementation of the STL uses the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+# ifndef _HAS_EXCEPTIONS
+# define _HAS_EXCEPTIONS 1
+# endif // _HAS_EXCEPTIONS
+# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__clang__)
+// clang defines __EXCEPTIONS if and only if exceptions are enabled before
+// clang r220714, but if and only if cleanups are enabled after that. In
+// Obj-C++ files there can be cleanups for ObjC exceptions even when C++
+// exceptions are disabled. clang has __has_feature(cxx_exceptions), which
+// checks for C++ exceptions starting at clang r206352 but checked for
+// cleanups prior to that. To reliably check for C++ exception availability
+// with clang, check for both:
+// __EXCEPTIONS && __has_feature(cxx_exceptions).
+# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 if and only if exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 if and only if exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in HP aCC compiler. It has to
+// be turned off with the +noeh compiler option if desired.
+# define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+# define GTEST_HAS_EXCEPTIONS 0
+# endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// Cygwin 1.7 and below don't support ::std::wstring.
+// Solaris' libc++ doesn't support it either. Android has
+// no support for it, at least as recently as Froyo (2.2).
+#define GTEST_HAS_STD_WSTRING \
+ (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ GTEST_OS_HAIKU || GTEST_OS_ESP32 || GTEST_OS_ESP8266 || GTEST_OS_XTENSA))
+
+#endif // GTEST_HAS_STD_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+# ifdef _CPPRTTI // MSVC defines this macro if and only if RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI if and only if RTTI is
+// enabled.
+# elif defined(__GNUC__)
+
+# ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. It is not clear whether this is an STL
+// or a toolchain bug, so disable RTTI when this combination is detected.
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+ !defined(__EXCEPTIONS)
+# define GTEST_HAS_RTTI 0
+# else
+# define GTEST_HAS_RTTI 1
+# endif // GTEST_OS_LINUX_ANDROID && _STLPORT_MAJOR && !__EXCEPTIONS
+# else
+# define GTEST_HAS_RTTI 0
+# endif // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual
+// recommends using __has_feature instead. __has_feature(cxx_rtti) is
+// supported since 2.7, the first version with C++ support.
+# elif defined(__clang__)
+
+# define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0, IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+# ifdef __RTTI_ALL__
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+
+# endif // _MSC_VER
+
+#endif // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we make reasonable assumptions about
+// which platforms have pthreads support.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+#define GTEST_HAS_PTHREAD \
+ (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX || GTEST_OS_QNX || \
+ GTEST_OS_FREEBSD || GTEST_OS_NACL || GTEST_OS_NETBSD || GTEST_OS_FUCHSIA || \
+ GTEST_OS_DRAGONFLY || GTEST_OS_GNU_KFREEBSD || GTEST_OS_OPENBSD || \
+ GTEST_OS_HAIKU)
+#endif // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h> // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h> // NOLINT
+#endif
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+# if GTEST_OS_LINUX_ANDROID
+// On Android, clone() became available at different API levels for each 32-bit
+// architecture.
+# if defined(__LP64__) || \
+ (defined(__arm__) && __ANDROID_API__ >= 9) || \
+ (defined(__mips__) && __ANDROID_API__ >= 12) || \
+ (defined(__i386__) && __ANDROID_API__ >= 17)
+# define GTEST_HAS_CLONE 1
+# else
+# define GTEST_HAS_CLONE 0
+# endif
+# else
+# define GTEST_HAS_CLONE 1
+# endif
+# else
+# define GTEST_HAS_CLONE 0
+# endif // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \
+ GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 || GTEST_OS_XTENSA
+# define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+# define GTEST_HAS_STREAM_REDIRECTION 1
+# endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || ...
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests. Death tests are not enabled
+// on platforms where abort() pops up a dialog window that cannot be
+// suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_MAC && !GTEST_OS_IOS) || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER) || GTEST_OS_WINDOWS_MINGW || \
+ GTEST_OS_AIX || GTEST_OS_HPUX || GTEST_OS_OPENBSD || GTEST_OS_QNX || \
+ GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_FUCHSIA || \
+ GTEST_OS_DRAGONFLY || GTEST_OS_GNU_KFREEBSD || GTEST_OS_HAIKU)
+# define GTEST_HAS_DEATH_TEST 1
+#endif
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || defined(_MSC_VER) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_AIX || GTEST_OS_OS2)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX || GTEST_OS_GNU_KFREEBSD || GTEST_OS_DRAGONFLY || \
+ GTEST_OS_FREEBSD || GTEST_OS_NETBSD || GTEST_OS_OPENBSD
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding. This leads to problems with code like:
+//
+// if (gate)
+// ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT
+#endif
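+
+// A sketch of the intended use inside a macro expansion (illustrative;
+// GTEST_CHECK_ further below uses exactly this shape):
+//
+//   #define MY_ASSERT_(cond) \
+//     GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+//     if (cond) \
+//       ; \
+//     else \
+//       ReportFailure()
+//
+// The "switch (0) case 0: default:" prefix keeps a surrounding if/else
+// from binding to the macro's internal "else".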
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used. This is useful when all interesting logic happens inside the
+// constructor and/or destructor. Example:
+//
+// struct Foo {
+// Foo() { ... }
+// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#elif defined(__clang__)
+# if __has_attribute(unused)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+# endif
+#endif
+#ifndef GTEST_ATTRIBUTE_UNUSED_
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// Use this annotation before a function that takes a printf format string.
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(COMPILER_ICC)
+# if defined(__MINGW_PRINTF_FORMAT)
+// MinGW has two different printf implementations. Ensure the format macro
+// matches the selected implementation. See
+// https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/.
+# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \
+ __attribute__((__format__(__MINGW_PRINTF_FORMAT, string_index, \
+ first_to_check)))
+# else
+# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check) \
+ __attribute__((__format__(__printf__, string_index, first_to_check)))
+# endif
+#else
+# define GTEST_ATTRIBUTE_PRINTF_(string_index, first_to_check)
+#endif
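+
+// Usage sketch (LogF is a hypothetical function; the format string is
+// parameter 1 and the variadic arguments start at position 2):
+//
+//   void LogF(const char* fmt, ...) GTEST_ATTRIBUTE_PRINTF_(1, 2);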
+
+
+// A macro to disallow copy operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type) \
+ type& operator=(type const &) = delete
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type) \
+ type(type const&) = delete; \
+ type& operator=(type const&) = delete
+
+// A macro to disallow move operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_MOVE_ASSIGN_(type) \
+ type& operator=(type &&) noexcept = delete
+
+// A macro to disallow move constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_MOVE_AND_ASSIGN_(type) \
+ type(type&&) noexcept = delete; \
+ type& operator=(type&&) noexcept = delete
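+
+// Usage sketch (Widget is a hypothetical class):
+//
+//   class Widget {
+//    public:
+//     Widget() = default;
+//
+//    private:
+//     GTEST_DISALLOW_COPY_AND_ASSIGN_(Widget);
+//   };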
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif // __GNUC__ && !COMPILER_ICC
+
+// The MS C++ compiler emits a warning when a conditional expression is
+// compile-time constant. In some contexts this warning is a false positive
+// and needs to be suppressed. Use the following two macros in such cases:
+//
+// GTEST_INTENTIONAL_CONST_COND_PUSH_()
+// while (true) {
+// GTEST_INTENTIONAL_CONST_COND_POP_()
+// }
+# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)
+# define GTEST_INTENTIONAL_CONST_COND_POP_() \
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+# define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+# define GTEST_HAS_SEH 0
+# endif
+
+#endif // GTEST_HAS_SEH
+
+#ifndef GTEST_IS_THREADSAFE
+
+#define GTEST_IS_THREADSAFE \
+ (GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ || \
+ (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) || \
+ GTEST_HAS_PTHREAD)
+
+#endif // GTEST_IS_THREADSAFE
+
+// GTEST_API_ qualifies all symbols that must be exported. The definitions below
+// are guarded by #ifndef to give embedders a chance to define GTEST_API_ in
+// gtest/internal/custom/gtest-port.h
+#ifndef GTEST_API_
+
+#ifdef _MSC_VER
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllexport)
+# endif
+#elif __GNUC__ >= 4 || defined(__clang__)
+# define GTEST_API_ __attribute__((visibility ("default")))
+#endif // _MSC_VER
+
+#endif // GTEST_API_
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif // GTEST_API_
+
+#ifndef GTEST_DEFAULT_DEATH_TEST_STYLE
+# define GTEST_DEFAULT_DEATH_TEST_STYLE "fast"
+#endif // GTEST_DEFAULT_DEATH_TEST_STYLE
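+
+// A sketch of how an embedder could select the recommended style instead,
+// from custom/gtest-port.h (see the notes near the top of this header):
+//
+//   #define GTEST_DEFAULT_DEATH_TEST_STYLE "threadsafe"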
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if !defined(GTEST_HAS_CXXABI_H_)
+# if defined(__GLIBCXX__) || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER))
+# define GTEST_HAS_CXXABI_H_ 1
+# else
+# define GTEST_HAS_CXXABI_H_ 0
+# endif
+#endif
+
+// A function level attribute to disable checking for use of uninitialized
+// memory when built with MemorySanitizer.
+#if defined(__clang__)
+# if __has_feature(memory_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \
+ __attribute__((no_sanitize_memory))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+# endif // __has_feature(memory_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+#endif // __clang__
+
+// A function level attribute to disable AddressSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \
+ __attribute__((no_sanitize_address))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+# endif // __has_feature(address_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+#endif // __clang__
+
+// A function level attribute to disable HWAddressSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(hwaddress_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_ \
+ __attribute__((no_sanitize("hwaddress")))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+# endif // __has_feature(hwaddress_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_HWADDRESS_
+#endif // __clang__
+
+// A function level attribute to disable ThreadSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(thread_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \
+ __attribute__((no_sanitize_thread))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+# endif // __has_feature(thread_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+#endif // __clang__
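+
+// Usage sketch (ReadArbitraryMemory is a hypothetical function); the
+// attribute is placed on the declaration, after the parameter list:
+//
+//   void ReadArbitraryMemory() GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_;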
+
+namespace testing {
+
+class Message;
+
+// Legacy imports for backwards compatibility.
+// New code should use std:: names directly.
+using std::get;
+using std::make_tuple;
+using std::tuple;
+using std::tuple_element;
+using std::tuple_size;
+
+namespace internal {
+
+// A secret type that Google Test users don't know about. It has no
+// definition on purpose. Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// GTEST_COMPILE_ASSERT_ is a legacy macro used to verify that a compile-time
+// expression is true (in new code, use static_assert instead). For
+// example, you could use it to verify the size of a static array:
+//
+// GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,
+// names_incorrect_size);
+//
+// The second argument to the macro must be a valid C++ identifier. If the
+// expression is false, the compiler will issue an error containing it.
+#define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg)
+
+// A helper for suppressing warnings on a constant condition. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines RE.
+
+#if GTEST_USES_PCRE
+// if used, PCRE is injected by custom/gtest-port.h
+#elif GTEST_USES_POSIX_RE || GTEST_USES_SIMPLE_RE
+
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
+ // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
+
+ RE(const char* regex) { Init(regex); } // NOLINT
+ ~RE();
+
+ // Returns the string representation of the regex.
+ const char* pattern() const { return pattern_; }
+
+ // FullMatch(str, re) returns true if and only if regular expression re
+ // matches the entire str.
+ // PartialMatch(str, re) returns true if and only if regular expression re
+ // matches a substring of str (including str itself).
+ static bool FullMatch(const ::std::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::std::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+ static bool FullMatch(const char* str, const RE& re);
+ static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+ void Init(const char* regex);
+ const char* pattern_;
+ bool is_valid_;
+
+# if GTEST_USES_POSIX_RE
+
+ regex_t full_regex_; // For FullMatch().
+ regex_t partial_regex_; // For PartialMatch().
+
+# else // GTEST_USES_SIMPLE_RE
+
+ const char* full_pattern_; // For FullMatch();
+
+# endif
+};
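+
+// A usage sketch for RE (illustrative pattern and inputs):
+//
+//   const RE re("a.*z");
+//   bool full = RE::FullMatch("abcz", re);       // true: whole string.
+//   bool part = RE::PartialMatch("xxazyy", re);  // true: substring "az".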
+
+#endif // GTEST_USES_PCRE
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+ int line);
+
+// Defines logging utilities:
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+ GTEST_INFO,
+ GTEST_WARNING,
+ GTEST_ERROR,
+ GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
+
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#if !defined(GTEST_LOG_)
+
+# define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
+
+inline void LogToStderr() {}
+inline void FlushInfoLog() { fflush(nullptr); }
+
+#endif // !defined(GTEST_LOG_)
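+
+// Usage sketch:
+//
+//   GTEST_LOG_(WARNING) << "Resource usage is unexpectedly high.";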
+
+#if !defined(GTEST_CHECK_)
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition, and if it is not satisfied, prints a message
+// about the violation (including the condition itself plus any additional
+// message streamed into it) and then aborts the program. It aborts
+// regardless of whether the program is built in debug mode or not.
+# define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+#endif // !defined(GTEST_CHECK_)
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+ GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+ << gtest_error
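+
+// Usage sketch (this mirrors how the header itself calls the macro in the
+// synchronization code further below):
+//
+//   GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr));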
+
+// Transforms "T" into "const T&" according to standard reference collapsing
+// rules (this is only needed as a backport for C++98 compilers that do not
+// support reference collapsing). Specifically, it transforms:
+//
+// char ==> const char&
+// const char ==> const char&
+// char& ==> char&
+// const char& ==> const char&
+//
+// Note that the non-const reference will not have "const" added. This is
+// standard, and necessary so that "T" can always bind to "const T&".
+template <typename T>
+struct ConstRef { typedef const T& type; };
+template <typename T>
+struct ConstRef<T&> { typedef T& type; };
+
+// The argument T must depend on some template parameters.
+#define GTEST_REFERENCE_TO_CONST_(T) \
+ typename ::testing::internal::ConstRef<T>::type
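+
+// Usage sketch (T must be a dependent type, per the note above):
+//
+//   template <typename T>
+//   void Consume(GTEST_REFERENCE_TO_CONST_(T) value);  // i.e., const T&.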
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*). When you use ImplicitCast_, the compiler checks that
+// the cast is safe. Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+// ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late. It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed. When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo? It
+// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
+// when you downcast, you should use this macro. In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not). In normal mode, we do the efficient static_cast<>
+// instead. Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+// This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (e.g., code like this):
+// if (dynamic_cast<Subclass1*>(foo)) HandleASubclass1Object(foo);
+// if (dynamic_cast<Subclass2*>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From> // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) { // so we only accept pointers
+ // Ensures that To is a sub-type of From *. This test is here only
+ // for compile-time type checking, and has no overhead in an
+ // optimized build at run-time, as it will be optimized away
+ // completely.
+ GTEST_INTENTIONAL_CONST_COND_PUSH_()
+ if (false) {
+ GTEST_INTENTIONAL_CONST_COND_POP_()
+ const To to = nullptr;
+ ::testing::internal::ImplicitCast_<From*>(to);
+ }
+
+#if GTEST_HAS_RTTI
+ // RTTI: debug mode only!
+ GTEST_CHECK_(f == nullptr || dynamic_cast<To>(f) != nullptr);
+#endif
+ return static_cast<To>(f);
+}
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+#endif
+
+#if GTEST_HAS_DOWNCAST_
+ return ::down_cast<Derived*>(base);
+#elif GTEST_HAS_RTTI
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
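+
+// Usage sketch (Base and Derived are hypothetical types; *base must really
+// be a Derived, per the contract above):
+//
+//   Base* base = new Derived;
+//   Derived* derived = CheckedDowncastToActualType<Derived>(base);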
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stdout/stderr capturers:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
+// CaptureStderr - starts capturing stderr.
+// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ std::string GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ std::string GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+// Returns the size (in bytes) of a file.
+GTEST_API_ size_t GetFileSize(FILE* file);
+
+// Reads the entire content of a file as a string.
+GTEST_API_ std::string ReadEntireFile(FILE* file);
+
+// All command line arguments.
+GTEST_API_ std::vector<std::string> GetArgvs();
+
+#if GTEST_HAS_DEATH_TEST
+
+std::vector<std::string> GetInjectableArgvs();
+// Deprecated: pass the args vector by value instead.
+void SetInjectableArgvs(const std::vector<std::string>* new_argvs);
+void SetInjectableArgvs(const std::vector<std::string>& new_argvs);
+void ClearInjectableArgvs();
+
+#endif // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+#if GTEST_IS_THREADSAFE
+# if GTEST_HAS_PTHREAD
+// Sleeps for (roughly) n milliseconds. This function is only for testing
+// Google Test's own constructs. Don't use it in user tests, either
+// directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ nanosleep(&time, nullptr);
+}
+# endif // GTEST_HAS_PTHREAD
+
+# if GTEST_HAS_NOTIFICATION_
+// Notification has already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_HAS_PTHREAD
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr));
+ }
+ ~Notification() {
+ pthread_mutex_destroy(&mutex_);
+ }
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() {
+ pthread_mutex_lock(&mutex_);
+ notified_ = true;
+ pthread_mutex_unlock(&mutex_);
+ }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+ for (;;) {
+ pthread_mutex_lock(&mutex_);
+ const bool notified = notified_;
+ pthread_mutex_unlock(&mutex_);
+ if (notified)
+ break;
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ pthread_mutex_t mutex_;
+ bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+GTEST_API_ void SleepMilliseconds(int n);
+
+// Provides leak-safe Windows kernel handle ownership.
+// Used in death tests and in threading support.
+class GTEST_API_ AutoHandle {
+ public:
+ // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to
+ // avoid including <windows.h> in this header file. Including <windows.h> is
+ // undesirable because it defines a lot of symbols and macros that tend to
+ // conflict with client code. This assumption is verified by
+ // WindowsTypesTest.HANDLEIsVoidStar.
+ typedef void* Handle;
+ AutoHandle();
+ explicit AutoHandle(Handle handle);
+
+ ~AutoHandle();
+
+ Handle Get() const;
+ void Reset();
+ void Reset(Handle handle);
+
+ private:
+ // Returns true if and only if the handle is a valid handle object that can be
+ // closed.
+ bool IsCloseable() const;
+
+ Handle handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class GTEST_API_ Notification {
+ public:
+ Notification();
+ void Notify();
+ void WaitForNotification();
+
+ private:
+ AutoHandle event_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+# endif // GTEST_HAS_NOTIFICATION_
+
+// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD
+// defined, but we don't want to use MinGW's pthreads implementation, which
+// has conformance problems with some versions of the POSIX standard.
+# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C linkage, we need to define a free C function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return nullptr;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void UserThreadFunc(T);
+
+ ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, nullptr, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() override { Join(); }
+
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, nullptr));
+ finished_ = true;
+ }
+ }
+
+ void Run() override {
+ if (thread_can_start_ != nullptr) thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ UserThreadFunc* const func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true if and only if we know that the thread function has
+ // finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+# endif // GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+// Mutex and ThreadLocal have already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+// Mutex implements a mutex on Windows platforms. It is used in conjunction
+// with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the
+// // end of the current scope.
+//
+// A static Mutex *must* be defined or declared using one of the following
+// macros:
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// (A non-static Mutex is defined/declared in the usual way).
+class GTEST_API_ Mutex {
+ public:
+ enum MutexType { kStatic = 0, kDynamic = 1 };
+ // We rely on kStaticMutex being 0, as that is what the linker initializes
+ // type_ to in static mutexes. critical_section_ will be initialized lazily
+ // in ThreadSafeLazyInit().
+ enum StaticConstructorSelector { kStaticMutex = 0 };
+
+ // This constructor intentionally does nothing. It relies on type_ being
+ // statically initialized to 0 (effectively setting it to kStatic) and on
+ // ThreadSafeLazyInit() to lazily initialize the rest of the members.
+ explicit Mutex(StaticConstructorSelector /*dummy*/) {}
+
+ Mutex();
+ ~Mutex();
+
+ void Lock();
+
+ void Unlock();
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld();
+
+ private:
+ // Initializes owner_thread_id_ and critical_section_ in static mutexes.
+ void ThreadSafeLazyInit();
+
+ // Per https://blogs.msdn.microsoft.com/oldnewthing/20040223-00/?p=40503,
+ // we assume that 0 is an invalid value for thread IDs.
+ unsigned int owner_thread_id_;
+
+ // For static mutexes, we rely on these members being initialized to zeros
+ // by the linker.
+ MutexType type_;
+ long critical_section_init_phase_; // NOLINT
+ GTEST_CRITICAL_SECTION* critical_section_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex)
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ Mutex* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Base class for ValueHolder<T>. Allows a caller to hold and delete a value
+// without knowing its type.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Provides a way for a thread to send notifications to a ThreadLocal
+// regardless of its parameter type.
+class ThreadLocalBase {
+ public:
+ // Creates a new ValueHolder<T> object holding a default value passed to
+ // this ThreadLocal<T>'s constructor and returns it. It is the caller's
+ // responsibility not to call this when the ThreadLocal<T> instance already
+ // has a value on the current thread.
+ virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0;
+
+ protected:
+ ThreadLocalBase() {}
+ virtual ~ThreadLocalBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase);
+};
+
+// Maps a thread to a set of ThreadLocals that have values instantiated on that
+// thread and notifies them when the thread exits. A ThreadLocal instance is
+// expected to persist until all threads it has values on have terminated.
+class GTEST_API_ ThreadLocalRegistry {
+ public:
+ // Registers thread_local_instance as having value on the current thread.
+ // Returns a value that can be used to identify the thread from other threads.
+ static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance);
+
+ // Invoked when a ThreadLocal instance is destroyed.
+ static void OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance);
+};
+
+class GTEST_API_ ThreadWithParamBase {
+ public:
+ void Join();
+
+ protected:
+ class Runnable {
+ public:
+ virtual ~Runnable() {}
+ virtual void Run() = 0;
+ };
+
+ ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start);
+ virtual ~ThreadWithParamBase();
+
+ private:
+ AutoHandle thread_;
+};
+
+// Helper class for testing Google Test's multi-threading constructs.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void UserThreadFunc(T);
+
+ ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+ : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {
+ }
+ virtual ~ThreadWithParam() {}
+
+ private:
+ class RunnableImpl : public Runnable {
+ public:
+ RunnableImpl(UserThreadFunc* func, T param)
+ : func_(func),
+ param_(param) {
+ }
+ virtual ~RunnableImpl() {}
+ virtual void Run() {
+ func_(param_);
+ }
+
+ private:
+ UserThreadFunc* const func_;
+ const T param_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);
+ };
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// Implements thread-local storage on Windows systems.
+//
+// // Thread 1
+// ThreadLocal<int> tl(100); // 100 is the default value for each thread.
+//
+// // Thread 2
+// tl.set(150); // Changes the value for thread 2 only.
+// EXPECT_EQ(150, tl.get());
+//
+// // Thread 1
+// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value.
+// tl.set(200);
+// EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// The users of a ThreadLocal instance have to make sure that all threads but
+// one (including the main thread) using that instance have exited before
+// destroying it. Otherwise, the per-thread objects managed for them by the
+// ThreadLocal instance are not guaranteed to be destroyed on all platforms.
+//
+// Google Test only uses global ThreadLocal objects. That means they
+// will die after main() has returned. Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal : public ThreadLocalBase {
+ public:
+ ThreadLocal() : default_factory_(new DefaultValueHolderFactory()) {}
+ explicit ThreadLocal(const T& value)
+ : default_factory_(new InstanceValueHolderFactory(value)) {}
+
+ ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of T. Can be deleted via its base class without the caller
+ // knowing the type of T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ ValueHolder() : value_() {}
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+
+ T* GetOrCreateValue() const {
+ return static_cast<ValueHolder*>(
+ ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();
+ }
+
+ virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {
+ return default_factory_->MakeNewHolder();
+ }
+
+ class ValueHolderFactory {
+ public:
+ ValueHolderFactory() {}
+ virtual ~ValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+ };
+
+ class DefaultValueHolderFactory : public ValueHolderFactory {
+ public:
+ DefaultValueHolderFactory() {}
+ ValueHolder* MakeNewHolder() const override { return new ValueHolder(); }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+ };
+
+ class InstanceValueHolderFactory : public ValueHolderFactory {
+ public:
+ explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+ ValueHolder* MakeNewHolder() const override {
+ return new ValueHolder(value_);
+ }
+
+ private:
+ const T value_; // The value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+ };
+
+ std::unique_ptr<ValueHolderFactory> default_factory_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# elif GTEST_HAS_PTHREAD
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ owner_ = pthread_self();
+ has_owner_ = true;
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // Since the lock is being released the owner_ field should no longer be
+ // considered valid. We don't protect writing to has_owner_ here, as it's
+ // the caller's responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ has_owner_ = false;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ // has_owner_ indicates whether the owner_ field below contains a valid thread
+ // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+ // accesses to the owner_ field should be protected by a check of this field.
+ // An alternative might be to memset() owner_ to all zeros, but there's no
+ // guarantee that a zero'd pthread_t is necessarily invalid or even different
+ // from pthread_self().
+ bool has_owner_;
+ pthread_t owner_; // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+// The initialization list here does not explicitly initialize each field,
+// instead relying on default initialization for the unspecified fields. In
+// particular, the owner_ field (a pthread_t) is not explicitly initialized.
+// This allows initialization to work whether pthread_t is a scalar or struct.
+// The flag -Wmissing-field-initializers must not be specified for this to work.
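+//
+// For example (an illustrative sketch; g_log_mutex is a hypothetical name):
+//
+//   GTEST_DECLARE_STATIC_MUTEX_(g_log_mutex);  // In a header.
+//   GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);   // In exactly one .cc file.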
+#define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = {PTHREAD_MUTEX_INITIALIZER, false, 0}
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, nullptr));
+ has_owner_ = false;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to protect against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
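+
+// For example (an illustrative sketch, reusing the hypothetical g_log_mutex
+// from above):
+//
+//   void Log(const std::string& msg) {
+//     MutexLock lock(&g_log_mutex);  // Acquired here, released when `lock`
+//     ...                            // goes out of scope.
+//   }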
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+template <typename T>
+class GTEST_API_ ThreadLocal {
+ public:
+ ThreadLocal()
+ : key_(CreateKey()), default_factory_(new DefaultValueHolderFactory()) {}
+ explicit ThreadLocal(const T& value)
+ : key_(CreateKey()),
+ default_factory_(new InstanceValueHolderFactory(value)) {}
+
+ ~ThreadLocal() {
+ // Destroys the managed object for the current thread, if any.
+ DeleteThreadLocalValue(pthread_getspecific(key_));
+
+ // Releases resources associated with the key. This will *not*
+ // delete managed objects for other threads.
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+ }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of type T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ ValueHolder() : value_() {}
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+ static pthread_key_t CreateKey() {
+ pthread_key_t key;
+ // When a thread exits, DeleteThreadLocalValue() will be called on
+ // the object managed for that thread.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_key_create(&key, &DeleteThreadLocalValue));
+ return key;
+ }
+
+ T* GetOrCreateValue() const {
+ ThreadLocalValueHolderBase* const holder =
+ static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+ if (holder != nullptr) {
+ return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+ }
+
+ ValueHolder* const new_holder = default_factory_->MakeNewHolder();
+ ThreadLocalValueHolderBase* const holder_base = new_holder;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+ return new_holder->pointer();
+ }
+
+ class ValueHolderFactory {
+ public:
+ ValueHolderFactory() {}
+ virtual ~ValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+ };
+
+ class DefaultValueHolderFactory : public ValueHolderFactory {
+ public:
+ DefaultValueHolderFactory() {}
+ ValueHolder* MakeNewHolder() const override { return new ValueHolder(); }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+ };
+
+ class InstanceValueHolderFactory : public ValueHolderFactory {
+ public:
+ explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+ ValueHolder* MakeNewHolder() const override {
+ return new ValueHolder(value_);
+ }
+
+ private:
+ const T value_; // The value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+ };
+
+ // A key pthreads uses for looking up per-thread values.
+ const pthread_key_t key_;
+ std::unique_ptr<ValueHolderFactory> default_factory_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# endif // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+
+#else // GTEST_IS_THREADSAFE
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable). Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+ Mutex() {}
+ void Lock() {}
+ void Unlock() {}
+ void AssertHeld() const {}
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to protect against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex*) {} // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class GTEST_API_ ThreadLocal {
+ public:
+ ThreadLocal() : value_() {}
+ explicit ThreadLocal(const T& value) : value_(value) {}
+ T* pointer() { return &value_; }
+ const T* pointer() const { return &value_; }
+ const T& get() const { return value_; }
+ void set(const T& value) { value_ = value; }
+ private:
+ T value_;
+};
+
+#endif // GTEST_IS_THREADSAFE
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_SEP_ "\\"
+# define GTEST_HAS_ALT_PATH_SEP_ 1
+#else
+# define GTEST_PATH_SEP_ "/"
+# define GTEST_HAS_ALT_PATH_SEP_ 0
+#endif // GTEST_OS_WINDOWS
+
+// Utilities for char.
+
+// isspace(int ch) and friends accept an unsigned char or EOF. char
+// may be signed, depending on the compiler (or compiler flags).
+// Therefore we need to cast a char to unsigned char before calling
+// isspace(), etc.
+
+inline bool IsAlpha(char ch) {
+ return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+ return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+ return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+ return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+ return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+ return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+ return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+#ifdef __cpp_lib_char8_t
+inline bool IsXDigit(char8_t ch) {
+ return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+#endif
+inline bool IsXDigit(char16_t ch) {
+ const unsigned char low_byte = static_cast<unsigned char>(ch);
+ return ch == low_byte && isxdigit(low_byte) != 0;
+}
+inline bool IsXDigit(char32_t ch) {
+ const unsigned char low_byte = static_cast<unsigned char>(ch);
+ return ch == low_byte && isxdigit(low_byte) != 0;
+}
+inline bool IsXDigit(wchar_t ch) {
+ const unsigned char low_byte = static_cast<unsigned char>(ch);
+ return ch == low_byte && isxdigit(low_byte) != 0;
+}
+
+inline char ToLower(char ch) {
+ return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+ return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
+
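+// Removes trailing whitespace from str and returns the result, e.g.
+// StripTrailingSpaces("abc  ") returns "abc" (illustrative example).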
+inline std::string StripTrailingSpaces(std::string str) {
+ std::string::iterator it = str.end();
+ while (it != str.begin() && IsSpace(*--it))
+ it = str.erase(it);
+ return str;
+}
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions. These wrappers hide the differences between
+// Windows/MSVC and POSIX systems. Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int DoIsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else // !__BORLANDC__
+# if GTEST_OS_WINDOWS_MOBILE
+inline int DoIsATTY(int /* fd */) { return 0; }
+# else
+inline int DoIsATTY(int fd) { return _isatty(fd); }
+# endif // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+ return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+#elif GTEST_OS_ESP8266
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int DoIsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* /*path*/, StatStruct* /*buf*/) {
+ // stat() is not implemented on ESP8266; report success unconditionally.
+ return 0;
+}
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int DoIsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif // GTEST_OS_WINDOWS
+
+inline int IsATTY(int fd) {
+ // DoIsATTY might change errno (for example, to ENOTTY if stdout is
+ // redirected to a file on Linux), which is unexpected, so save the
+ // previous value and restore it after the call.
+ int savedErrno = errno;
+ int isAttyValue = DoIsATTY(fd);
+ errno = savedErrno;
+
+ return isAttyValue;
+}
+
+// Functions deprecated by MSVC 8.0.
+
+GTEST_DISABLE_MSC_DEPRECATED_PUSH_()
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && \
+ !GTEST_OS_WINDOWS_RT && !GTEST_OS_ESP8266 && !GTEST_OS_XTENSA
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
+ struct wchar_codecvt : public std::codecvt<wchar_t, char, std::mbstate_t> {};
+ std::wstring_convert<wchar_codecvt> converter;
+ std::wstring wide_path = converter.from_bytes(path);
+ std::wstring wide_mode = converter.from_bytes(mode);
+ return _wfopen(wide_path.c_str(), wide_mode.c_str());
+#else // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
+ return fopen(path, mode);
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MINGW
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+ return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+ return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+ return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || \
+ GTEST_OS_WINDOWS_RT || GTEST_OS_ESP8266 || GTEST_OS_XTENSA
+ // We are on an embedded platform, which has no environment variables.
+ static_cast<void>(name); // To prevent 'unused argument' warning.
+ return nullptr;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+ // Environment variables which we programmatically clear will be set to the
+ // empty string rather than unset (NULL). Handle that case.
+ const char* const env = getenv(name);
+ return (env != nullptr && env[0] != '\0') ? env : nullptr;
+#else
+ return getenv(name);
+#endif
+}
+
+GTEST_DISABLE_MSC_DEPRECATED_POP_()
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+[[noreturn]] void Abort();
+#else
+[[noreturn]] inline void Abort() { abort(); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+} // namespace posix
+
+// MSVC "deprecates" snprintf and issues warnings wherever it is used. In
+// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
+// MSVC-based platforms. We map the GTEST_SNPRINTF_ macro to the appropriate
+// function in order to achieve that. We use macro definition here because
+// snprintf is a variadic function.
+#if _MSC_VER && !GTEST_OS_WINDOWS_MOBILE
+// MSVC 2005 and above support variadic macros.
+# define GTEST_SNPRINTF_(buffer, size, format, ...) \
+ _snprintf_s(buffer, size, size, format, __VA_ARGS__)
+#elif defined(_MSC_VER)
+// Windows CE does not define _snprintf_s
+# define GTEST_SNPRINTF_ _snprintf
+#else
+# define GTEST_SNPRINTF_ snprintf
+#endif
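+
+// For example (illustrative):
+//
+//   char buf[32];
+//   GTEST_SNPRINTF_(buf, sizeof(buf), "value = %d", 42);  // buf == "value = 42"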
+
+// The biggest signed integer type the compiler supports.
+//
+// long long is guaranteed to be at least 64-bits in C++11.
+using BiggestInt = long long; // NOLINT
+
+// The maximum number a BiggestInt can represent.
+constexpr BiggestInt kMaxBiggestInt = (std::numeric_limits<BiggestInt>::max)();
+
+// This template class serves as a compile-time function from size to
+// type. It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+// TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs. Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+ // This prevents the user from using TypeWithSize<N> with incorrect
+ // values of N.
+ using UInt = void;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+ using Int = std::int32_t;
+ using UInt = std::uint32_t;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+ using Int = std::int64_t;
+ using UInt = std::uint64_t;
+};
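+
+// For instance, these compile-time checks hold (illustrative):
+//
+//   static_assert(sizeof(TypeWithSize<4>::UInt) == 4, "");
+//   static_assert(sizeof(TypeWithSize<8>::Int) == 8, "");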
+
+// Integer types of known sizes.
+using TimeInMillis = int64_t; // Represents time in milliseconds.
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#if !defined(GTEST_FLAG)
+# define GTEST_FLAG(name) FLAGS_gtest_##name
+#endif // !defined(GTEST_FLAG)
+
+#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1
+#endif // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+
+#if !defined(GTEST_DECLARE_bool_)
+# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver
+
+// Macros for declaring flags.
+# define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+# define GTEST_DECLARE_int32_(name) \
+ GTEST_API_ extern std::int32_t GTEST_FLAG(name)
+# define GTEST_DECLARE_string_(name) \
+ GTEST_API_ extern ::std::string GTEST_FLAG(name)
+
+// Macros for defining flags.
+# define GTEST_DEFINE_bool_(name, default_val, doc) \
+ GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_int32_(name, default_val, doc) \
+ GTEST_API_ std::int32_t GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_string_(name, default_val, doc) \
+ GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
+
+#endif // !defined(GTEST_DECLARE_bool_)
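+
+// For example (an illustrative sketch; my_flag is a hypothetical flag):
+//
+//   GTEST_DECLARE_bool_(my_flag);  // extern bool FLAGS_gtest_my_flag;
+//   GTEST_DEFINE_bool_(my_flag, false, "doc");  // defines the flag
+//   if (GTEST_FLAG(my_flag)) { ... }  // reads FLAGS_gtest_my_flag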
+
+// Thread annotations
+#if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+# define GTEST_LOCK_EXCLUDED_(locks)
+#endif // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+GTEST_API_ bool ParseInt32(const Message& src_text, const char* str,
+ int32_t* value);
+
+// Parses a bool/int32_t/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ int32_t Int32FromGTestEnv(const char* flag, int32_t default_val);
+std::string OutputFlagAlsoCheckEnvVar();
+const char* StringFromGTestEnv(const char* flag, const char* default_val);
+
+} // namespace internal
+} // namespace testing
+
+#if !defined(GTEST_INTERNAL_DEPRECATED)
+
+// Internal macro to mark an API deprecated; for googletest usage only.
+// Usage: class GTEST_INTERNAL_DEPRECATED(message) MyClass or
+// GTEST_INTERNAL_DEPRECATED(message) <return_type> myFunction(). Every usage
+// of a deprecated entity will trigger a warning when compiled with the
+// `-Wdeprecated-declarations` option (clang, gcc, any __GNUC__ compiler).
+// For MSVC, the /W3 option needs to be enabled.
+// Note that for other compilers this macro evaluates to nothing to prevent
+// compilation errors.
+#if defined(_MSC_VER)
+#define GTEST_INTERNAL_DEPRECATED(message) __declspec(deprecated(message))
+#elif defined(__GNUC__)
+#define GTEST_INTERNAL_DEPRECATED(message) __attribute__((deprecated(message)))
+#else
+#define GTEST_INTERNAL_DEPRECATED(message)
+#endif
+
+#endif // !defined(GTEST_INTERNAL_DEPRECATED)
+
+#if GTEST_HAS_ABSL
+// Always use absl::any for UniversalPrinter<> specializations if googletest
+// is built with absl support.
+#define GTEST_INTERNAL_HAS_ANY 1
+#include "absl/types/any.h"
+namespace testing {
+namespace internal {
+using Any = ::absl::any;
+} // namespace internal
+} // namespace testing
+#else
+#ifdef __has_include
+#if __has_include(<any>) && __cplusplus >= 201703L
+// Otherwise for C++17 and higher use std::any for UniversalPrinter<>
+// specializations.
+#define GTEST_INTERNAL_HAS_ANY 1
+#include <any>
+namespace testing {
+namespace internal {
+using Any = ::std::any;
+} // namespace internal
+} // namespace testing
+// The case where absl is configured NOT to alias std::any is not
+// supported.
+#endif // __has_include(<any>) && __cplusplus >= 201703L
+#endif // __has_include
+#endif // GTEST_HAS_ABSL
+
+#if GTEST_HAS_ABSL
+// Always use absl::optional for UniversalPrinter<> specializations if
+// googletest is built with absl support.
+#define GTEST_INTERNAL_HAS_OPTIONAL 1
+#include "absl/types/optional.h"
+namespace testing {
+namespace internal {
+template <typename T>
+using Optional = ::absl::optional<T>;
+} // namespace internal
+} // namespace testing
+#else
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L
+// Otherwise for C++17 and higher use std::optional for UniversalPrinter<>
+// specializations.
+#define GTEST_INTERNAL_HAS_OPTIONAL 1
+#include <optional>
+namespace testing {
+namespace internal {
+template <typename T>
+using Optional = ::std::optional<T>;
+} // namespace internal
+} // namespace testing
+// The case where absl is configured NOT to alias std::optional is not
+// supported.
+#endif // __has_include(<optional>) && __cplusplus >= 201703L
+#endif // __has_include
+#endif // GTEST_HAS_ABSL
+
+#if GTEST_HAS_ABSL
+// Always use absl::string_view for Matcher<> specializations if googletest
+// is built with absl support.
+# define GTEST_INTERNAL_HAS_STRING_VIEW 1
+#include "absl/strings/string_view.h"
+namespace testing {
+namespace internal {
+using StringView = ::absl::string_view;
+} // namespace internal
+} // namespace testing
+#else
+# ifdef __has_include
+# if __has_include(<string_view>) && __cplusplus >= 201703L
+// Otherwise for C++17 and higher use std::string_view for Matcher<>
+// specializations.
+# define GTEST_INTERNAL_HAS_STRING_VIEW 1
+#include <string_view>
+namespace testing {
+namespace internal {
+using StringView = ::std::string_view;
+} // namespace internal
+} // namespace testing
+// The case where absl is configured NOT to alias std::string_view is not
+// supported.
+# endif // __has_include(<string_view>) && __cplusplus >= 201703L
+# endif // __has_include
+#endif // GTEST_HAS_ABSL
+
+#if GTEST_HAS_ABSL
+// Always use absl::variant for UniversalPrinter<> specializations if googletest
+// is built with absl support.
+#define GTEST_INTERNAL_HAS_VARIANT 1
+#include "absl/types/variant.h"
+namespace testing {
+namespace internal {
+template <typename... T>
+using Variant = ::absl::variant<T...>;
+} // namespace internal
+} // namespace testing
+#else
+#ifdef __has_include
+#if __has_include(<variant>) && __cplusplus >= 201703L
+// Otherwise for C++17 and higher use std::variant for UniversalPrinter<>
+// specializations.
+#define GTEST_INTERNAL_HAS_VARIANT 1
+#include <variant>
+namespace testing {
+namespace internal {
+template <typename... T>
+using Variant = ::std::variant<T...>;
+} // namespace internal
+} // namespace testing
+// The case where absl is configured NOT to alias std::variant is not supported.
+#endif // __has_include(<variant>) && __cplusplus >= 201703L
+#endif // __has_include
+#endif // GTEST_HAS_ABSL
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#include <ctype.h>
+#include <float.h>
+#include <string.h>
+#include <cstdint>
+#include <iomanip>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to limitations of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+#include <memory>
+#include <sstream>
+
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+// Ensures that there is at least one operator<< in the global namespace.
+// See Message& operator<<(...) below for why.
+void operator<<(const testing::internal::Secret&, int);
+
+namespace testing {
+
+// The Message class works like an ostream repeater.
+//
+// Typical usage:
+//
+// 1. You stream a bunch of values to a Message object.
+// It will remember the text in a stringstream.
+// 2. Then you stream the Message object to an ostream.
+// This causes the text in the Message to be streamed
+// to the ostream.
+//
+// For example:
+//
+// testing::Message foo;
+// foo << 1 << " != " << 2;
+// std::cout << foo;
+//
+// will print "1 != 2".
+//
+// Message is not intended to be inherited from. In particular, its
+// destructor is not virtual.
+//
+// Note that stringstream behaves differently in gcc and in MSVC. You
+// can stream a NULL char pointer to it in the former, but not in the
+// latter (it causes an access violation if you do). The Message
+// class hides this difference by treating a NULL char pointer as
+// "(null)".
+class GTEST_API_ Message {
+ private:
+ // The type of basic IO manipulators (endl, ends, and flush) for
+ // narrow streams.
+ typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
+
+ public:
+ // Constructs an empty Message.
+ Message();
+
+ // Copy constructor.
+ Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT
+ *ss_ << msg.GetString();
+ }
+
+ // Constructs a Message from a C-string.
+ explicit Message(const char* str) : ss_(new ::std::stringstream) {
+ *ss_ << str;
+ }
+
+ // Streams a non-pointer value to this object.
+ template <typename T>
+ inline Message& operator <<(const T& val) {
+ // Some libraries overload << for STL containers. These
+ // overloads are defined in the global namespace instead of ::std.
+ //
+ // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+ // overloads are visible in either the std namespace or the global
+ // namespace, but not other namespaces, including the testing
+ // namespace which Google Test's Message class is in.
+ //
+ // To allow STL containers (and other types that have a << operator
+ // defined in the global namespace) to be used in Google Test
+ // assertions, testing::Message must access the custom << operator
+ // from the global namespace. With this using declaration,
+ // overloads of << defined in the global namespace and those
+ // visible via Koenig lookup are both exposed in this function.
+ using ::operator <<;
+ *ss_ << val;
+ return *this;
+ }
+
+ // Streams a pointer value to this object.
+ //
+ // This function is an overload of the previous one. When you
+ // stream a pointer to a Message, this definition will be used as it
+ // is more specialized. (The C++ Standard, section
+ // [temp.func.order].) If you stream a non-pointer, then the
+ // previous definition will be used.
+ //
+ // The reason for this overload is that streaming a NULL pointer to
+ // ostream is undefined behavior. Depending on the compiler, you
+ // may get "0", "(nil)", "(null)", or an access violation. To
+ // ensure consistent results across compilers, we always treat NULL
+ // as "(null)".
+ template <typename T>
+ inline Message& operator <<(T* const& pointer) { // NOLINT
+ if (pointer == nullptr) {
+ *ss_ << "(null)";
+ } else {
+ *ss_ << pointer;
+ }
+ return *this;
+ }
+
+ // Since the basic IO manipulators are overloaded for both narrow
+ // and wide streams, we have to provide this specialized definition
+ // of operator <<, even though its body is the same as the
+ // templatized version above. Without this definition, streaming
+ // endl or other basic IO manipulators to Message will confuse the
+ // compiler.
+ Message& operator <<(BasicNarrowIoManip val) {
+ *ss_ << val;
+ return *this;
+ }
+
+ // Instead of 1/0, we want to see true/false for bool values.
+ Message& operator <<(bool b) {
+ return *this << (b ? "true" : "false");
+ }
+
+ // These two overloads allow streaming a wide C string to a Message
+ // using the UTF-8 encoding.
+ Message& operator <<(const wchar_t* wide_c_str);
+ Message& operator <<(wchar_t* wide_c_str);
+
+#if GTEST_HAS_STD_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::std::wstring& wstr);
+#endif // GTEST_HAS_STD_WSTRING
+
+ // Gets the text streamed to this object so far as an std::string.
+ // Each '\0' character in the buffer is replaced with "\\0".
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ std::string GetString() const;
+
+ private:
+ // We'll hold the text streamed to this object here.
+ const std::unique_ptr< ::std::stringstream> ss_;
+
+ // We declare (but don't implement) this to prevent the compiler
+ // from implementing the assignment operator.
+ void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+ return os << sb.GetString();
+}
+
+namespace internal {
+
+// Converts a streamable value to an std::string. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
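+//
+// For example (illustrative):
+//
+//   StreamableToString(3.5)                         // "3.5"
+//   StreamableToString(static_cast<int*>(nullptr))  // "(null)"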
+template <typename T>
+std::string StreamableToString(const T& streamable) {
+ return (Message() << streamable).GetString();
+}
+
+} // namespace internal
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included in gtest/internal/gtest-internal.h.
+// Do not include this header file separately!
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test. They are subject to change without notice. They should not be
+// used by code external to Google Test.
+//
+// This header file is #included by gtest-internal.h.
+// It should not be #included by other files.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <cstdint>
+#include <string>
+
+
+namespace testing {
+namespace internal {
+
+// String - an abstract class holding static string utilities.
+class GTEST_API_ String {
+ public:
+ // Static utility methods
+
+ // Clones a 0-terminated C string, allocating memory using new. The
+ // caller is responsible for deleting the return value using
+ // delete[]. Returns the cloned string, or NULL if the input is
+ // NULL.
+ //
+ // This is different from strdup() in string.h, which allocates
+ // memory using malloc().
+ static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+ // able to pass strings to Win32 APIs on CE we need to convert them
+ // to 'Unicode', UTF-16.
+
+ // Creates a UTF-16 wide string from the given ANSI string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the wide string, or NULL if the
+ // input is NULL.
+ //
+ // The wide string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static LPCWSTR AnsiToUtf16(const char* c_str);
+
+ // Creates an ANSI string from the given wide string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the ANSI string, or NULL if the
+ // input is NULL.
+ //
+ // The returned string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+ // Compares two C strings. Returns true if and only if they have the same
+ // content.
+ //
+ // Unlike strcmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CStringEquals(const char* lhs, const char* rhs);
+
+ // Converts a wide C string to a String using the UTF-8 encoding.
+ // NULL will be converted to "(null)". If an error occurred during
+ // the conversion, "(failed to convert from wide string)" is
+ // returned.
+ static std::string ShowWideCString(const wchar_t* wide_c_str);
+
+ // Compares two wide C strings. Returns true if and only if they have the
+ // same content.
+ //
+ // Unlike wcscmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+ // Compares two C strings, ignoring case. Returns true if and only if
+ // they have the same content.
+ //
+ // Unlike strcasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CaseInsensitiveCStringEquals(const char* lhs,
+ const char* rhs);
+
+ // Compares two wide C strings, ignoring case. Returns true if and only if
+ // they have the same content.
+ //
+ // Unlike wcscasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL wide C string,
+ // including the empty string.
+ // NB: The implementations on different platforms differ slightly.
+ // On Windows, this method uses _wcsicmp, which compares according to the
+ // LC_CTYPE environment variable. On GNU platforms this method uses
+ // wcscasecmp, which compares according to the LC_CTYPE category of the
+ // current locale. On MacOS X, it uses towlower, which also uses the
+ // LC_CTYPE category of the current locale.
+ static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs);
+
+ // Returns true if and only if the given string ends with the given suffix,
+ // ignoring case. Any string is considered to end with an empty suffix.
+ static bool EndsWithCaseInsensitive(
+ const std::string& str, const std::string& suffix);
+
+ // Formats an int value as "%02d".
+ static std::string FormatIntWidth2(int value); // "%02d" for width == 2
+
+ // Formats an int value to given width with leading zeros.
+ static std::string FormatIntWidthN(int value, int width);
+
+ // Formats an int value as "%X".
+ static std::string FormatHexInt(int value);
+
+ // Formats an int value as "%X".
+ static std::string FormatHexUInt32(uint32_t value);
+
+ // Formats a byte as "%02X".
+ static std::string FormatByte(unsigned char value);
+
+ private:
+ String(); // Not meant to be instantiated.
+}; // class String
+
+// Gets the content of the stringstream's buffer as an std::string. Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ std::string StringStreamToString(::std::stringstream* stream);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+// Used for helper functions for naming files in a directory for xml output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
+
+class GTEST_API_ FilePath {
+ public:
+ FilePath() : pathname_("") { }
+ FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+ explicit FilePath(const std::string& pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ FilePath& operator=(const FilePath& rhs) {
+ Set(rhs);
+ return *this;
+ }
+
+ void Set(const FilePath& rhs) {
+ pathname_ = rhs.pathname_;
+ }
+
+ const std::string& string() const { return pathname_; }
+ const char* c_str() const { return pathname_.c_str(); }
+
+ // Returns the current working directory, or "" if unsuccessful.
+ static FilePath GetCurrentDir();
+
+ // Given directory = "dir", base_name = "test", number = 0,
+ // extension = "xml", returns "dir/test.xml". If number is greater
+ // than zero (e.g., 12), returns "dir/test_12.xml".
+ // On Windows platform, uses \ as the separator rather than /.
+ static FilePath MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension);
+
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
+ // Returns a pathname for a file that does not currently exist. The pathname
+ // will be directory/base_name.extension or
+ // directory/base_name_<number>.extension if directory/base_name.extension
+ // already exists. The number will be incremented until a pathname is found
+ // that does not already exist.
+ // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+ // There could be a race condition if two or more processes are calling this
+ // function at the same time -- they could both pick the same filename.
+ static FilePath GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension);
+
+ // Returns true if and only if the path is "".
+ bool IsEmpty() const { return pathname_.empty(); }
+
+ // If the input name has a trailing separator character, removes it and
+ // returns the name; otherwise returns the name string unmodified.
+ // On Windows platform, uses \ as the separator, other platforms use /.
+ FilePath RemoveTrailingPathSeparator() const;
+
+ // Returns a copy of the FilePath with the directory part removed.
+ // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+ // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+ // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+ // returns an empty FilePath ("").
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveDirectoryName() const;
+
+ // RemoveFileName returns the directory path with the filename removed.
+ // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+ // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+ // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+ // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveFileName() const;
+
+ // Returns a copy of the FilePath with the case-insensitive extension removed.
+ // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+ // FilePath("dir/file"). If a case-insensitive extension is not
+ // found, returns a copy of the original FilePath.
+ FilePath RemoveExtension(const char* extension) const;
+
+ // Creates directories so that path exists. Returns true if successful or if
+ // the directories already exist; returns false if unable to create
+ // directories for any reason. Will also return false if the FilePath does
+ // not represent a directory (that is, it doesn't end with a path separator).
+ bool CreateDirectoriesRecursively() const;
+
+ // Create the directory so that path exists. Returns true if successful or
+ // if the directory already exists; returns false if unable to create the
+ // directory for any reason, including if the parent directory does not
+ // exist. Not named "CreateDirectory" because that's a macro on Windows.
+ bool CreateFolder() const;
+
+ // Returns true if FilePath describes something in the file-system,
+ // either a file, directory, or whatever, and that something exists.
+ bool FileOrDirectoryExists() const;
+
+ // Returns true if pathname describes a directory in the file-system
+ // that exists.
+ bool DirectoryExists() const;
+
+ // Returns true if FilePath ends with a path separator, which indicates that
+ // it is intended to represent a directory. Returns false otherwise.
+ // This does NOT check that a directory (or file) actually exists.
+ bool IsDirectory() const;
+
+ // Returns true if pathname describes a root directory. (Windows has one
+ // root directory per disk drive.)
+ bool IsRootDirectory() const;
+
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
+ private:
+ // Replaces multiple consecutive separators with a single separator.
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+ // redundancies that might be in a pathname involving "." or "..".
+ //
+ // A pathname with multiple consecutive separators may occur either through
+ // user error or as a result of some scripts or APIs that generate a pathname
+ // with a trailing separator. On other platforms the same API or script
+ // may NOT generate a pathname with a trailing "/". Then elsewhere that
+ // pathname may have another "/" and pathname components added to it,
+ // without checking for the separator already being there.
+ // The script language and operating system may allow paths like "foo//bar"
+ // but some of the functions in FilePath will not handle that correctly. In
+ // particular, RemoveTrailingPathSeparator() only removes one separator, and
+ // it is called in CreateDirectoriesRecursively() assuming that it will change
+ // a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
+ void Normalize();
+
+ // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
+ std::string pathname_;
+}; // class FilePath
+
+} // namespace internal
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+# include <cxxabi.h>
+# elif defined(__HP_aCC)
+# include <acxx_demangle.h>
+# endif // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// Canonicalizes a given name with respect to the Standard C++ Library.
+// This handles removing the inline namespace within `std` that is
+// used by various standard libraries (e.g., `std::__1`). Names outside
+// of namespace std are returned unmodified.
+inline std::string CanonicalizeForStdLibVersioning(std::string s) {
+ static const char prefix[] = "std::__";
+ if (s.compare(0, strlen(prefix), prefix) == 0) {
+ std::string::size_type end = s.find("::", strlen(prefix));
+ if (end != s.npos) {
+ // Erase everything between the initial `std` and the second `::`.
+ s.erase(strlen("std"), end - strlen("std"));
+ }
+ }
+ return s;
+}
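+
+// For example (illustrative): with libc++'s inline namespace __1,
+// CanonicalizeForStdLibVersioning("std::__1::basic_string") returns
+// "std::basic_string"; names outside std are returned unmodified.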
+
+#if GTEST_HAS_RTTI
+// GetTypeName(const std::type_info&) returns a human-readable name of type T.
+inline std::string GetTypeName(const std::type_info& type) {
+ const char* const name = type.name();
+#if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+#if GTEST_HAS_CXXABI_H_
+ using abi::__cxa_demangle;
+#endif // GTEST_HAS_CXXABI_H_
+ char* const readable_name = __cxa_demangle(name, nullptr, nullptr, &status);
+ const std::string name_str(status == 0 ? readable_name : name);
+ free(readable_name);
+ return CanonicalizeForStdLibVersioning(name_str);
+#else
+ return name;
+#endif // GTEST_HAS_CXXABI_H_ || __HP_aCC
+}
+#endif // GTEST_HAS_RTTI
+
+// GetTypeName<T>() returns a human-readable name of type T if and only if
+// RTTI is enabled, otherwise it returns a dummy type name.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
+template <typename T>
+std::string GetTypeName() {
+#if GTEST_HAS_RTTI
+ return GetTypeName(typeid(T));
+#else
+ return "<type>";
+#endif // GTEST_HAS_RTTI
+}
+
+// A unique type indicating an empty node
+struct None {};
+
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating typedef for class templates,
+// which C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
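+
+// For example, given a class template with one type parameter such as
+//
+//   template <typename T> class MyList { ... };
+//
+// both TemplateSel<MyList>::Bind<int>::type and
+// GTEST_BIND_(TemplateSel<MyList>, int) name the type MyList<int>.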
+
+template <GTEST_TEMPLATE_ Head_, GTEST_TEMPLATE_... Tail_>
+struct Templates {
+ using Head = TemplateSel<Head_>;
+ using Tail = Templates<Tail_...>;
+};
+
+template <GTEST_TEMPLATE_ Head_>
+struct Templates<Head_> {
+ using Head = TemplateSel<Head_>;
+ using Tail = None;
+};
+
+// Tuple-like type lists
+template <typename Head_, typename... Tail_>
+struct Types {
+ using Head = Head_;
+ using Tail = Types<Tail_...>;
+};
+
+template <typename Head_>
+struct Types<Head_> {
+ using Head = Head_;
+ using Tail = None;
+};
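+
+// For example, Types<int, char> has Head = int and Tail = Types<char>;
+// Types<char> in turn has Head = char and Tail = None, which terminates
+// the list.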
+
+// Helper metafunctions to tell apart a single type from types
+// generated by ::testing::Types
+template <typename... Ts>
+struct ProxyTypeList {
+ using type = Types<Ts...>;
+};
+
+template <typename>
+struct is_proxy_type_list : std::false_type {};
+
+template <typename... Ts>
+struct is_proxy_type_list<ProxyTypeList<Ts...>> : std::true_type {};
+
+// Generator which conditionally creates type lists.
+// It recognizes if a requested type list should be created
+// and prevents creating a new type list nested within another one.
+template <typename T>
+struct GenerateTypeList {
+ private:
+ using proxy = typename std::conditional<is_proxy_type_list<T>::value, T,
+ ProxyTypeList<T>>::type;
+
+ public:
+ using type = typename proxy::type;
+};
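+
+// For example, GenerateTypeList<int>::type is Types<int>, while
+// GenerateTypeList<ProxyTypeList<int, char>>::type is Types<int, char>
+// rather than a Types list nested inside another one.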
+
+} // namespace internal
+
+template <typename... Ts>
+using Types = internal::ProxyTypeList<Ts...>;
+
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__. Writing
+//
+// foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number. For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
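+
+// For example, on line 42 of a file, GTEST_CONCAT_TOKEN_(foo_, __LINE__)
+// expands to foo_42, whereas using GTEST_CONCAT_TOKEN_IMPL_ directly would
+// produce the token foo___LINE__.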
+
+// Stringifies its argument.
+// Works around a bug in Visual Studio which doesn't accept code like this:
+//
+// #define GTEST_STRINGIFY_(name) #name
+// #define MACRO(a, b, c) ... GTEST_STRINGIFY_(a) ...
+// MACRO(, x, y)
+//
+// The compiler complains about the argument to GTEST_STRINGIFY_ being
+// empty, even though an empty macro argument is allowed by the spec.
+#define GTEST_STRINGIFY_HELPER_(name, ...) #name
+#define GTEST_STRINGIFY_(...) GTEST_STRINGIFY_HELPER_(__VA_ARGS__, )
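+
+// For example, GTEST_STRINGIFY_(abc) expands to "abc", and
+// GTEST_STRINGIFY_() expands to "" even on the affected compilers.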
+
+namespace proto2 {
+class MessageLite;
+}
+
+namespace testing {
+
+// Forward declarations.
+
+class AssertionResult; // Result of an assertion.
+class Message; // Represents a failure message.
+class Test; // Represents a test.
+class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
+class UnitTest; // A collection of test suites.
+
+template <typename T>
+::std::string PrintToString(const T& value);
+
+namespace internal {
+
+struct TraceInfo; // Information about a trace point.
+class TestInfoImpl; // Opaque implementation of TestInfo
+class UnitTestImpl; // Opaque implementation of UnitTest
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// An IgnoredValue object can be implicitly constructed from ANY value.
+class IgnoredValue {
+ struct Sink {};
+ public:
+ // This constructor template allows any value to be implicitly
+ // converted to IgnoredValue. The object has no data member and
+ // doesn't try to remember anything about the argument. We
+ // deliberately omit the 'explicit' keyword in order to allow the
+ // conversion to be implicit.
+ // Disable the conversion if T already has a magical conversion operator.
+ // Otherwise we get ambiguity.
+ template <typename T,
+ typename std::enable_if<!std::is_convertible<T, Sink>::value,
+ int>::type = 0>
+ IgnoredValue(const T& /* ignored */) {} // NOLINT(runtime/explicit)
+};
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ std::string AppendUserMessage(
+ const std::string& gtest_msg, const Message& user_msg);
+
+#if GTEST_HAS_EXCEPTIONS
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4275 \
+/* an exported class was derived from a class that was not exported */)
+
+// This exception is thrown by (and only by) a failed Google Test
+// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
+// are enabled). We derive it from std::runtime_error, which is for
+// errors presumably detectable only at run time. Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure);
+};
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4275
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+namespace edit_distance {
+// Returns the optimal edits to go from 'left' to 'right'.
+// All edits cost the same, with replace having lower priority than
+// add/remove.
+// Simple implementation of the Wagner-Fischer algorithm.
+// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm
+enum EditType { kMatch, kAdd, kRemove, kReplace };
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<size_t>& left, const std::vector<size_t>& right);
+
+// Same as above, but the input is represented as strings.
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<std::string>& left,
+ const std::vector<std::string>& right);
+
+// Create a diff of the input strings in Unified diff format.
+GTEST_API_ std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+ const std::vector<std::string>& right,
+ size_t context = 2);
+
+} // namespace edit_distance
+
+// Calculate the diff between 'left' and 'right' and return it in unified diff
+// format.
+// If not null, stores in 'total_line_count' the total number of lines found
+// in left + right.
+GTEST_API_ std::string DiffStrings(const std::string& left,
+ const std::string& right,
+ size_t* total_line_count);
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true if and only if the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const std::string& expected_value,
+ const std::string& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ std::string GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison. (Due to round-off error, etc., it's very unlikely that
+// two floating-point numbers will be equal exactly. Hence a naive
+// comparison by the == operation often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+// The most-significant bit being the leftmost, an IEEE
+// floating-point looks like
+//
+// sign_bit exponent_bits fraction_bits
+//
+// Here, sign_bit is a single bit that designates the sign of the
+// number.
+//
+// For float, there are 8 exponent bits and 23 fraction bits.
+//
+// For double, there are 11 exponent bits and 52 fraction bits.
+//
+// More details can be found at
+// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+ // Defines the unsigned integer type that has the same size as the
+ // floating point number.
+ typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+ // Constants.
+
+ // # of bits in a number.
+ static const size_t kBitCount = 8*sizeof(RawType);
+
+ // # of fraction bits in a number.
+ static const size_t kFractionBitCount =
+ std::numeric_limits<RawType>::digits - 1;
+
+ // # of exponent bits in a number.
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ // The mask for the sign bit.
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ // The mask for the fraction bits.
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ // The mask for the exponent bits.
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+  // How many ULPs (Units in the Last Place) we want to tolerate when
+  // comparing two numbers. The larger the value, the more error we
+  // allow. A 0 value means that two numbers must be exactly the same
+  // to be considered equal.
+  //
+  // The maximum error of a single floating-point operation is 0.5
+  // units in the last place. On Intel CPUs using the legacy x87 FPU,
+  // floating-point calculations are done with 80-bit precision while
+  // double has 64 bits. Therefore, 4 should be enough for ordinary use.
+ //
+ // See the following article for more details on ULP:
+ // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+ static const uint32_t kMaxUlps = 4;
+
+ // Constructs a FloatingPoint from a raw floating-point number.
+ //
+ // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ // around may change its bits, although the new value is guaranteed
+ // to be also a NAN. Therefore, don't expect this constructor to
+ // preserve the bits in x when x is a NAN.
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+ // Static methods
+
+ // Reinterprets a bit pattern as a floating-point number.
+ //
+ // This function is needed to test the AlmostEquals() method.
+ static RawType ReinterpretBits(const Bits bits) {
+ FloatingPoint fp(0);
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
+ }
+
+  // Returns the floating-point number that represents positive infinity.
+ static RawType Infinity() {
+ return ReinterpretBits(kExponentBitMask);
+ }
+
+ // Returns the maximum representable finite floating-point number.
+ static RawType Max();
+
+ // Non-static methods
+
+  // Returns the bits that represent this number.
+ const Bits &bits() const { return u_.bits_; }
+
+ // Returns the exponent bits of this number.
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+ // Returns the fraction bits of this number.
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+ // Returns the sign bit of this number.
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+ // Returns true if and only if this is NAN (not a number).
+ bool is_nan() const {
+ // It's a NAN if the exponent bits are all ones and the fraction
+ // bits are not entirely zeros.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+  // Returns true if and only if this number is at most kMaxUlps ULPs away
+  // from rhs. In particular, this function:
+  //
+  //   - returns false if either number is (or both are) NAN.
+  //   - treats really large numbers as almost equal to infinity.
+  //   - thinks +0.0 and -0.0 are 0 ULPs apart.
+ bool AlmostEquals(const FloatingPoint& rhs) const {
+ // The IEEE standard says that any comparison operation involving
+ // a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
+ }
+
+ private:
+ // The data type used to store the actual floating-point number.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
+ // Converts an integer from the sign-and-magnitude representation to
+ // the biased representation. More precisely, let N be 2 to the
+ // power of (kBitCount - 1), an integer x is represented by the
+ // unsigned number x + N.
+ //
+ // For instance,
+ //
+ // -N + 1 (the most negative number representable using
+ // sign-and-magnitude) is represented by 1;
+ // 0 is represented by N; and
+ // N - 1 (the biggest number representable using
+ // sign-and-magnitude) is represented by 2N - 1.
+ //
+ // Read http://en.wikipedia.org/wiki/Signed_number_representations
+ // for more details on signed number representations.
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ // Given two numbers in the sign-and-magnitude representation,
+ // returns the distance between them as an unsigned number.
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
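+
+  // As a worked example of the mapping above, with (hypothetical) 8-bit
+  // values: -1 (sam = 0x81) is biased to 0x7F, 0 (sam = 0x00) to 0x80, and
+  // +1 (sam = 0x01) to 0x81, so the distance between -1 and +1 is 2.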
+
+ FloatingPointUnion u_;
+};
+
+// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
+// macro defined by <windows.h>.
+template <>
+inline float FloatingPoint<float>::Max() { return FLT_MAX; }
+template <>
+inline double FloatingPoint<double>::Max() { return DBL_MAX; }
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
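+
+// A minimal usage sketch (std::nextafter is from <cmath>):
+//
+//   const double x = 1.0;
+//   const double y = std::nextafter(x, 2.0);  // 1 ULP above x
+//   Double(x).AlmostEquals(Double(y));        // true: within 4 ULPs
+//   Double(x).AlmostEquals(Double(1.1));      // false: many ULPs apart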
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test suite, we need to assign
+// unique IDs to fixture classes and compare them. The TypeId type is
+// used to hold such IDs. The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+ // dummy_ must not have a const type. Otherwise an overly eager
+ // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+ // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+ static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T. Different values will be
+// returned for different types. Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+ // The compiler is required to allocate a different
+ // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+ // the template. Therefore, the address of dummy_ is guaranteed to
+ // be unique.
+ return &(TypeIdHelper<T>::dummy_);
+}
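+
+// For example, GetTypeId<int>() == GetTypeId<int>() always holds, while
+// GetTypeId<int>() != GetTypeId<double>(), since each instantiation of
+// TypeIdHelper gets its own dummy_ variable.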
+
+// Returns the type ID of ::testing::Test. Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+ virtual ~TestFactoryBase() {}
+
+  // Creates a test instance to run. The instance is both created and
+  // destroyed within TestInfoImpl::Run().
+ virtual Test* CreateTest() = 0;
+
+ protected:
+ TestFactoryBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+ Test* CreateTest() override { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
+
+#endif // GTEST_OS_WINDOWS
+
+// Types of SetUpTestSuite() and TearDownTestSuite() functions.
+using SetUpTestSuiteFunc = void (*)();
+using TearDownTestSuiteFunc = void (*)();
+
+struct CodeLocation {
+ CodeLocation(const std::string& a_file, int a_line)
+ : file(a_file), line(a_line) {}
+
+ std::string file;
+ int line;
+};
+
+// Helper to identify which setup function for TestCase / TestSuite to call.
+// Only one function is allowed, either the TestCase or the TestSuite
+// variant, but not both.
+
+// Utility functions to help SuiteApiResolver
+using SetUpTearDownSuiteFuncType = void (*)();
+
+inline SetUpTearDownSuiteFuncType GetNotDefaultOrNull(
+ SetUpTearDownSuiteFuncType a, SetUpTearDownSuiteFuncType def) {
+ return a == def ? nullptr : a;
+}
+
+template <typename T>
+// Note that SuiteApiResolver inherits from T because
+// SetUpTestSuite()/TearDownTestSuite() could be protected. This way
+// SuiteApiResolver can access them.
+struct SuiteApiResolver : T {
+  // testing::Test is only forward declared at this point. So we make it a
+  // dependent class for the compiler to be OK with it.
+ using Test =
+ typename std::conditional<sizeof(T) != 0, ::testing::Test, void>::type;
+
+ static SetUpTearDownSuiteFuncType GetSetUpCaseOrSuite(const char* filename,
+ int line_num) {
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ SetUpTearDownSuiteFuncType test_case_fp =
+ GetNotDefaultOrNull(&T::SetUpTestCase, &Test::SetUpTestCase);
+ SetUpTearDownSuiteFuncType test_suite_fp =
+ GetNotDefaultOrNull(&T::SetUpTestSuite, &Test::SetUpTestSuite);
+
+ GTEST_CHECK_(!test_case_fp || !test_suite_fp)
+ << "Test can not provide both SetUpTestSuite and SetUpTestCase, please "
+ "make sure there is only one present at "
+ << filename << ":" << line_num;
+
+ return test_case_fp != nullptr ? test_case_fp : test_suite_fp;
+#else
+ (void)(filename);
+ (void)(line_num);
+ return &T::SetUpTestSuite;
+#endif
+ }
+
+ static SetUpTearDownSuiteFuncType GetTearDownCaseOrSuite(const char* filename,
+ int line_num) {
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ SetUpTearDownSuiteFuncType test_case_fp =
+ GetNotDefaultOrNull(&T::TearDownTestCase, &Test::TearDownTestCase);
+ SetUpTearDownSuiteFuncType test_suite_fp =
+ GetNotDefaultOrNull(&T::TearDownTestSuite, &Test::TearDownTestSuite);
+
+ GTEST_CHECK_(!test_case_fp || !test_suite_fp)
+ << "Test can not provide both TearDownTestSuite and TearDownTestCase,"
+ " please make sure there is only one present at"
+ << filename << ":" << line_num;
+
+ return test_case_fp != nullptr ? test_case_fp : test_suite_fp;
+#else
+ (void)(filename);
+ (void)(line_num);
+ return &T::TearDownTestSuite;
+#endif
+ }
+};
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_suite_name: name of the test suite
+// name: name of the test
+// type_param: the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param: text representation of the test's value parameter,
+// or NULL if this is not a type-parameterized test.
+// code_location: code location where the test is defined
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test suite
+// tear_down_tc: pointer to the function that tears down the test suite
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+ const char* test_suite_name, const char* name, const char* type_param,
+ const char* value_param, CodeLocation code_location,
+ TypeId fixture_class_id, SetUpTestSuiteFunc set_up_tc,
+ TearDownTestSuiteFunc tear_down_tc, TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+// State of the definition of a type-parameterized test suite.
+class GTEST_API_ TypedTestSuitePState {
+ public:
+ TypedTestSuitePState() : registered_(false) {}
+
+  // Adds the given test name to registered_tests_ and returns true
+  // if the test suite hasn't been registered; otherwise aborts the
+  // program.
+ bool AddTestName(const char* file, int line, const char* case_name,
+ const char* test_name) {
+ if (registered_) {
+ fprintf(stderr,
+ "%s Test %s must be defined before "
+ "REGISTER_TYPED_TEST_SUITE_P(%s, ...).\n",
+ FormatFileLocation(file, line).c_str(), test_name, case_name);
+ fflush(stderr);
+ posix::Abort();
+ }
+ registered_tests_.insert(
+ ::std::make_pair(test_name, CodeLocation(file, line)));
+ return true;
+ }
+
+ bool TestExists(const std::string& test_name) const {
+ return registered_tests_.count(test_name) > 0;
+ }
+
+ const CodeLocation& GetCodeLocation(const std::string& test_name) const {
+ RegisteredTestsMap::const_iterator it = registered_tests_.find(test_name);
+ GTEST_CHECK_(it != registered_tests_.end());
+ return it->second;
+ }
+
+  // Verifies that registered_tests match the test names in
+  // registered_tests_; returns registered_tests if successful, or
+  // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames(const char* test_suite_name,
+ const char* file, int line,
+ const char* registered_tests);
+
+ private:
+ typedef ::std::map<std::string, CodeLocation> RegisteredTestsMap;
+
+ bool registered_;
+ RegisteredTestsMap registered_tests_;
+};
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+using TypedTestCasePState = TypedTestSuitePState;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'.
+inline const char* SkipComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ if (comma == nullptr) {
+ return nullptr;
+ }
+ while (IsSpace(*(++comma))) {}
+ return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma.
+inline std::string GetPrefixUntilComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ return comma == nullptr ? str : std::string(str, comma);
+}
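+
+// For example, given str = "Foo, Bar", SkipComma(str) returns a pointer
+// to "Bar" and GetPrefixUntilComma(str) returns "Foo".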
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields.
+void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest);
+
+// The default argument to the template below for the case when the user does
+// not provide a name generator.
+struct DefaultNameGenerator {
+ template <typename T>
+ static std::string GetName(int i) {
+ return StreamableToString(i);
+ }
+};
+
+template <typename Provided = DefaultNameGenerator>
+struct NameGeneratorSelector {
+ typedef Provided type;
+};
+
+template <typename NameGenerator>
+void GenerateNamesRecursively(internal::None, std::vector<std::string>*, int) {}
+
+template <typename NameGenerator, typename Types>
+void GenerateNamesRecursively(Types, std::vector<std::string>* result, int i) {
+ result->push_back(NameGenerator::template GetName<typename Types::Head>(i));
+ GenerateNamesRecursively<NameGenerator>(typename Types::Tail(), result,
+ i + 1);
+}
+
+template <typename NameGenerator, typename Types>
+std::vector<std::string> GenerateNames() {
+ std::vector<std::string> result;
+ GenerateNamesRecursively<NameGenerator>(Types(), &result, 0);
+ return result;
+}
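+
+// For example, GenerateNames<DefaultNameGenerator, Types<int, char>>()
+// yields {"0", "1"}: the default generator simply stringifies each type's
+// index in the list.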
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test. The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter. It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+ // 'index' is the index of the test in the type list 'Types'
+ // specified in INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, TestSuite,
+ // Types). Valid values for 'index' are [0, N - 1] where N is the
+ // length of Types.
+ static bool Register(const char* prefix, const CodeLocation& code_location,
+ const char* case_name, const char* test_names, int index,
+ const std::vector<std::string>& type_names =
+ GenerateNames<DefaultNameGenerator, Types>()) {
+ typedef typename Types::Head Type;
+ typedef Fixture<Type> FixtureClass;
+ typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+ // First, registers the first type-parameterized test in the type
+ // list.
+ MakeAndRegisterTestInfo(
+ (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name +
+ "/" + type_names[static_cast<size_t>(index)])
+ .c_str(),
+ StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(),
+ GetTypeName<Type>().c_str(),
+ nullptr, // No value parameter.
+ code_location, GetTypeId<FixtureClass>(),
+ SuiteApiResolver<TestClass>::GetSetUpCaseOrSuite(
+ code_location.file.c_str(), code_location.line),
+ SuiteApiResolver<TestClass>::GetTearDownCaseOrSuite(
+ code_location.file.c_str(), code_location.line),
+ new TestFactoryImpl<TestClass>);
+
+ // Next, recurses (at compile time) with the tail of the type list.
+ return TypeParameterizedTest<Fixture, TestSel,
+ typename Types::Tail>::Register(prefix,
+ code_location,
+ case_name,
+ test_names,
+ index + 1,
+ type_names);
+ }
+};
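+
+// For example, with prefix "My", case_name "FooTest", and the default
+// type names, the test registered for the first type in the list ends up
+// in the test suite named "My/FooTest/0".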
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, internal::None> {
+ public:
+ static bool Register(const char* /*prefix*/, const CodeLocation&,
+ const char* /*case_name*/, const char* /*test_names*/,
+ int /*index*/,
+ const std::vector<std::string>& =
+ std::vector<std::string>() /*type_names*/) {
+ return true;
+ }
+};
+
+GTEST_API_ void RegisterTypeParameterizedTestSuite(const char* test_suite_name,
+ CodeLocation code_location);
+GTEST_API_ void RegisterTypeParameterizedTestSuiteInstantiation(
+ const char* case_name);
+
+// TypeParameterizedTestSuite<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test. The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestSuite {
+ public:
+ static bool Register(const char* prefix, CodeLocation code_location,
+ const TypedTestSuitePState* state, const char* case_name,
+ const char* test_names,
+ const std::vector<std::string>& type_names =
+ GenerateNames<DefaultNameGenerator, Types>()) {
+ RegisterTypeParameterizedTestSuiteInstantiation(case_name);
+ std::string test_name = StripTrailingSpaces(
+ GetPrefixUntilComma(test_names));
+ if (!state->TestExists(test_name)) {
+ fprintf(stderr, "Failed to get code location for test %s.%s at %s.",
+ case_name, test_name.c_str(),
+ FormatFileLocation(code_location.file.c_str(),
+ code_location.line).c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+ const CodeLocation& test_location = state->GetCodeLocation(test_name);
+
+ typedef typename Tests::Head Head;
+
+    // First, register the first test in 'Tests' for each type in 'Types'.
+ TypeParameterizedTest<Fixture, Head, Types>::Register(
+ prefix, test_location, case_name, test_names, 0, type_names);
+
+ // Next, recurses (at compile time) with the tail of the test list.
+ return TypeParameterizedTestSuite<Fixture, typename Tests::Tail,
+ Types>::Register(prefix, code_location,
+ state, case_name,
+ SkipComma(test_names),
+ type_names);
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestSuite<Fixture, internal::None, Types> {
+ public:
+ static bool Register(const char* /*prefix*/, const CodeLocation&,
+ const TypedTestSuitePState* /*state*/,
+ const char* /*case_name*/, const char* /*test_names*/,
+ const std::vector<std::string>& =
+ std::vector<std::string>() /*type_names*/) {
+ return true;
+ }
+};
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
+ UnitTest* unit_test, int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// Helper for suppressing false warning from Clang on a const char*
+// variable declared in a conditional expression always being NULL in
+// the else branch.
+struct GTEST_API_ ConstCharPtr {
+ ConstCharPtr(const char* str) : value(str) {}
+ operator bool() const { return true; }
+ const char* value;
+};
+
+// Helper for declaring std::string within 'if' statement
+// in pre C++17 build environment.
+struct TrueWithString {
+ TrueWithString() = default;
+ explicit TrueWithString(const char* str) : value(str) {}
+ explicit TrueWithString(const std::string& str) : value(str) {}
+ explicit operator bool() const { return true; }
+ std::string value;
+};
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ static const uint32_t kMaxRange = 1u << 31;
+
+ explicit Random(uint32_t seed) : state_(seed) {}
+
+ void Reseed(uint32_t seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange.
+ uint32_t Generate(uint32_t range);
+
+ private:
+ uint32_t state_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
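+
+// A brief usage sketch:
+//
+//   Random random(42);    // deterministic for a fixed seed
+//   random.Generate(10);  // some value in [0, 10)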
+
+// Turns const U&, U&, const U, and U all into U.
+#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
+ typename std::remove_const<typename std::remove_reference<T>::type>::type
+
+// HasDebugStringAndShortDebugString<T>::value is a compile-time bool constant
+// that's true if and only if T has methods DebugString() and ShortDebugString()
+// that return std::string.
+template <typename T>
+class HasDebugStringAndShortDebugString {
+ private:
+ template <typename C>
+ static auto CheckDebugString(C*) -> typename std::is_same<
+ std::string, decltype(std::declval<const C>().DebugString())>::type;
+ template <typename>
+ static std::false_type CheckDebugString(...);
+
+ template <typename C>
+ static auto CheckShortDebugString(C*) -> typename std::is_same<
+ std::string, decltype(std::declval<const C>().ShortDebugString())>::type;
+ template <typename>
+ static std::false_type CheckShortDebugString(...);
+
+ using HasDebugStringType = decltype(CheckDebugString<T>(nullptr));
+ using HasShortDebugStringType = decltype(CheckShortDebugString<T>(nullptr));
+
+ public:
+ static constexpr bool value =
+ HasDebugStringType::value && HasShortDebugStringType::value;
+};
+
+template <typename T>
+constexpr bool HasDebugStringAndShortDebugString<T>::value;
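+
+// For example, value is true for a type with members
+//   std::string DebugString() const;
+//   std::string ShortDebugString() const;
+// (such as full, non-lite protobuf messages), and false for, say, int.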
+
+// When the compiler sees expression IsContainerTest<C>(0), if C is an
+// STL-style container class, the first overload of IsContainerTest
+// will be viable (since both C::iterator* and C::const_iterator* are
+// valid types and NULL can be implicitly converted to them). It will
+// be picked over the second overload as 'int' is a perfect match for
+// the type of argument 0. If C::iterator or C::const_iterator is not
+// a valid type, the first overload is not viable, and the second
+// overload will be picked. Therefore, we can determine whether C is
+// a container class by checking the type of IsContainerTest<C>(0).
+// The value of the expression is insignificant.
+//
+// In C++11 mode we check the existence of a const_iterator and that an
+// iterator is properly implemented for the container.
+//
+// For pre-C++11 compilers, we look for both C::iterator and C::const_iterator.
+// The reason is that C++ injects the name of a class as a member of the
+// class itself (e.g. you can refer to class iterator as either
+// 'iterator' or 'iterator::iterator'). If we look for C::iterator
+// only, for example, we would mistakenly think that a class named
+// iterator is an STL container.
+//
+// Also note that the simpler approach of overloading
+// IsContainerTest(typename C::const_iterator*) and
+// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
+typedef int IsContainer;
+template <class C,
+ class Iterator = decltype(::std::declval<const C&>().begin()),
+ class = decltype(::std::declval<const C&>().end()),
+ class = decltype(++::std::declval<Iterator&>()),
+ class = decltype(*::std::declval<Iterator>()),
+ class = typename C::const_iterator>
+IsContainer IsContainerTest(int /* dummy */) {
+ return 0;
+}
+
+typedef char IsNotContainer;
+template <class C>
+IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
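+
+// For example, sizeof(IsContainerTest<std::vector<int>>(0)) equals
+// sizeof(IsContainer), while for a non-container such as int the second
+// overload is chosen and the result has sizeof(IsNotContainer).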
+
+// Trait to detect whether a type T is a hash table.
+// The heuristic used is that the type contains an inner type `hasher` and does
+// not contain an inner type `reverse_iterator`.
+// If the container is iterable in reverse, then order might actually matter.
+template <typename T>
+struct IsHashTable {
+ private:
+ template <typename U>
+ static char test(typename U::hasher*, typename U::reverse_iterator*);
+ template <typename U>
+ static int test(typename U::hasher*, ...);
+ template <typename U>
+ static char test(...);
+
+ public:
+ static const bool value = sizeof(test<T>(nullptr, nullptr)) == sizeof(int);
+};
+
+template <typename T>
+const bool IsHashTable<T>::value;
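+
+// For example, IsHashTable<std::unordered_set<int>>::value is true, while
+// IsHashTable<std::set<int>>::value is false: std::set has no hasher and
+// is reverse-iterable, so order may matter.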
+
+template <typename C,
+ bool = sizeof(IsContainerTest<C>(0)) == sizeof(IsContainer)>
+struct IsRecursiveContainerImpl;
+
+template <typename C>
+struct IsRecursiveContainerImpl<C, false> : public std::false_type {};
+
+// Since IsRecursiveContainerImpl depends on IsContainerTest, it must obey
+// the same inconsistencies as IsContainerTest: the check for whether
+// something is a container relies only on const_iterator in C++11, and on
+// both const_iterator and iterator otherwise.
+template <typename C>
+struct IsRecursiveContainerImpl<C, true> {
+ using value_type = decltype(*std::declval<typename C::const_iterator>());
+ using type =
+ std::is_same<typename std::remove_const<
+ typename std::remove_reference<value_type>::type>::type,
+ C>;
+};
+
+// IsRecursiveContainer<Type> is a unary compile-time predicate that
+// evaluates whether C is a recursive container type. A recursive container
+// type is a container type whose value_type is equal to the container type
+// itself. An example for a recursive container type is
+// boost::filesystem::path, whose iterator has a value_type that is equal to
+// boost::filesystem::path.
+template <typename C>
+struct IsRecursiveContainer : public IsRecursiveContainerImpl<C>::type {};
+
+// Utilities for native arrays.
+
+// ArrayEq() compares two k-dimensional native arrays using the
+// elements' operator==, where k can be any integer >= 0. When k is
+// 0, ArrayEq() degenerates into comparing a single pair of values.
+
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
+ return internal::ArrayEq(lhs, N, rhs);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous ArrayEq() function, arrays with different sizes would
+// lead to different copies of the template code.
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
+ for (size_t i = 0; i != size; i++) {
+ if (!internal::ArrayEq(lhs[i], rhs[i]))
+ return false;
+ }
+ return true;
+}
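+
+// For example, ArrayEq() compares multi-dimensional arrays recursively:
+//
+//   const int a[2][2] = {{1, 2}, {3, 4}};
+//   const int b[2][2] = {{1, 2}, {3, 4}};
+//   internal::ArrayEq(a, b);  // true; compares each inner array in turn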
+
+// Finds the first element in the iterator range [begin, end) that
+// equals elem. Element may be a native array type itself.
+template <typename Iter, typename Element>
+Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
+ for (Iter it = begin; it != end; ++it) {
+ if (internal::ArrayEq(*it, elem))
+ return it;
+ }
+ return end;
+}
+
+// CopyArray() copies a k-dimensional native array using the elements'
+// operator=, where k can be any integer >= 0. When k is 0,
+// CopyArray() degenerates into copying a single value.
+
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline void CopyArray(const T& from, U* to) { *to = from; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline void CopyArray(const T(&from)[N], U(*to)[N]) {
+ internal::CopyArray(from, N, *to);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous CopyArray() function, arrays with different sizes
+// would lead to different copies of the template code.
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to) {
+ for (size_t i = 0; i != size; i++) {
+ internal::CopyArray(from[i], to + i);
+ }
+}
+
+// The relation between a NativeArray object (see below) and the
+// native array it represents.
+// We use 2 different structs to allow non-copyable types to be used, as long
+// as RelationToSourceReference() is passed.
+struct RelationToSourceReference {};
+struct RelationToSourceCopy {};
+
+// Adapts a native array to a read-only STL-style container. Instead
+// of the complete STL container concept, this adaptor only implements
+// members useful for Google Mock's container matchers. New members
+// should be added as needed. To simplify the implementation, we only
+// support Element being a raw type (i.e. having no top-level const or
+// reference modifier). It's the client's responsibility to satisfy
+// this requirement. Element can be an array type itself (hence
+// multi-dimensional arrays are supported).
+template <typename Element>
+class NativeArray {
+ public:
+ // STL-style container typedefs.
+ typedef Element value_type;
+ typedef Element* iterator;
+ typedef const Element* const_iterator;
+
+ // Constructs from a native array. References the source.
+ NativeArray(const Element* array, size_t count, RelationToSourceReference) {
+ InitRef(array, count);
+ }
+
+ // Constructs from a native array. Copies the source.
+ NativeArray(const Element* array, size_t count, RelationToSourceCopy) {
+ InitCopy(array, count);
+ }
+
+ // Copy constructor.
+ NativeArray(const NativeArray& rhs) {
+ (this->*rhs.clone_)(rhs.array_, rhs.size_);
+ }
+
+ ~NativeArray() {
+ if (clone_ != &NativeArray::InitRef)
+ delete[] array_;
+ }
+
+ // STL-style container methods.
+ size_t size() const { return size_; }
+ const_iterator begin() const { return array_; }
+ const_iterator end() const { return array_ + size_; }
+ bool operator==(const NativeArray& rhs) const {
+ return size() == rhs.size() &&
+ ArrayEq(begin(), size(), rhs.begin());
+ }
+
+ private:
+ static_assert(!std::is_const<Element>::value, "Type must not be const");
+ static_assert(!std::is_reference<Element>::value,
+ "Type must not be a reference");
+
+ // Initializes this object with a copy of the input.
+ void InitCopy(const Element* array, size_t a_size) {
+ Element* const copy = new Element[a_size];
+ CopyArray(array, a_size, copy);
+ array_ = copy;
+ size_ = a_size;
+ clone_ = &NativeArray::InitCopy;
+ }
+
+ // Initializes this object with a reference of the input.
+ void InitRef(const Element* array, size_t a_size) {
+ array_ = array;
+ size_ = a_size;
+ clone_ = &NativeArray::InitRef;
+ }
+
+ const Element* array_;
+ size_t size_;
+ void (NativeArray::*clone_)(const Element*, size_t);
+};
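+
+// A brief usage sketch:
+//
+//   const int a[] = {1, 2, 3};
+//   NativeArray<int> view(a, 3, RelationToSourceReference());  // no copy
+//   NativeArray<int> copy(a, 3, RelationToSourceCopy());       // deep copy
+//   // view.size() == copy.size() == 3, and view == copy.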
+
+// Backport of std::index_sequence.
+template <size_t... Is>
+struct IndexSequence {
+ using type = IndexSequence;
+};
+
+// Doubles the IndexSequence, appending one extra element if plus_one is true.
+template <bool plus_one, typename T, size_t sizeofT>
+struct DoubleSequence;
+template <size_t... I, size_t sizeofT>
+struct DoubleSequence<true, IndexSequence<I...>, sizeofT> {
+ using type = IndexSequence<I..., (sizeofT + I)..., 2 * sizeofT>;
+};
+template <size_t... I, size_t sizeofT>
+struct DoubleSequence<false, IndexSequence<I...>, sizeofT> {
+ using type = IndexSequence<I..., (sizeofT + I)...>;
+};
+
+// Backport of std::make_index_sequence.
+// It uses O(ln(N)) instantiation depth.
+template <size_t N>
+struct MakeIndexSequenceImpl
+ : DoubleSequence<N % 2 == 1, typename MakeIndexSequenceImpl<N / 2>::type,
+ N / 2>::type {};
+
+template <>
+struct MakeIndexSequenceImpl<0> : IndexSequence<> {};
+
+template <size_t N>
+using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::type;
+
+template <typename... T>
+using IndexSequenceFor = typename MakeIndexSequence<sizeof...(T)>::type;
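+
+// For example, MakeIndexSequence<3> is IndexSequence<0, 1, 2>: it doubles
+// MakeIndexSequence<1> (= IndexSequence<0>) into IndexSequence<0, 1> and,
+// since 3 is odd, appends the extra element 2.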
+
+template <size_t>
+struct Ignore {
+ Ignore(...); // NOLINT
+};
+
+template <typename>
+struct ElemFromListImpl;
+template <size_t... I>
+struct ElemFromListImpl<IndexSequence<I...>> {
+ // We make Ignore a template to solve a problem with MSVC.
+ // A non-template Ignore would work fine with `decltype(Ignore(I))...`, but
+ // MSVC doesn't understand how to deal with that pack expansion.
+ // Use `0 * I` to have a single instantiation of Ignore.
+ template <typename R>
+ static R Apply(Ignore<0 * I>..., R (*)(), ...);
+};
+
+template <size_t N, typename... T>
+struct ElemFromList {
+ using type =
+ decltype(ElemFromListImpl<typename MakeIndexSequence<N>::type>::Apply(
+ static_cast<T (*)()>(nullptr)...));
+};
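+
+// For example, ElemFromList<1, int, char, double>::type is char: the
+// Ignore parameters swallow the first N function pointers, so the one for
+// the N-th type is matched against R (*)() and R is deduced as char.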
+
+struct FlatTupleConstructTag {};
+
+template <typename... T>
+class FlatTuple;
+
+template <typename Derived, size_t I>
+struct FlatTupleElemBase;
+
+template <typename... T, size_t I>
+struct FlatTupleElemBase<FlatTuple<T...>, I> {
+ using value_type = typename ElemFromList<I, T...>::type;
+ FlatTupleElemBase() = default;
+ template <typename Arg>
+ explicit FlatTupleElemBase(FlatTupleConstructTag, Arg&& t)
+ : value(std::forward<Arg>(t)) {}
+ value_type value;
+};
+
+template <typename Derived, typename Idx>
+struct FlatTupleBase;
+
+template <size_t... Idx, typename... T>
+struct FlatTupleBase<FlatTuple<T...>, IndexSequence<Idx...>>
+ : FlatTupleElemBase<FlatTuple<T...>, Idx>... {
+ using Indices = IndexSequence<Idx...>;
+ FlatTupleBase() = default;
+ template <typename... Args>
+ explicit FlatTupleBase(FlatTupleConstructTag, Args&&... args)
+ : FlatTupleElemBase<FlatTuple<T...>, Idx>(FlatTupleConstructTag{},
+ std::forward<Args>(args))... {}
+
+ template <size_t I>
+ const typename ElemFromList<I, T...>::type& Get() const {
+ return FlatTupleElemBase<FlatTuple<T...>, I>::value;
+ }
+
+ template <size_t I>
+ typename ElemFromList<I, T...>::type& Get() {
+ return FlatTupleElemBase<FlatTuple<T...>, I>::value;
+ }
+
+ template <typename F>
+ auto Apply(F&& f) -> decltype(std::forward<F>(f)(this->Get<Idx>()...)) {
+ return std::forward<F>(f)(Get<Idx>()...);
+ }
+
+ template <typename F>
+ auto Apply(F&& f) const -> decltype(std::forward<F>(f)(this->Get<Idx>()...)) {
+ return std::forward<F>(f)(Get<Idx>()...);
+ }
+};
+
+// Analog to std::tuple but with different tradeoffs.
+// This class minimizes the template instantiation depth, thus allowing more
+// elements than std::tuple would. std::tuple has been seen to require an
+// instantiation depth of more than 10x the number of elements in some
+// implementations.
+// FlatTuple and ElemFromList are not recursive and have a fixed depth
+// regardless of T...
+// MakeIndexSequence, on the other hand, is recursive but with an
+// instantiation depth of O(ln(N)).
+template <typename... T>
+class FlatTuple
+ : private FlatTupleBase<FlatTuple<T...>,
+ typename MakeIndexSequence<sizeof...(T)>::type> {
+ using Indices = typename FlatTupleBase<
+ FlatTuple<T...>, typename MakeIndexSequence<sizeof...(T)>::type>::Indices;
+
+ public:
+ FlatTuple() = default;
+ template <typename... Args>
+ explicit FlatTuple(FlatTupleConstructTag tag, Args&&... args)
+ : FlatTuple::FlatTupleBase(tag, std::forward<Args>(args)...) {}
+
+ using FlatTuple::FlatTupleBase::Apply;
+ using FlatTuple::FlatTupleBase::Get;
+};
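+
+// A minimal usage sketch:
+//
+//   FlatTuple<int, std::string> t(FlatTupleConstructTag{}, 1, "one");
+//   t.Get<0>();  // 1
+//   t.Get<1>();  // "one"
+//   t.Apply([](int i, const std::string& s) { return s.size() + i; });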
+
+// Utility functions to be called with static_assert to induce deprecation
+// warnings.
+GTEST_INTERNAL_DEPRECATED(
+ "INSTANTIATE_TEST_CASE_P is deprecated, please use "
+ "INSTANTIATE_TEST_SUITE_P")
+constexpr bool InstantiateTestCase_P_IsDeprecated() { return true; }
+
+GTEST_INTERNAL_DEPRECATED(
+ "TYPED_TEST_CASE_P is deprecated, please use "
+ "TYPED_TEST_SUITE_P")
+constexpr bool TypedTestCase_P_IsDeprecated() { return true; }
+
+GTEST_INTERNAL_DEPRECATED(
+ "TYPED_TEST_CASE is deprecated, please use "
+ "TYPED_TEST_SUITE")
+constexpr bool TypedTestCaseIsDeprecated() { return true; }
+
+GTEST_INTERNAL_DEPRECATED(
+ "REGISTER_TYPED_TEST_CASE_P is deprecated, please use "
+ "REGISTER_TYPED_TEST_SUITE_P")
+constexpr bool RegisterTypedTestCase_P_IsDeprecated() { return true; }
+
+GTEST_INTERNAL_DEPRECATED(
+ "INSTANTIATE_TYPED_TEST_CASE_P is deprecated, please use "
+ "INSTANTIATE_TYPED_TEST_SUITE_P")
+constexpr bool InstantiateTypedTestCase_P_IsDeprecated() { return true; }
+
+} // namespace internal
+} // namespace testing
+
+namespace std {
+// Some standard library implementations use `struct tuple_size` and some use
+// `class tuple_size`. Clang warns about the mismatch.
+// https://reviews.llvm.org/D55466
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-tags"
+#endif
+template <typename... Ts>
+struct tuple_size<testing::internal::FlatTuple<Ts...>>
+ : std::integral_constant<size_t, sizeof...(Ts)> {};
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+} // namespace std
+
+#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
+ ::testing::internal::AssertHelper(result_type, file, line, message) \
+ = ::testing::Message()
+
+#define GTEST_MESSAGE_(message, result_type) \
+ GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
+
+#define GTEST_FATAL_FAILURE_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+#define GTEST_SKIP_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kSkip)
+
+// Suppress MSVC warning 4702 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+// NOTE: The "else" is important to keep this expansion to prevent a top-level
+// "else" from attaching to our "if".
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { \
+ statement; \
+ } else /* NOLINT */ \
+ static_assert(true, "") // User must have a semicolon after expansion.
+
+#if GTEST_HAS_EXCEPTIONS
+
+namespace testing {
+namespace internal {
+
+class NeverThrown {
+ public:
+ const char* what() const noexcept {
+ return "this exception should never be thrown";
+ }
+};
+
+} // namespace internal
+} // namespace testing
+
+#if GTEST_HAS_RTTI
+
+#define GTEST_EXCEPTION_TYPE_(e) ::testing::internal::GetTypeName(typeid(e))
+
+#else // GTEST_HAS_RTTI
+
+#define GTEST_EXCEPTION_TYPE_(e) \
+ std::string { "an std::exception-derived error" }
+
+#endif // GTEST_HAS_RTTI
+
+#define GTEST_TEST_THROW_CATCH_STD_EXCEPTION_(statement, expected_exception) \
+ catch (std::conditional_t< \
+ std::is_same_v<std::remove_cv_t<std::remove_reference_t< \
+ expected_exception>>, std::exception>, \
+ const ::testing::internal::NeverThrown&, const std::exception&> \
+ e) { \
+ gtest_msg.value = "Expected: " #statement \
+ " throws an exception of type " #expected_exception \
+ ".\n Actual: it throws "; \
+ gtest_msg.value += GTEST_EXCEPTION_TYPE_(e); \
+ gtest_msg.value += " with description \""; \
+ gtest_msg.value += e.what(); \
+ gtest_msg.value += "\"."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ }
+
+#else // GTEST_HAS_EXCEPTIONS
+
+#define GTEST_TEST_THROW_CATCH_STD_EXCEPTION_(statement, expected_exception)
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::TrueWithString gtest_msg{}) { \
+ bool gtest_caught_expected = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } catch (expected_exception const&) { \
+ gtest_caught_expected = true; \
+ } \
+ GTEST_TEST_THROW_CATCH_STD_EXCEPTION_(statement, expected_exception) \
+ catch (...) { \
+ gtest_msg.value = "Expected: " #statement \
+ " throws an exception of type " #expected_exception \
+ ".\n Actual: it throws a different type."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ if (!gtest_caught_expected) { \
+ gtest_msg.value = "Expected: " #statement \
+ " throws an exception of type " #expected_exception \
+ ".\n Actual: it throws nothing."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ } else /*NOLINT*/ \
+ GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__) \
+ : fail(gtest_msg.value.c_str())
+
+#if GTEST_HAS_EXCEPTIONS
+
+#define GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \
+ catch (std::exception const& e) { \
+ gtest_msg.value = "it throws "; \
+ gtest_msg.value += GTEST_EXCEPTION_TYPE_(e); \
+ gtest_msg.value += " with description \""; \
+ gtest_msg.value += e.what(); \
+ gtest_msg.value += "\"."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ }
+
+#else // GTEST_HAS_EXCEPTIONS
+
+#define GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_()
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::TrueWithString gtest_msg{}) { \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ GTEST_TEST_NO_THROW_CATCH_STD_EXCEPTION_() \
+ catch (...) { \
+ gtest_msg.value = "it throws."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+ fail(("Expected: " #statement " doesn't throw an exception.\n" \
+ " Actual: " + gtest_msg.value).c_str())
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ bool gtest_caught_any = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_caught_any = true; \
+ } \
+ if (!gtest_caught_any) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+ fail("Expected: " #statement " throws an exception.\n" \
+ " Actual: it doesn't.")
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
+ ; \
+ else \
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+ fail("Expected: " #statement " doesn't generate new fatal " \
+ "failures in the current thread.\n" \
+ " Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \
+ test_suite_name##_##test_name##_Test
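+
+// For example, GTEST_TEST_CLASS_NAME_(FooTest, DoesBar) expands to the
+// class name FooTest_DoesBar_Test.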
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_suite_name, test_name, parent_class, parent_id) \
+ static_assert(sizeof(GTEST_STRINGIFY_(test_suite_name)) > 1, \
+ "test_suite_name must not be empty"); \
+ static_assert(sizeof(GTEST_STRINGIFY_(test_name)) > 1, \
+ "test_name must not be empty"); \
+ class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \
+ : public parent_class { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() { (void)test_info_; }\
+ ~GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() override = default; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \
+ test_name)); \
+ GTEST_DISALLOW_MOVE_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \
+ test_name)); \
+ \
+ private: \
+ void TestBody() override; \
+ static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_; \
+ }; \
+ \
+ ::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_suite_name, \
+ test_name)::test_info_ = \
+ ::testing::internal::MakeAndRegisterTestInfo( \
+ #test_suite_name, #test_name, nullptr, nullptr, \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__), (parent_id), \
+ ::testing::internal::SuiteApiResolver< \
+ parent_class>::GetSetUpCaseOrSuite(__FILE__, __LINE__), \
+ ::testing::internal::SuiteApiResolver< \
+ parent_class>::GetTearDownCaseOrSuite(__FILE__, __LINE__), \
+ new ::testing::internal::TestFactoryImpl<GTEST_TEST_CLASS_NAME_( \
+ test_suite_name, test_name)>); \
+ void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+//
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+// The Google C++ Testing and Mocking Framework (Google Test)
+//
+// This file implements just enough of the matcher interface to allow
+// EXPECT_DEATH and friends to accept a matcher argument.
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_
+
+#include <atomic>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <type_traits>
+
+// Google Test - The Google C++ Testing and Mocking Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+// 1. foo::PrintTo(const T&, ostream*)
+// 2. operator<<(ostream&, const T&) defined in either foo or the
+// global namespace.
+//
+// However, if T is an STL-style container, then it is printed element-wise
+// unless foo::PrintTo(const T&, ostream*) is defined. Note that
+// operator<<() is ignored for container types.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+// // Prints a value to a string. For a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// std::string ::testing::PrintToString(const T& value);
+//
+// // Prints a value tersely: for a reference type, the referenced
+// // value (but not the address) is printed; for a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+// // Prints value using the type inferred by the compiler. The difference
+// // from UniversalTersePrint() is that this function prints both the
+// // pointer and the NUL-terminated string for a (const or not) char pointer.
+// void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+// // Prints the fields of a tuple tersely to a string vector, one
+// // element for each field.
+// std::vector<std::string> UniversalTersePrintTupleFieldsToStrings(
+//     const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
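+//
+// As a brief sketch (foo::Bar is a made-up type), a user could write:
+//
+//   namespace foo {
+//   struct Bar { int x; };
+//   void PrintTo(const Bar& b, std::ostream* os) {
+//     *os << "Bar{" << b.x << "}";
+//   }
+//   }  // namespace foo
+//
+// after which ::testing::PrintToString(foo::Bar{42}) yields "Bar{42}".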
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <functional>
+#include <memory>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+
+namespace testing {
+
+// Definitions in the internal* namespaces are subject to change without notice.
+// DO NOT USE THEM IN USER CODE!
+namespace internal {
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+struct ContainerPrinter {
+ template <typename T,
+ typename = typename std::enable_if<
+ (sizeof(IsContainerTest<T>(0)) == sizeof(IsContainer)) &&
+ !IsRecursiveContainer<T>::value>::type>
+ static void PrintValue(const T& container, std::ostream* os) {
+ const size_t kMaxCount = 32; // The maximum number of elements to print.
+ *os << '{';
+ size_t count = 0;
+ for (auto&& elem : container) {
+ if (count > 0) {
+ *os << ',';
+ if (count == kMaxCount) { // Enough has been printed.
+ *os << " ...";
+ break;
+ }
+ }
+ *os << ' ';
+ // We cannot call PrintTo(elem, os) here as PrintTo() doesn't
+ // handle `elem` being a native array.
+ internal::UniversalPrint(elem, os);
+ ++count;
+ }
+
+ if (count > 0) {
+ *os << ' ';
+ }
+ *os << '}';
+ }
+};
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Their representation is
+// implementation-defined. Therefore they will be printed as raw
+// bytes.)
+struct FunctionPointerPrinter {
+ template <typename T, typename = typename std::enable_if<
+ std::is_function<T>::value>::type>
+ static void PrintValue(T* p, ::std::ostream* os) {
+ if (p == nullptr) {
+ *os << "NULL";
+ } else {
+ // T is a function type, so '*os << p' doesn't do what we want
+ // (it just prints p as bool). We want to print p as a const
+ // void*.
+ *os << reinterpret_cast<const void*>(p);
+ }
+ }
+};
+
+struct PointerPrinter {
+ template <typename T>
+ static void PrintValue(T* p, ::std::ostream* os) {
+ if (p == nullptr) {
+ *os << "NULL";
+ } else {
+ // T is not a function type. We just call << to print p,
+ // relying on ADL to pick up user-defined << for their pointer
+ // types, if any.
+ *os << p;
+ }
+ }
+};
+
+namespace internal_stream_operator_without_lexical_name_lookup {
+
+// The presence of an operator<< here will terminate lexical scope lookup
+// straight away (even though it cannot be a match because of its argument
+// types). Thus, the two operator<< calls in StreamPrinter will find only ADL
+// candidates.
+struct LookupBlocker {};
+void operator<<(LookupBlocker, LookupBlocker);
+
+struct StreamPrinter {
+ template <typename T,
+ // Don't accept member pointers here. We'd print them via implicit
+ // conversion to bool, which isn't useful.
+ typename = typename std::enable_if<
+ !std::is_member_pointer<T>::value>::type,
+ // Only accept types for which we can find a streaming operator via
+ // ADL (possibly involving implicit conversions).
+ typename = decltype(std::declval<std::ostream&>()
+ << std::declval<const T&>())>
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ // Call streaming operator found by ADL, possibly with implicit conversions
+ // of the arguments.
+ *os << value;
+ }
+};
+
+} // namespace internal_stream_operator_without_lexical_name_lookup
+
+struct ProtobufPrinter {
+ // We print a protobuf using its ShortDebugString() when the string
+ // doesn't exceed this many characters; otherwise we print it using
+ // DebugString() for better readability.
+ static const size_t kProtobufOneLinerMaxLength = 50;
+
+ template <typename T,
+ typename = typename std::enable_if<
+ internal::HasDebugStringAndShortDebugString<T>::value>::type>
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ std::string pretty_str = value.ShortDebugString();
+ if (pretty_str.length() > kProtobufOneLinerMaxLength) {
+ pretty_str = "\n" + value.DebugString();
+ }
+ *os << ("<" + pretty_str + ">");
+ }
+};
+
+struct ConvertibleToIntegerPrinter {
+ // Since T has no << operator or PrintTo() but can be implicitly
+ // converted to BiggestInt, we print it as a BiggestInt.
+ //
+ // Most likely T is an enum type (either named or unnamed), in which
+ // case printing it as an integer is the desired behavior. In case
+ // T is not an enum, printing it as an integer is the best we can do
+ // given that it has no user-defined printer.
+ static void PrintValue(internal::BiggestInt value, ::std::ostream* os) {
+ *os << value;
+ }
+};
+
+struct ConvertibleToStringViewPrinter {
+#if GTEST_INTERNAL_HAS_STRING_VIEW
+ static void PrintValue(internal::StringView value, ::std::ostream* os) {
+ internal::UniversalPrint(value, os);
+ }
+#endif
+};
+
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+ size_t count,
+ ::std::ostream* os);
+struct RawBytesPrinter {
+ // SFINAE on `sizeof` to make sure we have a complete type.
+ template <typename T, size_t = sizeof(T)>
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ PrintBytesInObjectTo(
+ static_cast<const unsigned char*>(
+ // Load bearing cast to void* to support iOS
+ reinterpret_cast<const void*>(std::addressof(value))),
+ sizeof(value), os);
+ }
+};
+
+struct FallbackPrinter {
+ template <typename T>
+ static void PrintValue(const T&, ::std::ostream* os) {
+ *os << "(incomplete type)";
+ }
+};
+
+// Try every printer in order and return the first one that works.
+template <typename T, typename E, typename Printer, typename... Printers>
+struct FindFirstPrinter : FindFirstPrinter<T, E, Printers...> {};
+
+template <typename T, typename Printer, typename... Printers>
+struct FindFirstPrinter<
+ T, decltype(Printer::PrintValue(std::declval<const T&>(), nullptr)),
+ Printer, Printers...> {
+ using type = Printer;
+};
+
+// Select the best printer in the following order:
+// - Print containers (they have begin/end/etc).
+// - Print function pointers.
+// - Print object pointers.
+// - Use the stream operator, if available.
+// - Print protocol buffers.
+// - Print types convertible to BiggestInt.
+// - Print types convertible to StringView, if available.
+// - Fallback to printing the raw bytes of the object.
+template <typename T>
+void PrintWithFallback(const T& value, ::std::ostream* os) {
+ using Printer = typename FindFirstPrinter<
+ T, void, ContainerPrinter, FunctionPointerPrinter, PointerPrinter,
+ internal_stream_operator_without_lexical_name_lookup::StreamPrinter,
+ ProtobufPrinter, ConvertibleToIntegerPrinter,
+ ConvertibleToStringViewPrinter, RawBytesPrinter, FallbackPrinter>::type;
+ Printer::PrintValue(value, os);
+}
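+
+// For instance (an illustrative, not exhaustive, mapping): std::vector<int>
+// is handled by ContainerPrinter; a function pointer by
+// FunctionPointerPrinter; an int* by PointerPrinter; a class whose only
+// printing facility is an ADL-visible operator<< by StreamPrinter; a
+// complete class with none of the above by RawBytesPrinter; and an
+// incomplete type falls through to FallbackPrinter.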
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value. In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object. If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+ static ::std::string Format(const ToPrint& value) {
+ return ::testing::PrintToString(value);
+ }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+ static ::std::string Format(const ToPrint* value) {
+ return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+ }
+};
+
+// By default, print C strings as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
+ template <typename OtherOperand> \
+ class FormatForComparison<CharType*, OtherOperand> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(static_cast<const void*>(value)); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+#ifdef __cpp_lib_char8_t
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char8_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char8_t);
+#endif
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char16_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char16_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char32_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char32_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+ template <> \
+ class FormatForComparison<CharType*, OtherStringType> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(value); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+#ifdef __cpp_lib_char8_t
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char8_t, ::std::u8string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char8_t, ::std::u8string);
+#endif
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char16_t, ::std::u16string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char16_t, ::std::u16string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char32_t, ::std::u32string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char32_t, ::std::u32string);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+ const T1& value, const T2& /* other_operand */) {
+ return FormatForComparison<T1, T2>::Format(value);
+}
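+
+// For instance: given const char* p = "foo",
+// FormatForComparisonFailureMessage(p, std::string("foo")) formats p as the
+// string "foo", while FormatForComparisonFailureMessage(p, p) formats it as
+// a raw pointer value, since nothing guarantees that p points to a
+// NUL-terminated string in that case.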
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it. This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined. We
+// give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+ internal::PrintWithFallback(value, os);
+}
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+ // When printing a plain char, we always treat it as unsigned. This
+ // way, the output won't be affected by whether the compiler thinks
+ // char is signed or not.
+ PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+ *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable, or as its internal
+// code otherwise; in addition, prints its decimal code (except for
+// L'\0', which is printed as "L'\\0'"). The decimal code is printed as
+// a signed integer when the compiler implements wchar_t as a signed
+// type, and as an unsigned integer otherwise.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+GTEST_API_ void PrintTo(char32_t c, ::std::ostream* os);
+inline void PrintTo(char16_t c, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<char32_t>(c), os);
+}
+#ifdef __cpp_lib_char8_t
+inline void PrintTo(char8_t c, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<char32_t>(c), os);
+}
+#endif
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+#ifdef __cpp_lib_char8_t
+// Overloads for u8 strings.
+GTEST_API_ void PrintTo(const char8_t* s, ::std::ostream* os);
+inline void PrintTo(char8_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char8_t*>(s), os);
+}
+#endif
+// Overloads for u16 strings.
+GTEST_API_ void PrintTo(const char16_t* s, ::std::ostream* os);
+inline void PrintTo(char16_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char16_t*>(s), os);
+}
+// Overloads for u32 strings.
+GTEST_API_ void PrintTo(const char32_t* s, ::std::ostream* os);
+inline void PrintTo(char32_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char32_t*>(s), os);
+}
+
+// MSVC can be configured to define wchar_t as a typedef of unsigned
+// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
+// type. When wchar_t is a typedef, defining an overload for const
+// wchar_t* would cause unsigned short* to be printed as a wide string,
+// possibly causing invalid memory accesses.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Overloads for wide C strings
+GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
+inline void PrintTo(wchar_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const wchar_t*>(s), os);
+}
+#endif
+
+// Overload for C arrays. Multi-dimensional arrays are printed
+// properly.
+
+// Prints the given number of elements in an array, without printing
+// the curly braces. The caller must ensure count >= 1, since a[0] is
+// accessed unconditionally.
+template <typename T>
+void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
+ UniversalPrint(a[0], os);
+ for (size_t i = 1; i != count; i++) {
+ *os << ", ";
+ UniversalPrint(a[i], os);
+ }
+}
+
+// Overloads for ::std::string.
+GTEST_API_ void PrintStringTo(const ::std::string& s, ::std::ostream* os);
+inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
+ PrintStringTo(s, os);
+}
+
+// Overloads for ::std::u8string
+#ifdef __cpp_lib_char8_t
+GTEST_API_ void PrintU8StringTo(const ::std::u8string& s, ::std::ostream* os);
+inline void PrintTo(const ::std::u8string& s, ::std::ostream* os) {
+ PrintU8StringTo(s, os);
+}
+#endif
+
+// Overloads for ::std::u16string
+GTEST_API_ void PrintU16StringTo(const ::std::u16string& s, ::std::ostream* os);
+inline void PrintTo(const ::std::u16string& s, ::std::ostream* os) {
+ PrintU16StringTo(s, os);
+}
+
+// Overloads for ::std::u32string
+GTEST_API_ void PrintU32StringTo(const ::std::u32string& s, ::std::ostream* os);
+inline void PrintTo(const ::std::u32string& s, ::std::ostream* os) {
+ PrintU32StringTo(s, os);
+}
+
+// Overloads for ::std::wstring.
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::std::wstring& s, ::std::ostream* os);
+inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
+ PrintWideStringTo(s, os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_INTERNAL_HAS_STRING_VIEW
+// Overload for internal::StringView.
+inline void PrintTo(internal::StringView sp, ::std::ostream* os) {
+ PrintTo(::std::string(sp), os);
+}
+#endif // GTEST_INTERNAL_HAS_STRING_VIEW
+
+inline void PrintTo(std::nullptr_t, ::std::ostream* os) { *os << "(nullptr)"; }
+
+template <typename T>
+void PrintTo(std::reference_wrapper<T> ref, ::std::ostream* os) {
+ UniversalPrinter<T&>::Print(ref.get(), os);
+}
+
+inline const void* VoidifyPointer(const void* p) { return p; }
+inline const void* VoidifyPointer(volatile const void* p) {
+ return const_cast<const void*>(p);
+}
+
+template <typename T, typename Ptr>
+void PrintSmartPointer(const Ptr& ptr, std::ostream* os, char) {
+ if (ptr == nullptr) {
+ *os << "(nullptr)";
+ } else {
+    // We can't print the value. Just print the pointer.
+ *os << "(" << (VoidifyPointer)(ptr.get()) << ")";
+ }
+}
+template <typename T, typename Ptr,
+ typename = typename std::enable_if<!std::is_void<T>::value &&
+ !std::is_array<T>::value>::type>
+void PrintSmartPointer(const Ptr& ptr, std::ostream* os, int) {
+ if (ptr == nullptr) {
+ *os << "(nullptr)";
+ } else {
+ *os << "(ptr = " << (VoidifyPointer)(ptr.get()) << ", value = ";
+ UniversalPrinter<T>::Print(*ptr, os);
+ *os << ")";
+ }
+}
+
+template <typename T, typename D>
+void PrintTo(const std::unique_ptr<T, D>& ptr, std::ostream* os) {
+ (PrintSmartPointer<T>)(ptr, os, 0);
+}
+
+template <typename T>
+void PrintTo(const std::shared_ptr<T>& ptr, std::ostream* os) {
+ (PrintSmartPointer<T>)(ptr, os, 0);
+}
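+
+// Note on the two PrintSmartPointer overloads above: the trailing literal 0
+// is a tag. The int-tagged overload (which prints both the pointer and the
+// pointee) is preferred whenever its enable_if is satisfied; for void or
+// array element types SFINAE removes it, and the call falls back to the
+// char-tagged overload, which prints only the pointer value.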
+
+// Helper function for printing a tuple. T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T&, std::integral_constant<size_t, 0>,
+ ::std::ostream*) {}
+
+template <typename T, size_t I>
+void PrintTupleTo(const T& t, std::integral_constant<size_t, I>,
+ ::std::ostream* os) {
+ PrintTupleTo(t, std::integral_constant<size_t, I - 1>(), os);
+ GTEST_INTENTIONAL_CONST_COND_PUSH_()
+ if (I > 1) {
+ GTEST_INTENTIONAL_CONST_COND_POP_()
+ *os << ", ";
+ }
+ UniversalPrinter<typename std::tuple_element<I - 1, T>::type>::Print(
+ std::get<I - 1>(t), os);
+}
+
+template <typename... Types>
+void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
+ *os << "(";
+ PrintTupleTo(t, std::integral_constant<size_t, sizeof...(Types)>(), os);
+ *os << ")";
+}
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+ *os << '(';
+ // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+ // a reference type. The same for printing value.second.
+ UniversalPrinter<T1>::Print(value.first, os);
+ *os << ", ";
+ UniversalPrinter<T2>::Print(value.second, os);
+ *os << ')';
+}
+
+// Implements printing a non-reference type T by letting the compiler
+// pick the right overload of PrintTo() for T.
+template <typename T>
+class UniversalPrinter {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+ // Note: we deliberately don't call this PrintTo(), as that name
+ // conflicts with ::testing::internal::PrintTo in the body of the
+ // function.
+ static void Print(const T& value, ::std::ostream* os) {
+ // By default, ::testing::internal::PrintTo() is used for printing
+ // the value.
+ //
+    // Thanks to argument-dependent lookup (ADL), if T is a class and has its own
+ // PrintTo() function defined in its namespace, that function will
+ // be visible here. Since it is more specific than the generic ones
+ // in ::testing::internal, it will be picked by the compiler in the
+ // following statement - exactly what we want.
+ PrintTo(value, os);
+ }
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// Remove any const-qualifiers before passing a type to UniversalPrinter.
+template <typename T>
+class UniversalPrinter<const T> : public UniversalPrinter<T> {};
+
+#if GTEST_INTERNAL_HAS_ANY
+
+// Printer for std::any / absl::any
+
+template <>
+class UniversalPrinter<Any> {
+ public:
+ static void Print(const Any& value, ::std::ostream* os) {
+ if (value.has_value()) {
+ *os << "value of type " << GetTypeName(value);
+ } else {
+ *os << "no value";
+ }
+ }
+
+ private:
+ static std::string GetTypeName(const Any& value) {
+#if GTEST_HAS_RTTI
+ return internal::GetTypeName(value.type());
+#else
+ static_cast<void>(value); // possibly unused
+ return "<unknown_type>";
+#endif // GTEST_HAS_RTTI
+ }
+};
+
+#endif // GTEST_INTERNAL_HAS_ANY
+
+#if GTEST_INTERNAL_HAS_OPTIONAL
+
+// Printer for std::optional / absl::optional
+
+template <typename T>
+class UniversalPrinter<Optional<T>> {
+ public:
+ static void Print(const Optional<T>& value, ::std::ostream* os) {
+ *os << '(';
+ if (!value) {
+ *os << "nullopt";
+ } else {
+ UniversalPrint(*value, os);
+ }
+ *os << ')';
+ }
+};
+
+#endif // GTEST_INTERNAL_HAS_OPTIONAL
+
+#if GTEST_INTERNAL_HAS_VARIANT
+
+// Printer for std::variant / absl::variant
+
+template <typename... T>
+class UniversalPrinter<Variant<T...>> {
+ public:
+ static void Print(const Variant<T...>& value, ::std::ostream* os) {
+ *os << '(';
+#if GTEST_HAS_ABSL
+ absl::visit(Visitor{os, value.index()}, value);
+#else
+ std::visit(Visitor{os, value.index()}, value);
+#endif // GTEST_HAS_ABSL
+ *os << ')';
+ }
+
+ private:
+ struct Visitor {
+ template <typename U>
+ void operator()(const U& u) const {
+ *os << "'" << GetTypeName<U>() << "(index = " << index
+ << ")' with value ";
+ UniversalPrint(u, os);
+ }
+ ::std::ostream* os;
+ std::size_t index;
+ };
+};
+
+#endif // GTEST_INTERNAL_HAS_VARIANT
+
+// UniversalPrintArray(begin, len, os) prints an array of 'len'
+// elements, starting at address 'begin'.
+template <typename T>
+void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
+ if (len == 0) {
+ *os << "{}";
+ } else {
+ *os << "{ ";
+ const size_t kThreshold = 18;
+ const size_t kChunkSize = 8;
+ // If the array has more than kThreshold elements, we'll have to
+ // omit some details by printing only the first and the last
+ // kChunkSize elements.
+ if (len <= kThreshold) {
+ PrintRawArrayTo(begin, len, os);
+ } else {
+ PrintRawArrayTo(begin, kChunkSize, os);
+ *os << ", ..., ";
+ PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
+ }
+ *os << " }";
+ }
+}
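+// For example: an int[5] holding {1, 2, 3, 4, 5} prints as
+// "{ 1, 2, 3, 4, 5 }", while an int[100] prints only its first and last
+// kChunkSize (8) elements with ", ..., " in between.
+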
+// This overload prints a (const) char array compactly.
+GTEST_API_ void UniversalPrintArray(
+ const char* begin, size_t len, ::std::ostream* os);
+
+#ifdef __cpp_lib_char8_t
+// This overload prints a (const) char8_t array compactly.
+GTEST_API_ void UniversalPrintArray(const char8_t* begin, size_t len,
+ ::std::ostream* os);
+#endif
+
+// This overload prints a (const) char16_t array compactly.
+GTEST_API_ void UniversalPrintArray(const char16_t* begin, size_t len,
+ ::std::ostream* os);
+
+// This overload prints a (const) char32_t array compactly.
+GTEST_API_ void UniversalPrintArray(const char32_t* begin, size_t len,
+ ::std::ostream* os);
+
+// This overload prints a (const) wchar_t array compactly.
+GTEST_API_ void UniversalPrintArray(
+ const wchar_t* begin, size_t len, ::std::ostream* os);
+
+// Implements printing an array type T[N].
+template <typename T, size_t N>
+class UniversalPrinter<T[N]> {
+ public:
+ // Prints the given array, omitting some elements when there are too
+ // many.
+ static void Print(const T (&a)[N], ::std::ostream* os) {
+ UniversalPrintArray(a, N, os);
+ }
+};
+
+// Implements printing a reference type T&.
+template <typename T>
+class UniversalPrinter<T&> {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+ static void Print(const T& value, ::std::ostream* os) {
+ // Prints the address of the value. We use reinterpret_cast here
+ // as static_cast doesn't compile when T is a function type.
+ *os << "@" << reinterpret_cast<const void*>(&value) << " ";
+
+ // Then prints the value itself.
+ UniversalPrint(value, os);
+ }
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+
+template <typename T>
+class UniversalTersePrinter {
+ public:
+ static void Print(const T& value, ::std::ostream* os) {
+ UniversalPrint(value, os);
+ }
+};
+template <typename T>
+class UniversalTersePrinter<T&> {
+ public:
+ static void Print(const T& value, ::std::ostream* os) {
+ UniversalPrint(value, os);
+ }
+};
+template <typename T, size_t N>
+class UniversalTersePrinter<T[N]> {
+ public:
+ static void Print(const T (&value)[N], ::std::ostream* os) {
+ UniversalPrinter<T[N]>::Print(value, os);
+ }
+};
+template <>
+class UniversalTersePrinter<const char*> {
+ public:
+ static void Print(const char* str, ::std::ostream* os) {
+ if (str == nullptr) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(std::string(str), os);
+ }
+ }
+};
+template <>
+class UniversalTersePrinter<char*> : public UniversalTersePrinter<const char*> {
+};
+
+#ifdef __cpp_lib_char8_t
+template <>
+class UniversalTersePrinter<const char8_t*> {
+ public:
+ static void Print(const char8_t* str, ::std::ostream* os) {
+ if (str == nullptr) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(::std::u8string(str), os);
+ }
+ }
+};
+template <>
+class UniversalTersePrinter<char8_t*>
+ : public UniversalTersePrinter<const char8_t*> {};
+#endif
+
+template <>
+class UniversalTersePrinter<const char16_t*> {
+ public:
+ static void Print(const char16_t* str, ::std::ostream* os) {
+ if (str == nullptr) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(::std::u16string(str), os);
+ }
+ }
+};
+template <>
+class UniversalTersePrinter<char16_t*>
+ : public UniversalTersePrinter<const char16_t*> {};
+
+template <>
+class UniversalTersePrinter<const char32_t*> {
+ public:
+ static void Print(const char32_t* str, ::std::ostream* os) {
+ if (str == nullptr) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(::std::u32string(str), os);
+ }
+ }
+};
+template <>
+class UniversalTersePrinter<char32_t*>
+ : public UniversalTersePrinter<const char32_t*> {};
+
+#if GTEST_HAS_STD_WSTRING
+template <>
+class UniversalTersePrinter<const wchar_t*> {
+ public:
+ static void Print(const wchar_t* str, ::std::ostream* os) {
+ if (str == nullptr) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(::std::wstring(str), os);
+ }
+ }
+};
+#endif
+
+template <>
+class UniversalTersePrinter<wchar_t*> {
+ public:
+ static void Print(wchar_t* str, ::std::ostream* os) {
+ UniversalTersePrinter<const wchar_t*>::Print(str, os);
+ }
+};
+
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+ UniversalTersePrinter<T>::Print(value, os);
+}
+
+// Prints a value using the type inferred by the compiler. The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
+ // UniversalPrinter with T directly.
+ typedef T T1;
+ UniversalPrinter<T1>::Print(value, os);
+}
+
+typedef ::std::vector< ::std::string> Strings;
+
+// Tersely prints the first N fields of a tuple to a string vector,
+// one element for each field.
+template <typename Tuple>
+void TersePrintPrefixToStrings(const Tuple&, std::integral_constant<size_t, 0>,
+ Strings*) {}
+template <typename Tuple, size_t I>
+void TersePrintPrefixToStrings(const Tuple& t,
+ std::integral_constant<size_t, I>,
+ Strings* strings) {
+ TersePrintPrefixToStrings(t, std::integral_constant<size_t, I - 1>(),
+ strings);
+ ::std::stringstream ss;
+ UniversalTersePrint(std::get<I - 1>(t), &ss);
+ strings->push_back(ss.str());
+}
+
+// Prints the fields of a tuple tersely to a string vector, one
+// element for each field. See the comment before
+// UniversalTersePrint() for how we define "tersely".
+template <typename Tuple>
+Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
+ Strings result;
+ TersePrintPrefixToStrings(
+ value, std::integral_constant<size_t, std::tuple_size<Tuple>::value>(),
+ &result);
+ return result;
+}
+
+} // namespace internal
+
+template <typename T>
+::std::string PrintToString(const T& value) {
+ ::std::stringstream ss;
+ internal::UniversalTersePrinter<T>::Print(value, &ss);
+ return ss.str();
+}
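+
+// A typical use (values made up):
+//
+//   std::vector<int> v = {1, 2, 3};
+//   EXPECT_EQ("{ 1, 2, 3 }", ::testing::PrintToString(v));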
+
+} // namespace testing
+
+// Include any custom printer added by the local installation.
+// We must include this header at the end to make sure it can use the
+// declarations from this file.
+//
+// This file provides an injection point for custom printers in a local
+// installation of gTest.
+// It will be included from gtest-printers.h and the overrides in this file
+// will be visible to everyone.
+//
+// Injection point for custom user configurations. See README for details
+//
+// ** Custom implementation starts here **
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+// MSVC warning C5046 is new as of VS2017 version 15.8.
+#if defined(_MSC_VER) && _MSC_VER >= 1915
+#define GTEST_MAYBE_5046_ 5046
+#else
+#define GTEST_MAYBE_5046_
+#endif
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(
+ 4251 GTEST_MAYBE_5046_ /* class A needs to have dll-interface to be used by
+ clients of class B */
+ /* Symbol involving type with internal linkage not defined */)
+
+namespace testing {
+
+// To implement a matcher Foo for type T, define:
+// 1. a class FooMatcherMatcher that implements the matcher interface:
+// using is_gtest_matcher = void;
+// bool MatchAndExplain(const T&, std::ostream*);
+// (MatchResultListener* can also be used instead of std::ostream*)
+// void DescribeTo(std::ostream*);
+// void DescribeNegationTo(std::ostream*);
+//
+// 2. a factory function that creates a Matcher<T> object from a
+// FooMatcherMatcher.
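+//
+// A minimal sketch following those rules (IsEvenMatcher/IsEven are made-up
+// names):
+//
+//   struct IsEvenMatcher {
+//     using is_gtest_matcher = void;
+//     bool MatchAndExplain(int n, std::ostream*) const { return n % 2 == 0; }
+//     void DescribeTo(std::ostream* os) const { *os << "is even"; }
+//     void DescribeNegationTo(std::ostream* os) const { *os << "is odd"; }
+//   };
+//   ::testing::Matcher<int> IsEven() { return IsEvenMatcher(); }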
+
+class MatchResultListener {
+ public:
+ // Creates a listener object with the given underlying ostream. The
+ // listener does not own the ostream, and does not dereference it
+ // in the constructor or destructor.
+ explicit MatchResultListener(::std::ostream* os) : stream_(os) {}
+ virtual ~MatchResultListener() = 0; // Makes this class abstract.
+
+ // Streams x to the underlying ostream; does nothing if the ostream
+ // is NULL.
+ template <typename T>
+ MatchResultListener& operator<<(const T& x) {
+ if (stream_ != nullptr) *stream_ << x;
+ return *this;
+ }
+
+ // Returns the underlying ostream.
+ ::std::ostream* stream() { return stream_; }
+
+ // Returns true if and only if the listener is interested in an explanation
+ // of the match result. A matcher's MatchAndExplain() method can use
+ // this information to avoid generating the explanation when no one
+ // intends to hear it.
+ bool IsInterested() const { return stream_ != nullptr; }
+
+ private:
+ ::std::ostream* const stream_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
+};
+
+inline MatchResultListener::~MatchResultListener() {
+}
+
+// An instance of a subclass of this knows how to describe itself as a
+// matcher.
+class GTEST_API_ MatcherDescriberInterface {
+ public:
+ virtual ~MatcherDescriberInterface() {}
+
+ // Describes this matcher to an ostream. The function should print
+ // a verb phrase that describes the property a value matching this
+ // matcher should have. The subject of the verb phrase is the value
+ // being matched. For example, the DescribeTo() method of the Gt(7)
+ // matcher prints "is greater than 7".
+ virtual void DescribeTo(::std::ostream* os) const = 0;
+
+ // Describes the negation of this matcher to an ostream. For
+ // example, if the description of this matcher is "is greater than
+ // 7", the negated description could be "is not greater than 7".
+ // You are not required to override this when implementing
+ // MatcherInterface, but it is highly advised so that your matcher
+ // can produce good error messages.
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "not (";
+ DescribeTo(os);
+ *os << ")";
+ }
+};
+
+// The implementation of a matcher.
+template <typename T>
+class MatcherInterface : public MatcherDescriberInterface {
+ public:
+ // Returns true if and only if the matcher matches x; also explains the
+ // match result to 'listener' if necessary (see the next paragraph), in
+ // the form of a non-restrictive relative clause ("which ...",
+ // "whose ...", etc) that describes x. For example, the
+ // MatchAndExplain() method of the Pointee(...) matcher should
+ // generate an explanation like "which points to ...".
+ //
+ // Implementations of MatchAndExplain() should add an explanation of
+ // the match result *if and only if* they can provide additional
+ // information that's not already present (or not obvious) in the
+ // print-out of x and the matcher's description. Whether the match
+ // succeeds is not a factor in deciding whether an explanation is
+ // needed, as sometimes the caller needs to print a failure message
+ // when the match succeeds (e.g. when the matcher is used inside
+ // Not()).
+ //
+ // For example, a "has at least 10 elements" matcher should explain
+ // what the actual element count is, regardless of the match result,
+ // as it is useful information to the reader; on the other hand, an
+ // "is empty" matcher probably only needs to explain what the actual
+ // size is when the match fails, as it's redundant to say that the
+ // size is 0 when the value is already known to be empty.
+ //
+ // You should override this method when defining a new matcher.
+ //
+ // It's the responsibility of the caller (Google Test) to guarantee
+ // that 'listener' is not NULL. This helps to simplify a matcher's
+ // implementation when it doesn't care about the performance, as it
+ // can talk to 'listener' without checking its validity first.
+ // However, in order to implement dummy listeners efficiently,
+ // listener->stream() may be NULL.
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0;
+
+ // Inherits these methods from MatcherDescriberInterface:
+ // virtual void DescribeTo(::std::ostream* os) const = 0;
+ // virtual void DescribeNegationTo(::std::ostream* os) const;
+};
+
+namespace internal {
+
+struct AnyEq {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a == b; }
+};
+struct AnyNe {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a != b; }
+};
+struct AnyLt {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a < b; }
+};
+struct AnyGt {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a > b; }
+};
+struct AnyLe {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a <= b; }
+};
+struct AnyGe {
+ template <typename A, typename B>
+ bool operator()(const A& a, const B& b) const { return a >= b; }
+};
+
+// A match result listener that ignores the explanation.
+class DummyMatchResultListener : public MatchResultListener {
+ public:
+ DummyMatchResultListener() : MatchResultListener(nullptr) {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
+};
+
+// A match result listener that forwards the explanation to a given
+// ostream. The difference between this and MatchResultListener is
+// that the former is concrete.
+class StreamMatchResultListener : public MatchResultListener {
+ public:
+ explicit StreamMatchResultListener(::std::ostream* os)
+ : MatchResultListener(os) {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
+};
+
+struct SharedPayloadBase {
+ std::atomic<int> ref{1};
+ void Ref() { ref.fetch_add(1, std::memory_order_relaxed); }
+ bool Unref() { return ref.fetch_sub(1, std::memory_order_acq_rel) == 1; }
+};
+
+template <typename T>
+struct SharedPayload : SharedPayloadBase {
+ explicit SharedPayload(const T& v) : value(v) {}
+ explicit SharedPayload(T&& v) : value(std::move(v)) {}
+
+ static void Destroy(SharedPayloadBase* shared) {
+ delete static_cast<SharedPayload*>(shared);
+ }
+
+ T value;
+};
+
+// An internal class for implementing Matcher<T>, which will derive
+// from it. We put functionality common to all Matcher<T>
+// specializations here to avoid code duplication.
+template <typename T>
+class MatcherBase : private MatcherDescriberInterface {
+ public:
+ // Returns true if and only if the matcher matches x; also explains the
+ // match result to 'listener'.
+ bool MatchAndExplain(const T& x, MatchResultListener* listener) const {
+ GTEST_CHECK_(vtable_ != nullptr);
+ return vtable_->match_and_explain(*this, x, listener);
+ }
+
+ // Returns true if and only if this matcher matches x.
+ bool Matches(const T& x) const {
+ DummyMatchResultListener dummy;
+ return MatchAndExplain(x, &dummy);
+ }
+
+ // Describes this matcher to an ostream.
+ void DescribeTo(::std::ostream* os) const final {
+ GTEST_CHECK_(vtable_ != nullptr);
+ vtable_->describe(*this, os, false);
+ }
+
+ // Describes the negation of this matcher to an ostream.
+ void DescribeNegationTo(::std::ostream* os) const final {
+ GTEST_CHECK_(vtable_ != nullptr);
+ vtable_->describe(*this, os, true);
+ }
+
+ // Explains why x matches, or doesn't match, the matcher.
+ void ExplainMatchResultTo(const T& x, ::std::ostream* os) const {
+ StreamMatchResultListener listener(os);
+ MatchAndExplain(x, &listener);
+ }
+
+ // Returns the describer for this matcher object; retains ownership
+ // of the describer, which is only guaranteed to be alive when
+ // this matcher object is alive.
+ const MatcherDescriberInterface* GetDescriber() const {
+ if (vtable_ == nullptr) return nullptr;
+ return vtable_->get_describer(*this);
+ }
+
+ protected:
+ MatcherBase() : vtable_(nullptr) {}
+
+ // Constructs a matcher from its implementation.
+ template <typename U>
+ explicit MatcherBase(const MatcherInterface<U>* impl) {
+ Init(impl);
+ }
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ MatcherBase(M&& m) { // NOLINT
+ Init(std::forward<M>(m));
+ }
+
+ MatcherBase(const MatcherBase& other)
+ : vtable_(other.vtable_), buffer_(other.buffer_) {
+ if (IsShared()) buffer_.shared->Ref();
+ }
+
+ MatcherBase& operator=(const MatcherBase& other) {
+ if (this == &other) return *this;
+ Destroy();
+ vtable_ = other.vtable_;
+ buffer_ = other.buffer_;
+ if (IsShared()) buffer_.shared->Ref();
+ return *this;
+ }
+
+ MatcherBase(MatcherBase&& other)
+ : vtable_(other.vtable_), buffer_(other.buffer_) {
+ other.vtable_ = nullptr;
+ }
+
+ MatcherBase& operator=(MatcherBase&& other) {
+ if (this == &other) return *this;
+ Destroy();
+ vtable_ = other.vtable_;
+ buffer_ = other.buffer_;
+ other.vtable_ = nullptr;
+ return *this;
+ }
+
+ ~MatcherBase() override { Destroy(); }
+
+ private:
+ struct VTable {
+ bool (*match_and_explain)(const MatcherBase&, const T&,
+ MatchResultListener*);
+ void (*describe)(const MatcherBase&, std::ostream*, bool negation);
+ // Returns the captured object if it implements the interface, otherwise
+ // returns the MatcherBase itself.
+ const MatcherDescriberInterface* (*get_describer)(const MatcherBase&);
+ // Called on shared instances when the reference count reaches 0.
+ void (*shared_destroy)(SharedPayloadBase*);
+ };
+
+ bool IsShared() const {
+ return vtable_ != nullptr && vtable_->shared_destroy != nullptr;
+ }
+
+  // The implementation may take either a raw ostream* or a
+  // MatchResultListener*; these two overloads dispatch to whichever
+  // signature it provides.
+ template <typename P>
+ static auto MatchAndExplainImpl(const MatcherBase& m, const T& value,
+ MatchResultListener* listener)
+ -> decltype(P::Get(m).MatchAndExplain(value, listener->stream())) {
+ return P::Get(m).MatchAndExplain(value, listener->stream());
+ }
+
+ template <typename P>
+ static auto MatchAndExplainImpl(const MatcherBase& m, const T& value,
+ MatchResultListener* listener)
+ -> decltype(P::Get(m).MatchAndExplain(value, listener)) {
+ return P::Get(m).MatchAndExplain(value, listener);
+ }
+
+ template <typename P>
+ static void DescribeImpl(const MatcherBase& m, std::ostream* os,
+ bool negation) {
+ if (negation) {
+ P::Get(m).DescribeNegationTo(os);
+ } else {
+ P::Get(m).DescribeTo(os);
+ }
+ }
+
+ template <typename P>
+ static const MatcherDescriberInterface* GetDescriberImpl(
+ const MatcherBase& m) {
+ // If the impl is a MatcherDescriberInterface, then return it.
+ // Otherwise use MatcherBase itself.
+ // This allows us to implement the GetDescriber() function without support
+ // from the impl, but some users really want to get their impl back when
+ // they call GetDescriber().
+ // We use std::get on a tuple as a workaround of not having `if constexpr`.
+ return std::get<(
+ std::is_convertible<decltype(&P::Get(m)),
+ const MatcherDescriberInterface*>::value
+ ? 1
+ : 0)>(std::make_tuple(&m, &P::Get(m)));
+ }
+
+ template <typename P>
+ const VTable* GetVTable() {
+ static constexpr VTable kVTable = {&MatchAndExplainImpl<P>,
+ &DescribeImpl<P>, &GetDescriberImpl<P>,
+ P::shared_destroy};
+ return &kVTable;
+ }
+
+ union Buffer {
+ // Add some types to give Buffer some common alignment/size use cases.
+ void* ptr;
+ double d;
+ int64_t i;
+ // And add one for the out-of-line cases.
+ SharedPayloadBase* shared;
+ };
+
+ void Destroy() {
+ if (IsShared() && buffer_.shared->Unref()) {
+ vtable_->shared_destroy(buffer_.shared);
+ }
+ }
+
+ template <typename M>
+ static constexpr bool IsInlined() {
+ return sizeof(M) <= sizeof(Buffer) && alignof(M) <= alignof(Buffer) &&
+ std::is_trivially_copy_constructible<M>::value &&
+ std::is_trivially_destructible<M>::value;
+ }
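+
+  // For instance: a stateless matcher, or one holding a single int, passes
+  // IsInlined() and is stored directly in Buffer; a matcher capturing a
+  // std::string fails the trivial-copy/destroy checks and is stored out of
+  // line in a ref-counted SharedPayload.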
+
+ template <typename M, bool = MatcherBase::IsInlined<M>()>
+ struct ValuePolicy {
+ static const M& Get(const MatcherBase& m) {
+      // The object was placement-new'ed into buffer_ by Init() below; cast
+      // through void* explicitly to avoid violating strict-aliasing rules.
+ const M *ptr = static_cast<const M*>(
+ static_cast<const void*>(&m.buffer_));
+ return *ptr;
+ }
+ static void Init(MatcherBase& m, M impl) {
+ ::new (static_cast<void*>(&m.buffer_)) M(impl);
+ }
+ static constexpr auto shared_destroy = nullptr;
+ };
+
+ template <typename M>
+ struct ValuePolicy<M, false> {
+ using Shared = SharedPayload<M>;
+ static const M& Get(const MatcherBase& m) {
+ return static_cast<Shared*>(m.buffer_.shared)->value;
+ }
+ template <typename Arg>
+ static void Init(MatcherBase& m, Arg&& arg) {
+ m.buffer_.shared = new Shared(std::forward<Arg>(arg));
+ }
+ static constexpr auto shared_destroy = &Shared::Destroy;
+ };
+
+ template <typename U, bool B>
+ struct ValuePolicy<const MatcherInterface<U>*, B> {
+ using M = const MatcherInterface<U>;
+ using Shared = SharedPayload<std::unique_ptr<M>>;
+ static const M& Get(const MatcherBase& m) {
+ return *static_cast<Shared*>(m.buffer_.shared)->value;
+ }
+ static void Init(MatcherBase& m, M* impl) {
+ m.buffer_.shared = new Shared(std::unique_ptr<M>(impl));
+ }
+
+ static constexpr auto shared_destroy = &Shared::Destroy;
+ };
+
+ template <typename M>
+ void Init(M&& m) {
+ using MM = typename std::decay<M>::type;
+ using Policy = ValuePolicy<MM>;
+ vtable_ = GetVTable<Policy>();
+ Policy::Init(*this, std::forward<M>(m));
+ }
+
+ const VTable* vtable_;
+ Buffer buffer_;
+};
+
+} // namespace internal
+
+// A Matcher<T> is a copyable and IMMUTABLE (except by assignment)
+// object that can check whether a value of type T matches. It is
+// implemented in terms of internal::MatcherBase<T>, which stores the
+// matcher either inline or in a ref-counted payload (see the VTable
+// and Buffer machinery above). Don't inherit from Matcher!
+template <typename T>
+class Matcher : public internal::MatcherBase<T> {
+ public:
+ // Constructs a null matcher. Needed for storing Matcher objects in STL
+ // containers. A default-constructed matcher is not yet initialized. You
+ // cannot use it until a valid value has been assigned to it.
+ explicit Matcher() {} // NOLINT
+
+ // Constructs a matcher from its implementation.
+ explicit Matcher(const MatcherInterface<const T&>* impl)
+ : internal::MatcherBase<T>(impl) {}
+
+ template <typename U>
+ explicit Matcher(
+ const MatcherInterface<U>* impl,
+ typename std::enable_if<!std::is_same<U, const U&>::value>::type* =
+ nullptr)
+ : internal::MatcherBase<T>(impl) {}
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ Matcher(M&& m) : internal::MatcherBase<T>(std::forward<M>(m)) {} // NOLINT
+
+ // Implicit constructor here allows people to write
+ // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
+ Matcher(T value); // NOLINT
+};
+
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when a std::string
+// matcher is expected.
+template <>
+class GTEST_API_ Matcher<const std::string&>
+ : public internal::MatcherBase<const std::string&> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<const std::string&>* impl)
+ : internal::MatcherBase<const std::string&>(impl) {}
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ Matcher(M&& m) // NOLINT
+ : internal::MatcherBase<const std::string&>(std::forward<M>(m)) {}
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a std::string object.
+ Matcher(const std::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+};
+
+template <>
+class GTEST_API_ Matcher<std::string>
+ : public internal::MatcherBase<std::string> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<const std::string&>* impl)
+ : internal::MatcherBase<std::string>(impl) {}
+ explicit Matcher(const MatcherInterface<std::string>* impl)
+ : internal::MatcherBase<std::string>(impl) {}
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ Matcher(M&& m) // NOLINT
+ : internal::MatcherBase<std::string>(std::forward<M>(m)) {}
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a string object.
+ Matcher(const std::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+};
+
+#if GTEST_INTERNAL_HAS_STRING_VIEW
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when an absl::string_view
+// matcher is expected.
+template <>
+class GTEST_API_ Matcher<const internal::StringView&>
+ : public internal::MatcherBase<const internal::StringView&> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<const internal::StringView&>* impl)
+ : internal::MatcherBase<const internal::StringView&>(impl) {}
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ Matcher(M&& m) // NOLINT
+ : internal::MatcherBase<const internal::StringView&>(std::forward<M>(m)) {
+ }
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a std::string object.
+ Matcher(const std::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+
+ // Allows the user to pass absl::string_views or std::string_views directly.
+ Matcher(internal::StringView s); // NOLINT
+};
+
+template <>
+class GTEST_API_ Matcher<internal::StringView>
+ : public internal::MatcherBase<internal::StringView> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<const internal::StringView&>* impl)
+ : internal::MatcherBase<internal::StringView>(impl) {}
+ explicit Matcher(const MatcherInterface<internal::StringView>* impl)
+ : internal::MatcherBase<internal::StringView>(impl) {}
+
+ template <typename M, typename = typename std::remove_reference<
+ M>::type::is_gtest_matcher>
+ Matcher(M&& m) // NOLINT
+ : internal::MatcherBase<internal::StringView>(std::forward<M>(m)) {}
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a std::string object.
+ Matcher(const std::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+
+ // Allows the user to pass absl::string_views or std::string_views directly.
+ Matcher(internal::StringView s); // NOLINT
+};
+#endif // GTEST_INTERNAL_HAS_STRING_VIEW
+
+// Prints a matcher in a human-readable format.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const Matcher<T>& matcher) {
+ matcher.DescribeTo(&os);
+ return os;
+}
+
+// The PolymorphicMatcher class template makes it easy to implement a
+// polymorphic matcher (i.e. a matcher that can match values of more
+// than one type, e.g. Eq(n) and NotNull()).
+//
+// To define a polymorphic matcher, a user should provide an Impl
+// class that has a DescribeTo() method and a DescribeNegationTo()
+// method, and define a member function (or member function template)
+//
+// bool MatchAndExplain(const Value& value,
+// MatchResultListener* listener) const;
+//
+// See the definition of NotNull() for a complete example.
+template <class Impl>
+class PolymorphicMatcher {
+ public:
+ explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {}
+
+ // Returns a mutable reference to the underlying matcher
+ // implementation object.
+ Impl& mutable_impl() { return impl_; }
+
+ // Returns an immutable reference to the underlying matcher
+ // implementation object.
+ const Impl& impl() const { return impl_; }
+
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new MonomorphicImpl<const T&>(impl_));
+ }
+
+ private:
+ template <typename T>
+ class MonomorphicImpl : public MatcherInterface<T> {
+ public:
+ explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
+
+ void DescribeTo(::std::ostream* os) const override { impl_.DescribeTo(os); }
+
+ void DescribeNegationTo(::std::ostream* os) const override {
+ impl_.DescribeNegationTo(os);
+ }
+
+ bool MatchAndExplain(T x, MatchResultListener* listener) const override {
+ return impl_.MatchAndExplain(x, listener);
+ }
+
+ private:
+ const Impl impl_;
+ };
+
+ Impl impl_;
+};
+
+// Creates a matcher from its implementation.
+// DEPRECATED: Especially in the generic code, prefer:
+// Matcher<T>(new MyMatcherImpl<const T&>(...));
+//
+// MakeMatcher may create a Matcher that accepts its argument by value, which
+// leads to unnecessary copies & lack of support for non-copyable types.
+template <typename T>
+inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
+ return Matcher<T>(impl);
+}
+
+// Creates a polymorphic matcher from its implementation. This is
+// easier to use than the PolymorphicMatcher<Impl> constructor as it
+// doesn't require you to explicitly write the template argument, e.g.
+//
+// MakePolymorphicMatcher(foo);
+// vs
+// PolymorphicMatcher<TypeOfFoo>(foo);
+template <class Impl>
+inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
+ return PolymorphicMatcher<Impl>(impl);
+}
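+
+// A minimal sketch of a polymorphic matcher built this way (the
+// IsPositiveImpl/IsPositive names below are hypothetical, not part of
+// the library):
+//
+//   class IsPositiveImpl {
+//    public:
+//     template <typename T>
+//     bool MatchAndExplain(const T& value,
+//                          MatchResultListener* /* listener */) const {
+//       return value > 0;
+//     }
+//     void DescribeTo(::std::ostream* os) const { *os << "is positive"; }
+//     void DescribeNegationTo(::std::ostream* os) const {
+//       *os << "isn't positive";
+//     }
+//   };
+//
+//   inline PolymorphicMatcher<IsPositiveImpl> IsPositive() {
+//     return MakePolymorphicMatcher(IsPositiveImpl());
+//   }
+//
+// The resulting matcher converts to Matcher<int>, Matcher<double>, and
+// so on, as needed.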
+
+namespace internal {
+// Implements a matcher that compares a given value with a
+// pre-supplied value using one of the ==, <=, <, etc, operators. The
+// two values being compared don't have to have the same type.
+//
+// The matcher defined here is polymorphic (for example, Eq(5) can be
+// used to match an int, a short, a double, etc). Therefore we use
+// a template type conversion operator in the implementation.
+//
+// The following template definition assumes that the Rhs parameter is
+// a "bare" type (i.e. neither 'const T' nor 'T&').
+template <typename D, typename Rhs, typename Op>
+class ComparisonBase {
+ public:
+ explicit ComparisonBase(const Rhs& rhs) : rhs_(rhs) {}
+
+ using is_gtest_matcher = void;
+
+ template <typename Lhs>
+ bool MatchAndExplain(const Lhs& lhs, std::ostream*) const {
+ return Op()(lhs, Unwrap(rhs_));
+ }
+ void DescribeTo(std::ostream* os) const {
+ *os << D::Desc() << " ";
+ UniversalPrint(Unwrap(rhs_), os);
+ }
+ void DescribeNegationTo(std::ostream* os) const {
+ *os << D::NegatedDesc() << " ";
+ UniversalPrint(Unwrap(rhs_), os);
+ }
+
+ private:
+ template <typename T>
+ static const T& Unwrap(const T& v) {
+ return v;
+ }
+ template <typename T>
+ static const T& Unwrap(std::reference_wrapper<T> v) {
+ return v;
+ }
+
+ Rhs rhs_;
+};
+
+template <typename Rhs>
+class EqMatcher : public ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq> {
+ public:
+ explicit EqMatcher(const Rhs& rhs)
+ : ComparisonBase<EqMatcher<Rhs>, Rhs, AnyEq>(rhs) { }
+ static const char* Desc() { return "is equal to"; }
+ static const char* NegatedDesc() { return "isn't equal to"; }
+};
+template <typename Rhs>
+class NeMatcher : public ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe> {
+ public:
+ explicit NeMatcher(const Rhs& rhs)
+ : ComparisonBase<NeMatcher<Rhs>, Rhs, AnyNe>(rhs) { }
+ static const char* Desc() { return "isn't equal to"; }
+ static const char* NegatedDesc() { return "is equal to"; }
+};
+template <typename Rhs>
+class LtMatcher : public ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt> {
+ public:
+ explicit LtMatcher(const Rhs& rhs)
+ : ComparisonBase<LtMatcher<Rhs>, Rhs, AnyLt>(rhs) { }
+ static const char* Desc() { return "is <"; }
+ static const char* NegatedDesc() { return "isn't <"; }
+};
+template <typename Rhs>
+class GtMatcher : public ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt> {
+ public:
+ explicit GtMatcher(const Rhs& rhs)
+ : ComparisonBase<GtMatcher<Rhs>, Rhs, AnyGt>(rhs) { }
+ static const char* Desc() { return "is >"; }
+ static const char* NegatedDesc() { return "isn't >"; }
+};
+template <typename Rhs>
+class LeMatcher : public ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe> {
+ public:
+ explicit LeMatcher(const Rhs& rhs)
+ : ComparisonBase<LeMatcher<Rhs>, Rhs, AnyLe>(rhs) { }
+ static const char* Desc() { return "is <="; }
+ static const char* NegatedDesc() { return "isn't <="; }
+};
+template <typename Rhs>
+class GeMatcher : public ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe> {
+ public:
+ explicit GeMatcher(const Rhs& rhs)
+ : ComparisonBase<GeMatcher<Rhs>, Rhs, AnyGe>(rhs) { }
+ static const char* Desc() { return "is >="; }
+ static const char* NegatedDesc() { return "isn't >="; }
+};
+
+template <typename T, typename = typename std::enable_if<
+ std::is_constructible<std::string, T>::value>::type>
+using StringLike = T;
+
+// Implements polymorphic matchers MatchesRegex(regex) and
+// ContainsRegex(regex), which can be used as a Matcher<T> as long as
+// T can be converted to a string.
+class MatchesRegexMatcher {
+ public:
+ MatchesRegexMatcher(const RE* regex, bool full_match)
+ : regex_(regex), full_match_(full_match) {}
+
+#if GTEST_INTERNAL_HAS_STRING_VIEW
+ bool MatchAndExplain(const internal::StringView& s,
+ MatchResultListener* listener) const {
+ return MatchAndExplain(std::string(s), listener);
+ }
+#endif // GTEST_INTERNAL_HAS_STRING_VIEW
+
+ // Accepts pointer types, particularly:
+ // const char*
+ // char*
+ // const wchar_t*
+ // wchar_t*
+ template <typename CharType>
+ bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
+ return s != nullptr && MatchAndExplain(std::string(s), listener);
+ }
+
+ // Matches anything that can convert to std::string.
+ //
+ // This is a template, not just a plain function with const std::string&,
+ // because absl::string_view has some interfering non-explicit constructors.
+ template <class MatcheeStringType>
+ bool MatchAndExplain(const MatcheeStringType& s,
+ MatchResultListener* /* listener */) const {
+ const std::string& s2(s);
+ return full_match_ ? RE::FullMatch(s2, *regex_)
+ : RE::PartialMatch(s2, *regex_);
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << (full_match_ ? "matches" : "contains") << " regular expression ";
+ UniversalPrinter<std::string>::Print(regex_->pattern(), os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't " << (full_match_ ? "match" : "contain")
+ << " regular expression ";
+ UniversalPrinter<std::string>::Print(regex_->pattern(), os);
+ }
+
+ private:
+ const std::shared_ptr<const RE> regex_;
+ const bool full_match_;
+};
+} // namespace internal
+
+// Matches a string that fully matches regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+ const internal::RE* regex) {
+ return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
+}
+template <typename T = std::string>
+PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+ const internal::StringLike<T>& regex) {
+ return MatchesRegex(new internal::RE(std::string(regex)));
+}
+
+// Matches a string that contains regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+ const internal::RE* regex) {
+ return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
+}
+template <typename T = std::string>
+PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+ const internal::StringLike<T>& regex) {
+ return ContainsRegex(new internal::RE(std::string(regex)));
+}
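+
+// For example (a sketch):
+//
+//   Matcher<const std::string&> full = MatchesRegex("\\d+");   // whole string
+//   Matcher<const std::string&> part = ContainsRegex("\\d+");  // any substring
+//
+// "123" satisfies both matchers, while "abc123" satisfies only 'part'.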
+
+// Creates a polymorphic matcher that matches anything equal to x.
+// Note: if the parameter of Eq() were declared as const T&, Eq("foo")
+// wouldn't compile.
+template <typename T>
+inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); }
+
+// Constructs a Matcher<T> from a 'value' of type T. The constructed
+// matcher matches any value that's equal to 'value'.
+template <typename T>
+Matcher<T>::Matcher(T value) { *this = Eq(value); }
+
+// Creates a monomorphic matcher that matches anything with type Lhs
+// and equal to rhs. A user may need to use this instead of Eq(...)
+// in order to resolve an overloading ambiguity.
+//
+// TypedEq<T>(x) is just a convenient short-hand for Matcher<T>(Eq(x))
+// or Matcher<T>(x), but more readable than the latter.
+//
+// We could define similar monomorphic matchers for other comparison
+// operations (e.g. TypedLt, TypedGe, and etc), but decided not to do
+// it yet as those are used much less than Eq() in practice. A user
+// can always write Matcher<T>(Lt(5)) to be explicit about the type,
+// for example.
+template <typename Lhs, typename Rhs>
+inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); }
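+
+// For example (a sketch assuming Google Mock; 'mock' and the overloaded
+// Process() methods are hypothetical):
+//
+//   // Process is overloaded for int and double, so Eq(5) alone would
+//   // make the expectation ambiguous:
+//   EXPECT_CALL(mock, Process(TypedEq<int>(5)));
+//
+// TypedEq<int>(5) yields a Matcher<int>, which selects the int overload
+// unambiguously.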
+
+// Creates a polymorphic matcher that matches anything >= x.
+template <typename Rhs>
+inline internal::GeMatcher<Rhs> Ge(Rhs x) {
+ return internal::GeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything > x.
+template <typename Rhs>
+inline internal::GtMatcher<Rhs> Gt(Rhs x) {
+ return internal::GtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything <= x.
+template <typename Rhs>
+inline internal::LeMatcher<Rhs> Le(Rhs x) {
+ return internal::LeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything < x.
+template <typename Rhs>
+inline internal::LtMatcher<Rhs> Lt(Rhs x) {
+ return internal::LtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything != x.
+template <typename Rhs>
+inline internal::NeMatcher<Rhs> Ne(Rhs x) {
+ return internal::NeMatcher<Rhs>(x);
+}
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 5046
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_MATCHERS_H_
+
+#include <stdio.h>
+#include <memory>
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status: The integer exit information in the format specified
+// by wait(2)
+// exit code: The integer code passed to exit(3), _exit(2), or
+// returned from main()
+class GTEST_API_ DeathTest {
+ public:
+ // Create returns false if there was an error determining the
+ // appropriate action to take for the current death test; for example,
+ // if the gtest_death_test_style flag is set to an invalid value.
+ // The LastMessage method will return a more detailed message in that
+ // case. Otherwise, the DeathTest pointer pointed to by the "test"
+ // argument is set. If the death test should be skipped, the pointer
+ // is set to NULL; otherwise, it is set to the address of a new concrete
+ // DeathTest object that controls the execution of the current test.
+ static bool Create(const char* statement, Matcher<const std::string&> matcher,
+ const char* file, int line, DeathTest** test);
+ DeathTest();
+ virtual ~DeathTest() { }
+
+ // A helper class that aborts a death test when it's deleted.
+ class ReturnSentinel {
+ public:
+ explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+ ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+ private:
+ DeathTest* const test_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+ } GTEST_ATTRIBUTE_UNUSED_;
+
+ // An enumeration of possible roles that may be taken when a death
+ // test is encountered. EXECUTE means that the death test logic should
+ // be executed immediately. OVERSEE means that the program should prepare
+ // the appropriate environment for a child process to execute the death
+ // test, then wait for it to complete.
+ enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+ // An enumeration of the three reasons that a test might be aborted.
+ enum AbortReason {
+ TEST_ENCOUNTERED_RETURN_STATEMENT,
+ TEST_THREW_EXCEPTION,
+ TEST_DID_NOT_DIE
+ };
+
+ // Assumes one of the above roles.
+ virtual TestRole AssumeRole() = 0;
+
+ // Waits for the death test to finish and returns its status.
+ virtual int Wait() = 0;
+
+ // Returns true if the death test passed; that is, the test process
+ // exited during the test, its exit status matches a user-supplied
+ // predicate, and its stderr output matches a user-supplied regular
+ // expression.
+ // The user-supplied predicate may be a macro expression rather
+ // than a function pointer or functor, or else Wait and Passed could
+ // be combined.
+ virtual bool Passed(bool exit_status_ok) = 0;
+
+ // Signals that the death test did not die as expected.
+ virtual void Abort(AbortReason reason) = 0;
+
+ // Returns a human-readable outcome message regarding the outcome of
+ // the last death test.
+ static const char* LastMessage();
+
+ static void set_last_death_test_message(const std::string& message);
+
+ private:
+ // A string containing a description of the outcome of the last death test.
+ static std::string last_death_test_message_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+// Factory interface for death tests. May be mocked out for testing.
+class DeathTestFactory {
+ public:
+ virtual ~DeathTestFactory() { }
+ virtual bool Create(const char* statement,
+ Matcher<const std::string&> matcher, const char* file,
+ int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+ bool Create(const char* statement, Matcher<const std::string&> matcher,
+ const char* file, int line, DeathTest** test) override;
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// A string passed to EXPECT_DEATH (etc.) is caught by one of these overloads
+// and interpreted as a regex (rather than an Eq matcher) for legacy
+// compatibility.
+inline Matcher<const ::std::string&> MakeDeathTestMatcher(
+ ::testing::internal::RE regex) {
+ return ContainsRegex(regex.pattern());
+}
+inline Matcher<const ::std::string&> MakeDeathTestMatcher(const char* regex) {
+ return ContainsRegex(regex);
+}
+inline Matcher<const ::std::string&> MakeDeathTestMatcher(
+ const ::std::string& regex) {
+ return ContainsRegex(regex);
+}
+
+// If a Matcher<const ::std::string&> is passed to EXPECT_DEATH (etc.), it's
+// used directly.
+inline Matcher<const ::std::string&> MakeDeathTestMatcher(
+ Matcher<const ::std::string&> matcher) {
+ return matcher;
+}
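+
+// For example, EXPECT_DEATH(Foo(), "overflow") routes the bare string
+// through MakeDeathTestMatcher(const char*) and checks stderr with
+// ContainsRegex("overflow"), while passing a ready-made
+// Matcher<const ::std::string&> uses that matcher unchanged. (Foo() is
+// a placeholder function name here.)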
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } catch (const ::std::exception& gtest_exception) { \
+ fprintf(\
+ stderr, \
+ "\n%s: Caught std::exception-derived exception escaping the " \
+ "death test statement. Exception message: %s\n", \
+ ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+ gtest_exception.what()); \
+ fflush(stderr); \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ } catch (...) { \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ }
+
+# else
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+#define GTEST_DEATH_TEST_(statement, predicate, regex_or_matcher, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::DeathTest* gtest_dt; \
+ if (!::testing::internal::DeathTest::Create( \
+ #statement, \
+ ::testing::internal::MakeDeathTestMatcher(regex_or_matcher), \
+            __FILE__, __LINE__, &gtest_dt)) {                                  \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ if (gtest_dt != nullptr) { \
+ std::unique_ptr< ::testing::internal::DeathTest> gtest_dt_ptr(gtest_dt); \
+ switch (gtest_dt->AssumeRole()) { \
+ case ::testing::internal::DeathTest::OVERSEE_TEST: \
+ if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ break; \
+ case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+ ::testing::internal::DeathTest::ReturnSentinel gtest_sentinel( \
+ gtest_dt); \
+ GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+ gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+ break; \
+ } \
+ default: \
+ break; \
+ } \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__) \
+ : fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
+// NDEBUG mode. In this case we need the statements to be executed and the macro
+// must accept a streamed message even though the message is never printed.
+// The regex object is not evaluated, but it is used to prevent "unused"
+// warnings and to avoid an expression that doesn't compile in debug mode.
+#define GTEST_EXECUTE_STATEMENT_(statement, regex_or_matcher) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } else if (!::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::MakeDeathTestMatcher(regex_or_matcher); \
+ } else \
+ ::testing::Message()
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const std::string& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ const std::string& file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ std::string file_;
+ int line_;
+ int index_;
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+} // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+// EXPECT_DEATH(server.ProcessRequest(i),
+// "Invalid request .* in ProcessRequest()")
+// << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// The final parameter to each of these macros is a matcher applied to any data
+// the sub-process wrote to stderr. For compatibility with existing tests, a
+// bare string is interpreted as a regular expression matcher.
+//
+// On the regular expressions used in death tests:
+//
+// GOOGLETEST_CM0005 DO NOT DELETE
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows or Mac), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+// \\c matches any literal character c, which must be a punctuation character
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
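+// For example, the following death test stays within this portable
+// subset (ParseInput is a hypothetical function):
+//
+//   EXPECT_DEATH(ParseInput(nullptr), "expected \\d+ arguments");
+//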
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
+// Known caveats:
+//
+// A "threadsafe" style death test obtains the path to the test
+// program from argv[0] and re-executes it in the sub-process. For
+// simplicity, the current implementation doesn't search the PATH
+// when launching the sub-process. This means that the user must
+// invoke the test program via a path that contains at least one
+// path separator (e.g. path/to/foo_test and
+// /absolute/path/to/bar_test are fine, but foo_test is not). This
+// is rarely a problem as people usually don't put the test binary
+// directory in PATH.
+//
+
+// Asserts that a given `statement` causes the program to exit, with an
+// integer exit status that satisfies `predicate`, and emitting error output
+// that matches `matcher`.
+# define ASSERT_EXIT(statement, predicate, matcher) \
+ GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_FATAL_FAILURE_)
+
+// Like `ASSERT_EXIT`, but continues on to successive tests in the
+// test suite, if any:
+# define EXPECT_EXIT(statement, predicate, matcher) \
+ GTEST_DEATH_TEST_(statement, predicate, matcher, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given `statement` causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches `matcher`.
+# define ASSERT_DEATH(statement, matcher) \
+ ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher)
+
+// Like `ASSERT_DEATH`, but continues on to successive tests in the
+// test suite, if any:
+# define EXPECT_DEATH(statement, matcher) \
+ EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, matcher)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+ explicit ExitedWithCode(int exit_code);
+ ExitedWithCode(const ExitedWithCode&) = default;
+ void operator=(const ExitedWithCode& other) = delete;
+ bool operator()(int exit_status) const;
+ private:
+ const int exit_code_;
+};
+
+# if !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+// GOOGLETEST_CM0006 DO NOT DELETE
+class GTEST_API_ KilledBySignal {
+ public:
+ explicit KilledBySignal(int signum);
+ bool operator()(int exit_status) const;
+ private:
+ const int signum_;
+};
+# endif // !GTEST_OS_WINDOWS && !GTEST_OS_FUCHSIA
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the sideeffects of the call are only visible in opt mode, and not
+// in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+// if (sideeffect) {
+// *sideeffect = 12;
+// }
+// LOG(DFATAL) << "death";
+// return 12;
+// }
+//
+// TEST(TestSuite, TestDieOr12WorksInDgbAndOpt) {
+// int sideeffect = 0;
+// // Only asserts in dbg.
+// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+// // opt-mode has sideeffect visible.
+// EXPECT_EQ(12, sideeffect);
+// #else
+// // dbg-mode no visible sideeffect.
+// EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugOr12() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects. A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+// // Side-effects here will have an effect after this statement in
+// // opt mode, but none in debug mode.
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+# ifdef NDEBUG
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# else
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+
+# endif // NDEBUG for EXPECT_DEBUG_DEATH
+#endif // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// if and only if EXPECT_DEATH and ASSERT_DEATH compile with the same parameters
+// on systems that support death tests. This allows one to write such a macro on
+// a system that does not support death tests and be sure that it will compile
+// on a death-test supporting system. It is exposed publicly so that systems
+// that have death-tests with stricter requirements than GTEST_HAS_DEATH_TEST
+// can write their own equivalent of EXPECT_DEATH_IF_SUPPORTED and
+// ASSERT_DEATH_IF_SUPPORTED.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter if and only if EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// statement unconditionally returns or throws. The Message constructor at
+// the end allows the syntax of streaming additional messages into the
+// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+#else
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, )
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, return)
+#endif
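+
+// For example (a sketch; Handler and InvariantHolds are hypothetical):
+//
+//   TEST(HandlerTest, RejectsNull) {
+//     EXPECT_DEATH_IF_SUPPORTED(Handler(nullptr), "null handler");
+//     EXPECT_TRUE(InvariantHolds());
+//   }
+//
+// On platforms without death-test support the first assertion only logs
+// a warning, and the rest of the test still runs.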
+
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing and Mocking Framework (Google Test)
+//
+// GOOGLETEST_CM0001 DO NOT DELETE
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_SUITE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+// Range(begin, end [, step]) - Yields values {begin, begin+step,
+// begin+step+step, ...}. The values do not
+// include end. step defaults to 1.
+// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
+// ValuesIn(container) - Yields values from a C-style array, an STL
+// ValuesIn(begin,end) container, or an iterator range [begin, end).
+// Bool() - Yields sequence {false, true}.
+// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
+// for the math savvy) of the values generated
+// by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test suite
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_SUITE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_SUITE_P macro is a prefix that will be added to the
+// actual test suite name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_SUITE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_SUITE_P will instantiate all tests
+// in the given test suite, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_SUITE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on the one hand, to adjust generator parameters
+// in order to dynamically determine a set of tests to run and, on the
+// other hand, to inspect the generated tests with the Google Test
+// reflection API before RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+ // You can inherit all the usual members for a non-parameterized test
+ // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+ // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+ // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+ // GetParam works just the same here as if you inherit from TestWithParam.
+ EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
+
+#include <iterator>
+#include <utility>
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Type and function utilities for implementing parameterized tests.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <ctype.h>
+
+#include <cassert>
+#include <iterator>
+#include <memory>
+#include <set>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure, // Failed and the test should be terminated.
+ kSkip // Skipped.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ TestPartResult(Type a_type, const char* a_file_name, int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name == nullptr ? "" : a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {}
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const {
+ return file_name_.empty() ? nullptr : file_name_.c_str();
+ }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); }
+
+ // Returns true if and only if the test part was skipped.
+ bool skipped() const { return type_ == kSkip; }
+
+ // Returns true if and only if the test part passed.
+ bool passed() const { return type_ == kSuccess; }
+
+ // Returns true if and only if the test part non-fatally failed.
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+ // Returns true if and only if the test part fatally failed.
+ bool fatally_failed() const { return type_ == kFatalFailure; }
+
+ // Returns true if and only if the test part failed.
+ bool failed() const { return fatally_failed() || nonfatally_failed(); }
+
+ private:
+ Type type_;
+
+ // Gets the summary of the failure message by omitting the stack
+ // trace in it.
+ static std::string ExtractSummary(const char* message);
+
+ // The name of the source file where the test part took place, or
+ // "" if the source file is unknown.
+ std::string file_name_;
+ // The line in the source file where the test part took place, or -1
+ // if the line number is unknown.
+ int line_number_;
+ std::string summary_; // The test failure summary.
+ std::string message_; // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+ TestPartResultArray() {}
+
+ // Appends the given TestPartResult to the array.
+ void Append(const TestPartResult& result);
+
+ // Returns the TestPartResult at the given index (0-based).
+ const TestPartResult& GetTestPartResult(int index) const;
+
+ // Returns the number of TestPartResult objects in the array.
+ int size() const;
+
+ private:
+ std::vector<TestPartResult> array_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
+
+// This interface knows how to report a test part result.
+class GTEST_API_ TestPartResultReporterInterface {
+ public:
+ virtual ~TestPartResultReporterInterface() {}
+
+ virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Besides checking if fatal failures were
+// reported, it only delegates the reporting to the former result reporter.
+// The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
+ public:
+ HasNewFatalFailureHelper();
+ ~HasNewFatalFailureHelper() override;
+ void ReportTestPartResult(const TestPartResult& result) override;
+ bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+ bool has_new_fatal_failure_;
+ TestPartResultReporterInterface* original_reporter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+namespace testing {
+// Input to a parameterized test name generator, describing a test parameter.
+// Consists of the parameter value and the integer parameter index.
+template <class ParamType>
+struct TestParamInfo {
+ TestParamInfo(const ParamType& a_param, size_t an_index) :
+ param(a_param),
+ index(an_index) {}
+ ParamType param;
+ size_t index;
+};
+
+// A builtin parameterized test name generator which returns the result of
+// testing::PrintToString.
+struct PrintToStringParamName {
+ template <class ParamType>
+ std::string operator()(const TestParamInfo<ParamType>& info) const {
+ return PrintToString(info.param);
+ }
+};
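+
+// For example (a sketch; MyTest is a hypothetical TestWithParam<int>
+// fixture):
+//
+//   INSTANTIATE_TEST_SUITE_P(Ints, MyTest, testing::Values(1, 2, 3),
+//                            testing::PrintToStringParamName());
+//
+// names each instance after PrintToString of its value (e.g.
+// Ints/MyTest.DoesFoo/1, .../2, .../3) instead of its sequential index.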
+
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// Utility Functions
+
+// Outputs a message explaining invalid registration of different
+// fixture class for the same test suite. This may happen when
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestSuiteType(const char* test_suite_name,
+ CodeLocation code_location);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+ virtual ~ParamIteratorInterface() {}
+ // A pointer to the base generator instance.
+ // Used only for the purposes of iterator comparison
+ // to make sure that two iterators belong to the same generator.
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+ // Advances iterator to point to the next element
+ // provided by the generator. The caller is responsible
+ // for not calling Advance() on an iterator equal to
+ // BaseGenerator()->End().
+ virtual void Advance() = 0;
+ // Clones the iterator object. Used for implementing copy semantics
+ // of ParamIterator<T>.
+ virtual ParamIteratorInterface* Clone() const = 0;
+ // Dereferences the current iterator and provides (read-only) access
+ // to the pointed value. It is the caller's responsibility not to call
+ // Current() on an iterator equal to BaseGenerator()->End().
+ // Used for implementing ParamGenerator<T>::operator*().
+ virtual const T* Current() const = 0;
+ // Determines whether the given iterator and other point to the same
+ // element in the sequence generated by the generator.
+ // Used for implementing ParamGenerator<T>::operator==().
+ virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+ typedef T value_type;
+ typedef const T& reference;
+ typedef ptrdiff_t difference_type;
+
+ // ParamIterator assumes ownership of the impl_ pointer.
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+ ParamIterator& operator=(const ParamIterator& other) {
+ if (this != &other)
+ impl_.reset(other.impl_->Clone());
+ return *this;
+ }
+
+ const T& operator*() const { return *impl_->Current(); }
+ const T* operator->() const { return impl_->Current(); }
+ // Prefix version of operator++.
+ ParamIterator& operator++() {
+ impl_->Advance();
+ return *this;
+ }
+ // Postfix version of operator++.
+ ParamIterator operator++(int /*unused*/) {
+ ParamIteratorInterface<T>* clone = impl_->Clone();
+ impl_->Advance();
+ return ParamIterator(clone);
+ }
+ bool operator==(const ParamIterator& other) const {
+ return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+ }
+ bool operator!=(const ParamIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class ParamGenerator<T>;
+ explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+ std::unique_ptr<ParamIteratorInterface<T> > impl_;
+};
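+
+// Illustrative sketch, not part of the original header: ParamGenerator<T>
+// (defined below) exposes begin()/end() returning ParamIterator<T>, so a
+// generator's sequence can be walked like any STL range. The generator here
+// is assumed to come from, e.g., testing::Range.
+//
+//   testing::internal::ParamGenerator<int> gen = testing::Range(0, 3);
+//   for (testing::internal::ParamIterator<int> it = gen.begin();
+//        it != gen.end(); ++it) {
+//     int value = *it;  // yields 0, 1, 2
+//   }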
+
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+ typedef T ParamType;
+
+ virtual ~ParamGeneratorInterface() {}
+
+ // Generator interface definition
+ virtual ParamIteratorInterface<T>* Begin() const = 0;
+ virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object. This is possible because that instance is immutable.
+template<typename T>
+class ParamGenerator {
+ public:
+ typedef ParamIterator<T> iterator;
+
+ explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+ ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+ ParamGenerator& operator=(const ParamGenerator& other) {
+ impl_ = other.impl_;
+ return *this;
+ }
+
+ iterator begin() const { return iterator(impl_->Begin()); }
+ iterator end() const { return iterator(impl_->End()); }
+
+ private:
+ std::shared_ptr<const ParamGeneratorInterface<T> > impl_;
+};
+
+// Generates values from a range of two comparable values. Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ RangeGenerator(T begin, T end, IncrementT step)
+ : begin_(begin), end_(end),
+ step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+ ~RangeGenerator() override {}
+
+ ParamIteratorInterface<T>* Begin() const override {
+ return new Iterator(this, begin_, 0, step_);
+ }
+ ParamIteratorInterface<T>* End() const override {
+ return new Iterator(this, end_, end_index_, step_);
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+ IncrementT step)
+ : base_(base), value_(value), index_(index), step_(step) {}
+ ~Iterator() override {}
+
+ const ParamGeneratorInterface<T>* BaseGenerator() const override {
+ return base_;
+ }
+ void Advance() override {
+ value_ = static_cast<T>(value_ + step_);
+ index_++;
+ }
+ ParamIteratorInterface<T>* Clone() const override {
+ return new Iterator(*this);
+ }
+ const T* Current() const override { return &value_; }
+ bool Equals(const ParamIteratorInterface<T>& other) const override {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const int other_index =
+ CheckedDowncastToActualType<const Iterator>(&other)->index_;
+ return index_ == other_index;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : ParamIteratorInterface<T>(),
+ base_(other.base_), value_(other.value_), index_(other.index_),
+ step_(other.step_) {}
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<T>* const base_;
+ T value_;
+ int index_;
+ const IncrementT step_;
+ }; // class RangeGenerator::Iterator
+
+ static int CalculateEndIndex(const T& begin,
+ const T& end,
+ const IncrementT& step) {
+ int end_index = 0;
+ for (T i = begin; i < end; i = static_cast<T>(i + step))
+ end_index++;
+ return end_index;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const RangeGenerator& other);
+
+ const T begin_;
+ const T end_;
+ const IncrementT step_;
+ // The index for the end() iterator. All the elements in the generated
+ // sequence are indexed (0-based) to aid iterator comparison.
+ const int end_index_;
+}; // class RangeGenerator
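+
+// Behavior sketch, not part of the original header: RangeGenerator produces
+// begin, begin+step, begin+2*step, ... strictly below end, matching the
+// Range() documentation further down. For example,
+//
+//   testing::Range(0, 10, 3)
+//
+// generates the sequence {0, 3, 6, 9}; end_index_ is then 4 and marks the
+// position reported by End().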
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ template <typename ForwardIterator>
+ ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+ : container_(begin, end) {}
+ ~ValuesInIteratorRangeGenerator() override {}
+
+ ParamIteratorInterface<T>* Begin() const override {
+ return new Iterator(this, container_.begin());
+ }
+ ParamIteratorInterface<T>* End() const override {
+ return new Iterator(this, container_.end());
+ }
+
+ private:
+ typedef typename ::std::vector<T> ContainerType;
+
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base,
+ typename ContainerType::const_iterator iterator)
+ : base_(base), iterator_(iterator) {}
+ ~Iterator() override {}
+
+ const ParamGeneratorInterface<T>* BaseGenerator() const override {
+ return base_;
+ }
+ void Advance() override {
+ ++iterator_;
+ value_.reset();
+ }
+ ParamIteratorInterface<T>* Clone() const override {
+ return new Iterator(*this);
+ }
+ // We need to use cached value referenced by iterator_ because *iterator_
+ // can return a temporary object (and of type other than T), so just
+ // having "return &*iterator_;" doesn't work.
+ // value_ is updated here and not in Advance() because Advance()
+ // can advance iterator_ beyond the end of the range, and we cannot
+ // detect that fact. The client code, on the other hand, is
+ // responsible for not calling Current() on an out-of-range iterator.
+ const T* Current() const override {
+ if (value_.get() == nullptr) value_.reset(new T(*iterator_));
+ return value_.get();
+ }
+ bool Equals(const ParamIteratorInterface<T>& other) const override {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ return iterator_ ==
+ CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ // The explicit constructor call suppresses a false warning
+ // emitted by gcc when supplied with the -Wextra option.
+ : ParamIteratorInterface<T>(),
+ base_(other.base_),
+ iterator_(other.iterator_) {}
+
+ const ParamGeneratorInterface<T>* const base_;
+ typename ContainerType::const_iterator iterator_;
+ // A cached value of *iterator_. We keep it here to allow access by
+ // pointer in the wrapping iterator's operator->().
+ // value_ needs to be mutable to be accessed in Current().
+ // Use of std::unique_ptr helps manage cached value's lifetime,
+ // which is bound by the lifespan of the iterator itself.
+ mutable std::unique_ptr<const T> value_;
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
+
+ const ContainerType container_;
+}; // class ValuesInIteratorRangeGenerator
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Default parameterized test name generator, returns a string containing the
+// integer test parameter index.
+template <class ParamType>
+std::string DefaultParamName(const TestParamInfo<ParamType>& info) {
+ Message name_stream;
+ name_stream << info.index;
+ return name_stream.GetString();
+}
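+
+// For illustration only (FooTest is hypothetical): with this default name
+// generator, an instantiation such as
+//
+//   INSTANTIATE_TEST_SUITE_P(Seq, FooTest, testing::Values("a", "b"));
+//
+// names its tests Seq/FooTest.*/0 and Seq/FooTest.*/1, i.e. by parameter
+// index rather than by value.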
+
+template <typename T = int>
+void TestNotEmpty() {
+ static_assert(sizeof(T) == 0, "Empty arguments are not allowed.");
+}
+template <typename T = int>
+void TestNotEmpty(const T&) {}
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+ typedef typename TestClass::ParamType ParamType;
+ explicit ParameterizedTestFactory(ParamType parameter) :
+ parameter_(parameter) {}
+ Test* CreateTest() override {
+ TestClass::SetParam(&parameter_);
+ return new TestClass();
+ }
+
+ private:
+ const ParamType parameter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+ virtual ~TestMetaFactoryBase() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestSuiteInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a
+// meta-factory creator class.
+template <class TestSuite>
+class TestMetaFactory
+ : public TestMetaFactoryBase<typename TestSuite::ParamType> {
+ public:
+ using ParamType = typename TestSuite::ParamType;
+
+ TestMetaFactory() {}
+
+ TestFactoryBase* CreateTestFactory(ParamType parameter) override {
+ return new ParameterizedTestFactory<TestSuite>(parameter);
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestSuiteInfoBase is a generic interface
+// to ParameterizedTestSuiteInfo classes. ParameterizedTestSuiteInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_SUITE_P macro invocations
+// and uses that information to register all resulting test instances
+// in RegisterTests method. The ParameterizedTestSuiteRegistry class holds
+// a collection of pointers to the ParameterizedTestSuiteInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestSuiteInfoBase {
+ public:
+ virtual ~ParameterizedTestSuiteInfoBase() {}
+
+ // Base part of test suite name for display purposes.
+ virtual const std::string& GetTestSuiteName() const = 0;
+ // Test suite id to verify identity.
+ virtual TypeId GetTestSuiteTypeId() const = 0;
+ // UnitTest class invokes this method to register tests in this
+ // test suite right before running them in RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestSuiteInfoBase derived class.
+ virtual void RegisterTests() = 0;
+
+ protected:
+ ParameterizedTestSuiteInfoBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Reports the name of a test suite as safe to ignore, as a side
+// effect of constructing an object of this type.
+struct GTEST_API_ MarkAsIgnored {
+ explicit MarkAsIgnored(const char* test_suite);
+};
+
+GTEST_API_ void InsertSyntheticTestCase(const std::string& name,
+ CodeLocation location, bool has_test_p);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestSuiteInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test suite and generators
+// obtained from INSTANTIATE_TEST_SUITE_P macro invocations for that
+// test suite. It registers tests with all values generated by all
+// generators when asked.
+template <class TestSuite>
+class ParameterizedTestSuiteInfo : public ParameterizedTestSuiteInfoBase {
+ public:
+ // ParamType and GeneratorCreationFunc are private types but are required
+ // for declarations of public methods AddTestPattern() and
+ // AddTestSuiteInstantiation().
+ using ParamType = typename TestSuite::ParamType;
+ // A function that returns an instance of appropriate generator type.
+ typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+ using ParamNameGeneratorFunc = std::string(const TestParamInfo<ParamType>&);
+
+ explicit ParameterizedTestSuiteInfo(const char* name,
+ CodeLocation code_location)
+ : test_suite_name_(name), code_location_(code_location) {}
+
+ // Test suite base name for display purposes.
+ const std::string& GetTestSuiteName() const override {
+ return test_suite_name_;
+ }
+ // Test suite id to verify identity.
+ TypeId GetTestSuiteTypeId() const override { return GetTypeId<TestSuite>(); }
+ // TEST_P macro uses AddTestPattern() to record information
+ // about a single test in a LocalTestInfo structure.
+ // test_suite_name is the base name of the test suite (without invocation
+ // prefix). test_base_name is the name of an individual test without
+ // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is the
+ // test suite base name and DoBar is the test base name.
+ void AddTestPattern(const char* test_suite_name, const char* test_base_name,
+ TestMetaFactoryBase<ParamType>* meta_factory,
+ CodeLocation code_location) {
+ tests_.push_back(std::shared_ptr<TestInfo>(new TestInfo(
+ test_suite_name, test_base_name, meta_factory, code_location)));
+ }
+ // INSTANTIATE_TEST_SUITE_P macro uses AddGenerator() to record information
+ // about a generator.
+ int AddTestSuiteInstantiation(const std::string& instantiation_name,
+ GeneratorCreationFunc* func,
+ ParamNameGeneratorFunc* name_func,
+ const char* file, int line) {
+ instantiations_.push_back(
+ InstantiationInfo(instantiation_name, func, name_func, file, line));
+ return 0; // Return value used only to run this method in namespace scope.
+ }
+ // UnitTest class invokes this method to register tests in this test suite
+ // right before running tests in RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestSuiteInfoBase derived class.
+ // UnitTest has a guard to prevent from calling this method more than once.
+ void RegisterTests() override {
+ bool generated_instantiations = false;
+
+ for (typename TestInfoContainer::iterator test_it = tests_.begin();
+ test_it != tests_.end(); ++test_it) {
+ std::shared_ptr<TestInfo> test_info = *test_it;
+ for (typename InstantiationContainer::iterator gen_it =
+ instantiations_.begin(); gen_it != instantiations_.end();
+ ++gen_it) {
+ const std::string& instantiation_name = gen_it->name;
+ ParamGenerator<ParamType> generator((*gen_it->generator)());
+ ParamNameGeneratorFunc* name_func = gen_it->name_func;
+ const char* file = gen_it->file;
+ int line = gen_it->line;
+
+ std::string test_suite_name;
+ if (!instantiation_name.empty())
+ test_suite_name = instantiation_name + "/";
+ test_suite_name += test_info->test_suite_base_name;
+
+ size_t i = 0;
+ std::set<std::string> test_param_names;
+ for (typename ParamGenerator<ParamType>::iterator param_it =
+ generator.begin();
+ param_it != generator.end(); ++param_it, ++i) {
+ generated_instantiations = true;
+
+ Message test_name_stream;
+
+ std::string param_name = name_func(
+ TestParamInfo<ParamType>(*param_it, i));
+
+ GTEST_CHECK_(IsValidParamName(param_name))
+ << "Parameterized test name '" << param_name
+ << "' is invalid, in " << file
+ << " line " << line << std::endl;
+
+ GTEST_CHECK_(test_param_names.count(param_name) == 0)
+ << "Duplicate parameterized test name '" << param_name
+ << "', in " << file << " line " << line << std::endl;
+
+ test_param_names.insert(param_name);
+
+ if (!test_info->test_base_name.empty()) {
+ test_name_stream << test_info->test_base_name << "/";
+ }
+ test_name_stream << param_name;
+ MakeAndRegisterTestInfo(
+ test_suite_name.c_str(), test_name_stream.GetString().c_str(),
+ nullptr, // No type parameter.
+ PrintToString(*param_it).c_str(), test_info->code_location,
+ GetTestSuiteTypeId(),
+ SuiteApiResolver<TestSuite>::GetSetUpCaseOrSuite(file, line),
+ SuiteApiResolver<TestSuite>::GetTearDownCaseOrSuite(file, line),
+ test_info->test_meta_factory->CreateTestFactory(*param_it));
+ } // for param_it
+ } // for gen_it
+ } // for test_it
+
+ if (!generated_instantiations) {
+ // There are no generators, or they all generate nothing ...
+ InsertSyntheticTestCase(GetTestSuiteName(), code_location_,
+ !tests_.empty());
+ }
+ } // RegisterTests
+
+ private:
+ // LocalTestInfo structure keeps information about a single test registered
+ // with TEST_P macro.
+ struct TestInfo {
+ TestInfo(const char* a_test_suite_base_name, const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory,
+ CodeLocation a_code_location)
+ : test_suite_base_name(a_test_suite_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory),
+ code_location(a_code_location) {}
+
+ const std::string test_suite_base_name;
+ const std::string test_base_name;
+ const std::unique_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+ const CodeLocation code_location;
+ };
+ using TestInfoContainer = ::std::vector<std::shared_ptr<TestInfo> >;
+ // Records data received from INSTANTIATE_TEST_SUITE_P macros:
+ // <Instantiation name, Sequence generator creation function,
+ // Name generator function, Source file, Source line>
+ struct InstantiationInfo {
+ InstantiationInfo(const std::string &name_in,
+ GeneratorCreationFunc* generator_in,
+ ParamNameGeneratorFunc* name_func_in,
+ const char* file_in,
+ int line_in)
+ : name(name_in),
+ generator(generator_in),
+ name_func(name_func_in),
+ file(file_in),
+ line(line_in) {}
+
+ std::string name;
+ GeneratorCreationFunc* generator;
+ ParamNameGeneratorFunc* name_func;
+ const char* file;
+ int line;
+ };
+ typedef ::std::vector<InstantiationInfo> InstantiationContainer;
+
+ static bool IsValidParamName(const std::string& name) {
+ // Check for empty string
+ if (name.empty())
+ return false;
+
+ // Check for invalid characters
+ for (std::string::size_type index = 0; index < name.size(); ++index) {
+ if (!IsAlNum(name[index]) && name[index] != '_')
+ return false;
+ }
+
+ return true;
+ }
+
+ const std::string test_suite_name_;
+ CodeLocation code_location_;
+ TestInfoContainer tests_;
+ InstantiationContainer instantiations_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteInfo);
+}; // class ParameterizedTestSuiteInfo
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+template <class TestCase>
+using ParameterizedTestCaseInfo = ParameterizedTestSuiteInfo<TestCase>;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestSuiteRegistry contains a map of
+// ParameterizedTestSuiteInfoBase classes accessed by test suite names. TEST_P
+// and INSTANTIATE_TEST_SUITE_P macros use it to locate their corresponding
+// ParameterizedTestSuiteInfo descriptors.
+class ParameterizedTestSuiteRegistry {
+ public:
+ ParameterizedTestSuiteRegistry() {}
+ ~ParameterizedTestSuiteRegistry() {
+ for (auto& test_suite_info : test_suite_infos_) {
+ delete test_suite_info;
+ }
+ }
+
+ // Looks up or creates and returns a structure containing information about
+ // tests and instantiations of a particular test suite.
+ template <class TestSuite>
+ ParameterizedTestSuiteInfo<TestSuite>* GetTestSuitePatternHolder(
+ const char* test_suite_name, CodeLocation code_location) {
+ ParameterizedTestSuiteInfo<TestSuite>* typed_test_info = nullptr;
+ for (auto& test_suite_info : test_suite_infos_) {
+ if (test_suite_info->GetTestSuiteName() == test_suite_name) {
+ if (test_suite_info->GetTestSuiteTypeId() != GetTypeId<TestSuite>()) {
+ // Complain about incorrect usage of Google Test facilities
+ // and terminate the program since we cannot guarantee correct
+ // test suite setup and tear-down in this case.
+ ReportInvalidTestSuiteType(test_suite_name, code_location);
+ posix::Abort();
+ } else {
+ // At this point we are sure that the object we found is of the same
+ // type we are looking for, so we downcast it to that type
+ // without further checks.
+ typed_test_info = CheckedDowncastToActualType<
+ ParameterizedTestSuiteInfo<TestSuite> >(test_suite_info);
+ }
+ break;
+ }
+ }
+ if (typed_test_info == nullptr) {
+ typed_test_info = new ParameterizedTestSuiteInfo<TestSuite>(
+ test_suite_name, code_location);
+ test_suite_infos_.push_back(typed_test_info);
+ }
+ return typed_test_info;
+ }
+ void RegisterTests() {
+ for (auto& test_suite_info : test_suite_infos_) {
+ test_suite_info->RegisterTests();
+ }
+ }
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ template <class TestCase>
+ ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+ const char* test_case_name, CodeLocation code_location) {
+ return GetTestSuitePatternHolder<TestCase>(test_case_name, code_location);
+ }
+
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ private:
+ using TestSuiteInfoContainer = ::std::vector<ParameterizedTestSuiteInfoBase*>;
+
+ TestSuiteInfoContainer test_suite_infos_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestSuiteRegistry);
+};
+
+// Keeps track of which type-parameterized test suites are defined, and
+// where, as well as which of them are instantiated. This allows
+// subsequently identifying suites that are defined but never used.
+class TypeParameterizedTestSuiteRegistry {
+ public:
+ // Adds a suite definition.
+ void RegisterTestSuite(const char* test_suite_name,
+ CodeLocation code_location);
+
+ // Adds an instantiation of a suite.
+ void RegisterInstantiation(const char* test_suite_name);
+
+ // For each suite reported as defined but not reported as instantiated,
+ // emits a test that reports that fact (configurably, as an error).
+ void CheckForInstantiations();
+
+ private:
+ struct TypeParameterizedTestSuiteInfo {
+ explicit TypeParameterizedTestSuiteInfo(CodeLocation c)
+ : code_location(c), instantiated(false) {}
+
+ CodeLocation code_location;
+ bool instantiated;
+ };
+
+ std::map<std::string, TypeParameterizedTestSuiteInfo> suites_;
+};
+
+} // namespace internal
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+// Used in the Values() function to provide polymorphic capabilities.
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4100)
+#endif
+
+template <typename... Ts>
+class ValueArray {
+ public:
+ explicit ValueArray(Ts... v) : v_(FlatTupleConstructTag{}, std::move(v)...) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const { // NOLINT
+ return ValuesIn(MakeVector<T>(MakeIndexSequence<sizeof...(Ts)>()));
+ }
+
+ private:
+ template <typename T, size_t... I>
+ std::vector<T> MakeVector(IndexSequence<I...>) const {
+ return std::vector<T>{static_cast<T>(v_.template Get<I>())...};
+ }
+
+ FlatTuple<Ts...> v_;
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
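+
+// Conversion sketch, not part of the original header: ValueArray defers the
+// choice of element type until it is converted to a ParamGenerator<T>, so a
+// single Values(...) call can feed suites with different parameter types.
+// The target types below are illustrative.
+//
+//   testing::internal::ParamGenerator<double> gd = testing::Values(1, 2.5f);
+//   testing::internal::ParamGenerator<int> gi = testing::Values(1, 2.5f);
+//   // gd yields {1.0, 2.5}; gi yields {1, 2} via static_cast<int>.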
+
+template <typename... T>
+class CartesianProductGenerator
+ : public ParamGeneratorInterface<::std::tuple<T...>> {
+ public:
+ typedef ::std::tuple<T...> ParamType;
+
+ CartesianProductGenerator(const std::tuple<ParamGenerator<T>...>& g)
+ : generators_(g) {}
+ ~CartesianProductGenerator() override {}
+
+ ParamIteratorInterface<ParamType>* Begin() const override {
+ return new Iterator(this, generators_, false);
+ }
+ ParamIteratorInterface<ParamType>* End() const override {
+ return new Iterator(this, generators_, true);
+ }
+
+ private:
+ template <class I>
+ class IteratorImpl;
+ template <size_t... I>
+ class IteratorImpl<IndexSequence<I...>>
+ : public ParamIteratorInterface<ParamType> {
+ public:
+ IteratorImpl(const ParamGeneratorInterface<ParamType>* base,
+ const std::tuple<ParamGenerator<T>...>& generators, bool is_end)
+ : base_(base),
+ begin_(std::get<I>(generators).begin()...),
+ end_(std::get<I>(generators).end()...),
+ current_(is_end ? end_ : begin_) {
+ ComputeCurrentValue();
+ }
+ ~IteratorImpl() override {}
+
+ const ParamGeneratorInterface<ParamType>* BaseGenerator() const override {
+ return base_;
+ }
+ // Advance() must not be called on an end-of-range iterator, so none of
+ // the component iterators may be past the end of its range, either.
+ void Advance() override {
+ assert(!AtEnd());
+ // Advance the last iterator.
+ ++std::get<sizeof...(T) - 1>(current_);
+ // if that reaches end, propagate that up.
+ AdvanceIfEnd<sizeof...(T) - 1>();
+ ComputeCurrentValue();
+ }
+ ParamIteratorInterface<ParamType>* Clone() const override {
+ return new IteratorImpl(*this);
+ }
+
+ const ParamType* Current() const override { return current_value_.get(); }
+
+ bool Equals(const ParamIteratorInterface<ParamType>& other) const override {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const IteratorImpl* typed_other =
+ CheckedDowncastToActualType<const IteratorImpl>(&other);
+
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ if (AtEnd() && typed_other->AtEnd()) return true;
+
+ bool same = true;
+ bool dummy[] = {
+ (same = same && std::get<I>(current_) ==
+ std::get<I>(typed_other->current_))...};
+ (void)dummy;
+ return same;
+ }
+
+ private:
+ template <size_t ThisI>
+ void AdvanceIfEnd() {
+ if (std::get<ThisI>(current_) != std::get<ThisI>(end_)) return;
+
+ bool last = ThisI == 0;
+ if (last) {
+ // We are done. Nothing else to propagate.
+ return;
+ }
+
+ constexpr size_t NextI = ThisI - (ThisI != 0);
+ std::get<ThisI>(current_) = std::get<ThisI>(begin_);
+ ++std::get<NextI>(current_);
+ AdvanceIfEnd<NextI>();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = std::make_shared<ParamType>(*std::get<I>(current_)...);
+ }
+ bool AtEnd() const {
+ bool at_end = false;
+ bool dummy[] = {
+ (at_end = at_end || std::get<I>(current_) == std::get<I>(end_))...};
+ (void)dummy;
+ return at_end;
+ }
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ std::tuple<typename ParamGenerator<T>::iterator...> begin_;
+ std::tuple<typename ParamGenerator<T>::iterator...> end_;
+ std::tuple<typename ParamGenerator<T>::iterator...> current_;
+ std::shared_ptr<ParamType> current_value_;
+ };
+
+ using Iterator = IteratorImpl<typename MakeIndexSequence<sizeof...(T)>::type>;
+
+ std::tuple<ParamGenerator<T>...> generators_;
+};
+
+template <class... Gen>
+class CartesianProductHolder {
+ public:
+ CartesianProductHolder(const Gen&... g) : generators_(g...) {}
+ template <typename... T>
+ operator ParamGenerator<::std::tuple<T...>>() const {
+ return ParamGenerator<::std::tuple<T...>>(
+ new CartesianProductGenerator<T...>(generators_));
+ }
+
+ private:
+ std::tuple<Gen...> generators_;
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test suite is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test suite FooTest are each
+// instantiated three times with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_SUITE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing a sequence of values {start, start+1,
+// start+2, ..., }.
+// Range(start, end, step)
+// - returns a generator producing a sequence of values {start, start+step,
+// start+step+step, ..., }.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+// floating-point type or a user-defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+// * Condition start < end must be satisfied in order for resulting sequences
+// to contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test suite StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_SUITE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test suite StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_SUITE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_SUITE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename std::iterator_traits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename std::iterator_traits<ForwardIterator>::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test suite BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_SUITE_P(NumSequence,
+// BarTest,
+// Values("one", "two", "three"));
+//
+// This instantiates tests from test suite BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_SUITE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+//
+template <typename... T>
+internal::ValueArray<T...> Values(T... v) {
+ return internal::ValueArray<T...>(std::move(v)...);
+}
+
+// Bool() allows generating tests with parameters in the set {false, true}.
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// Combine() function.
+//
+// In the following example all tests in the test suite FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_SUITE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// std::tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+// of elements from the sequences produced by gen1, gen2, ..., genN.
+//
+// Example:
+//
+// This will instantiate tests in test suite AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<std::tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_SUITE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<std::tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// std::tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_SUITE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+template <typename... Generator>
+internal::CartesianProductHolder<Generator...> Combine(const Generator&... g) {
+ return internal::CartesianProductHolder<Generator...>(g...);
+}
+
+#define TEST_P(test_suite_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_suite_name, test_name) \
+ : public test_suite_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)() {} \
+ void TestBody() override; \
+ \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance() \
+ ->parameterized_test_registry() \
+ .GetTestSuitePatternHolder<test_suite_name>( \
+ GTEST_STRINGIFY_(test_suite_name), \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
+ ->AddTestPattern( \
+ GTEST_STRINGIFY_(test_suite_name), GTEST_STRINGIFY_(test_name), \
+ new ::testing::internal::TestMetaFactory<GTEST_TEST_CLASS_NAME_( \
+ test_suite_name, test_name)>(), \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__)); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTEST_TEST_CLASS_NAME_(test_suite_name, \
+ test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_suite_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_suite_name, test_name)::TestBody()
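+
+// Usage sketch (FooTest and the assertion are hypothetical, not part of the
+// original header): inside a TEST_P body, GetParam() returns the current
+// parameter value supplied by the matching INSTANTIATE_TEST_SUITE_P.
+//
+//   class FooTest : public testing::TestWithParam<int> {};
+//
+//   TEST_P(FooTest, IsNonNegative) {
+//     EXPECT_GE(GetParam(), 0);
+//   }
+//
+//   INSTANTIATE_TEST_SUITE_P(Small, FooTest, testing::Values(0, 1, 2));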
+
+// The last arguments to INSTANTIATE_TEST_SUITE_P allow the user to specify a
+// generator and an optional function or functor that generates custom test name
+// suffixes based on the test parameters. Such a function or functor should
+// accept one argument of type testing::TestParamInfo<class ParamType>, and
+// return std::string.
+//
+// testing::PrintToStringParamName is a builtin test suffix generator that
+// returns the value of testing::PrintToString(GetParam()).
+//
+// Note: test names must be non-empty, unique, and may only contain ASCII
+// alphanumeric characters or underscore. Because PrintToString adds quotes
+// to std::string and C strings, it won't work for these types.
+
+#define GTEST_EXPAND_(arg) arg
+#define GTEST_GET_FIRST_(first, ...) first
+#define GTEST_GET_SECOND_(first, second, ...) second
+
+#define INSTANTIATE_TEST_SUITE_P(prefix, test_suite_name, ...) \
+ static ::testing::internal::ParamGenerator<test_suite_name::ParamType> \
+ gtest_##prefix##test_suite_name##_EvalGenerator_() { \
+ return GTEST_EXPAND_(GTEST_GET_FIRST_(__VA_ARGS__, DUMMY_PARAM_)); \
+ } \
+ static ::std::string gtest_##prefix##test_suite_name##_EvalGenerateName_( \
+ const ::testing::TestParamInfo<test_suite_name::ParamType>& info) { \
+ if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::TestNotEmpty(GTEST_EXPAND_(GTEST_GET_SECOND_( \
+ __VA_ARGS__, \
+ ::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
+ DUMMY_PARAM_))); \
+ auto t = std::make_tuple(__VA_ARGS__); \
+ static_assert(std::tuple_size<decltype(t)>::value <= 2, \
+ "Too Many Args!"); \
+ } \
+ return ((GTEST_EXPAND_(GTEST_GET_SECOND_( \
+ __VA_ARGS__, \
+ ::testing::internal::DefaultParamName<test_suite_name::ParamType>, \
+ DUMMY_PARAM_))))(info); \
+ } \
+ static int gtest_##prefix##test_suite_name##_dummy_ \
+ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::UnitTest::GetInstance() \
+ ->parameterized_test_registry() \
+ .GetTestSuitePatternHolder<test_suite_name>( \
+ GTEST_STRINGIFY_(test_suite_name), \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__)) \
+ ->AddTestSuiteInstantiation( \
+ GTEST_STRINGIFY_(prefix), \
+ &gtest_##prefix##test_suite_name##_EvalGenerator_, \
+ &gtest_##prefix##test_suite_name##_EvalGenerateName_, \
+ __FILE__, __LINE__)
+
+
+// Allows marking a parameterized test class as not needing to be instantiated.
+#define GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(T) \
+ namespace gtest_do_not_use_outside_namespace_scope {} \
+ static const ::testing::internal::MarkAsIgnored gtest_allow_ignore_##T( \
+ GTEST_STRINGIFY_(T))
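+
+// Usage sketch (FooTest is hypothetical): placing
+//
+//   GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(FooTest);
+//
+// at namespace scope suppresses the synthetic failing test that would
+// otherwise flag FooTest as defined via TEST_P but never instantiated.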
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+#define INSTANTIATE_TEST_CASE_P \
+ static_assert(::testing::internal::InstantiateTestCase_P_IsDeprecated(), \
+ ""); \
+ INSTANTIATE_TEST_SUITE_P
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Google C++ Testing and Mocking Framework definitions useful in production code.
+// GOOGLETEST_CM0003 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void PrivateMethod();
+// FRIEND_TEST(MyClassTest, PrivateMethodWorks);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, PrivateMethodWorks) {
+// // Can call MyClass::PrivateMethod() here.
+// }
+//
+// Note: The test class must be in the same namespace as the class being tested.
+// For example, putting MyClassTest in an anonymous namespace will not work.
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test suite, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_SUITE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_SUITE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test suite as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to the special name TypeParam to get the type
+ // parameter. Since we are inside a derived class template, C++ requires
+ // us to visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+// TYPED_TEST_SUITE takes an optional third argument which allows specifying a
+// class that generates custom test name suffixes based on the type. This should
+// be a class which has a static template function GetName(int index) returning
+// a string for each type. The provided integer index equals the index of the
+// type in the provided type list. In many cases the index can be ignored.
+//
+// For example:
+// class MyTypeNames {
+// public:
+// template <typename T>
+// static std::string GetName(int) {
+// if (std::is_same<T, char>()) return "char";
+// if (std::is_same<T, int>()) return "int";
+// if (std::is_same<T, unsigned int>()) return "unsignedInt";
+// }
+// };
+// TYPED_TEST_SUITE(FooTest, MyTypes, MyTypeNames);
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ ...
+};
+
+// Next, declare that you will define a type-parameterized test suite
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_SUITE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test suite as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ TypeParam n = 0;
+ ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test suite name; the rest are the names of the tests in this test
+// suite.
+REGISTER_TYPED_TEST_SUITE_P(FooTest,
+ DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test suite name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, int);
+//
+// Similar to the optional argument of TYPED_TEST_SUITE above,
+// INSTANTIATE_TYPED_TEST_SUITE_P takes an optional fourth argument which
+// allows generating custom names.
+// INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes, MyTypeNames);
+
+#endif // 0
+
+
+// Implements typed tests.
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test suite.
+#define GTEST_TYPE_PARAMS_(TestSuiteName) gtest_type_params_##TestSuiteName##_
+
+// Expands to the name of the typedef for the NameGenerator, responsible for
+// creating the suffixes of the name.
+#define GTEST_NAME_GENERATOR_(TestSuiteName) \
+ gtest_type_params_##TestSuiteName##_NameGenerator
+
+#define TYPED_TEST_SUITE(CaseName, Types, ...) \
+ typedef ::testing::internal::GenerateTypeList<Types>::type \
+ GTEST_TYPE_PARAMS_(CaseName); \
+ typedef ::testing::internal::NameGeneratorSelector<__VA_ARGS__>::type \
+ GTEST_NAME_GENERATOR_(CaseName)
+
+#define TYPED_TEST(CaseName, TestName) \
+ static_assert(sizeof(GTEST_STRINGIFY_(TestName)) > 1, \
+ "test-name must not be empty"); \
+ template <typename gtest_TypeParam_> \
+ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+ : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ void TestBody() override; \
+ }; \
+ static bool gtest_##CaseName##_##TestName##_registered_ \
+ GTEST_ATTRIBUTE_UNUSED_ = ::testing::internal::TypeParameterizedTest< \
+ CaseName, \
+ ::testing::internal::TemplateSel<GTEST_TEST_CLASS_NAME_(CaseName, \
+ TestName)>, \
+ GTEST_TYPE_PARAMS_( \
+ CaseName)>::Register("", \
+ ::testing::internal::CodeLocation( \
+ __FILE__, __LINE__), \
+ GTEST_STRINGIFY_(CaseName), \
+ GTEST_STRINGIFY_(TestName), 0, \
+ ::testing::internal::GenerateNames< \
+ GTEST_NAME_GENERATOR_(CaseName), \
+ GTEST_TYPE_PARAMS_(CaseName)>()); \
+ template <typename gtest_TypeParam_> \
+ void GTEST_TEST_CLASS_NAME_(CaseName, \
+ TestName)<gtest_TypeParam_>::TestBody()
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+#define TYPED_TEST_CASE \
+ static_assert(::testing::internal::TypedTestCaseIsDeprecated(), ""); \
+ TYPED_TEST_SUITE
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+// Implements type-parameterized tests.
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test suite are defined in. The exact
+// name of the namespace is subject to change without notice.
+#define GTEST_SUITE_NAMESPACE_(TestSuiteName) gtest_suite_##TestSuiteName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test suite.
+#define GTEST_TYPED_TEST_SUITE_P_STATE_(TestSuiteName) \
+ gtest_typed_test_suite_p_state_##TestSuiteName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test suite.
+#define GTEST_REGISTERED_TEST_NAMES_(TestSuiteName) \
+ gtest_registered_test_names_##TestSuiteName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+#define TYPED_TEST_SUITE_P(SuiteName) \
+ static ::testing::internal::TypedTestSuitePState \
+ GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName)
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+#define TYPED_TEST_CASE_P \
+ static_assert(::testing::internal::TypedTestCase_P_IsDeprecated(), ""); \
+ TYPED_TEST_SUITE_P
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+#define TYPED_TEST_P(SuiteName, TestName) \
+ namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
+ template <typename gtest_TypeParam_> \
+ class TestName : public SuiteName<gtest_TypeParam_> { \
+ private: \
+ typedef SuiteName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ void TestBody() override; \
+ }; \
+ static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+ GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).AddTestName( \
+ __FILE__, __LINE__, GTEST_STRINGIFY_(SuiteName), \
+ GTEST_STRINGIFY_(TestName)); \
+ } \
+ template <typename gtest_TypeParam_> \
+ void GTEST_SUITE_NAMESPACE_( \
+ SuiteName)::TestName<gtest_TypeParam_>::TestBody()
+
+// Note: this won't work correctly if the trailing arguments are macros.
+#define REGISTER_TYPED_TEST_SUITE_P(SuiteName, ...) \
+ namespace GTEST_SUITE_NAMESPACE_(SuiteName) { \
+ typedef ::testing::internal::Templates<__VA_ARGS__> gtest_AllTests_; \
+ } \
+ static const char* const GTEST_REGISTERED_TEST_NAMES_( \
+ SuiteName) GTEST_ATTRIBUTE_UNUSED_ = \
+ GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName).VerifyRegisteredTestNames( \
+ GTEST_STRINGIFY_(SuiteName), __FILE__, __LINE__, #__VA_ARGS__)
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+#define REGISTER_TYPED_TEST_CASE_P \
+ static_assert(::testing::internal::RegisterTypedTestCase_P_IsDeprecated(), \
+ ""); \
+ REGISTER_TYPED_TEST_SUITE_P
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+#define INSTANTIATE_TYPED_TEST_SUITE_P(Prefix, SuiteName, Types, ...) \
+ static_assert(sizeof(GTEST_STRINGIFY_(Prefix)) > 1, \
+ "test-suit-prefix must not be empty"); \
+ static bool gtest_##Prefix##_##SuiteName GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTestSuite< \
+ SuiteName, GTEST_SUITE_NAMESPACE_(SuiteName)::gtest_AllTests_, \
+ ::testing::internal::GenerateTypeList<Types>::type>:: \
+ Register(GTEST_STRINGIFY_(Prefix), \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+ &GTEST_TYPED_TEST_SUITE_P_STATE_(SuiteName), \
+ GTEST_STRINGIFY_(SuiteName), \
+ GTEST_REGISTERED_TEST_NAMES_(SuiteName), \
+ ::testing::internal::GenerateNames< \
+ ::testing::internal::NameGeneratorSelector< \
+ __VA_ARGS__>::type, \
+ ::testing::internal::GenerateTypeList<Types>::type>())
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+#define INSTANTIATE_TYPED_TEST_CASE_P \
+ static_assert( \
+ ::testing::internal::InstantiateTypedTestCase_P_IsDeprecated(), ""); \
+ INSTANTIATE_TYPED_TEST_SUITE_P
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4251 \
+/* class A needs to have dll-interface to be used by clients of class B */)
+
+namespace testing {
+
+// Silence C4100 (unreferenced formal parameter) and C4805
+// (unsafe mix of type 'const int' and type 'const bool').
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4805)
+# pragma warning(disable:4100)
+#endif
+
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag invokes the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag controls whether the test runner should continue execution past
+// the first failure.
+GTEST_DECLARE_bool_(fail_fast);
+
+// This flag sets up the filter that selects, by full name and using glob
+// patterns, the tests to run. If the filter is not given, all tests are
+// executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag controls whether Google Test installs a signal handler that dumps
+// debugging information when fatal signals are raised.
+GTEST_DECLARE_bool_(install_failure_signal_handler);
+
+// This flag causes Google Test to list the available tests. None of the
+// listed tests are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints only test failures.
+GTEST_DECLARE_bool_(brief);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag controls whether Google Test prints UTF8 characters as text.
+GTEST_DECLARE_bool_(print_utf8);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise. For use with an external test framework.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+GTEST_DECLARE_string_(flagfile);
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
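+
+// Each flag declared above is controlled by a --gtest_* command-line option
+// and a corresponding GTEST_* environment variable. An illustrative
+// invocation (the binary and test names below are placeholders):
+//
+//   ./my_test --gtest_filter=FooTest.*-FooTest.Flaky \
+//             --gtest_repeat=10 --gtest_shuffle \
+//             --gtest_output=xml:results.xml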
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class FuchsiaDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const std::string& message);
+std::set<std::string>* GetIgnoredParameterizedTestSuites();
+
+} // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward-declare them, the compiler might confuse the classes
+// in friendship clauses with same-named classes in the enclosing scope.
+class Test;
+class TestSuite;
+
+// Old API is still available but deprecated
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+using TestCase = TestSuite;
+#endif
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones, so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+
+// C4800 is a level 3 warning in Visual Studio 2015 and earlier.
+// This warning is not emitted in Visual Studio 2017.
+// This warning is off by default starting in Visual Studio 2019 but can be
+// enabled with command-line options.
+#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920)
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)
+#endif
+
+  // Used in EXPECT_TRUE/FALSE(bool_expression).
+ //
+ // T must be contextually convertible to bool.
+ //
+ // The second parameter prevents this overload from being considered if
+ // the argument is implicitly convertible to AssertionResult. In that case
+ // we want AssertionResult's copy constructor to be used.
+ template <typename T>
+ explicit AssertionResult(
+ const T& success,
+ typename std::enable_if<
+ !std::is_convertible<T, AssertionResult>::value>::type*
+ /*enabler*/
+ = nullptr)
+ : success_(success) {}
+
+#if defined(_MSC_VER) && (_MSC_VER < 1910 || _MSC_VER >= 1920)
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif
+
+ // Assignment operator.
+ AssertionResult& operator=(AssertionResult other) {
+ swap(other);
+ return *this;
+ }
+
+ // Returns true if and only if the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != nullptr ? message_->c_str() : "";
+ }
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value) {
+ AppendMessage(Message() << value);
+ return *this;
+ }
+
+ // Allows streaming basic output manipulators such as endl or flush into
+ // this object.
+ AssertionResult& operator<<(
+ ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+ AppendMessage(Message() << basic_manipulator);
+ return *this;
+ }
+
+ private:
+ // Appends the contents of message to message_.
+ void AppendMessage(const Message& a_message) {
+ if (message_.get() == nullptr) message_.reset(new ::std::string);
+ message_->append(a_message.GetString().c_str());
+ }
+
+ // Swap the contents of this AssertionResult with other.
+ void swap(AssertionResult& other);
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ std::unique_ptr< ::std::string> message_;
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+} // namespace testing
+
+// Includes the auto-generated header that implements a family of generic
+// predicate assertion macros. This include comes late because it relies on
+// APIs declared above.
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 01/02/2019 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+// GOOGLETEST_CM0001 DO NOT DELETE
+
+#ifndef GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+
+namespace testing {
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+// ASSERT_PRED_FORMAT1(pred_format, v1)
+// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+// ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult. See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+// ASSERT_PRED1(pred, v1)
+// ASSERT_PRED2(pred, v1, v2)
+// ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar = (expression)) \
+ ; \
+ else \
+ on_failure(gtest_ar.failure_message())
+
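+// (GTEST_AMBIGUOUS_ELSE_BLOCKER_ above guards against the dangling-else
+// ambiguity, so that GTEST_ASSERT_ expands safely when used inside an
+// unbraced if/else in user code.)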
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+ const char* e1,
+ Pred pred,
+ const T1& v1) {
+ if (pred(v1)) return AssertionSuccess();
+
+ return AssertionFailure()
+ << pred_text << "(" << e1 << ") evaluates to false, where"
+ << "\n"
+ << e1 << " evaluates to " << ::testing::PrintToString(v1);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, v1), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+ #v1, \
+ pred, \
+ v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
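+
+// For example, with a plain predicate and a predicate-formatter (IsPositive
+// and IsPositiveFormat are placeholder names):
+//
+//   bool IsPositive(int n) { return n > 0; }
+//
+//   testing::AssertionResult IsPositiveFormat(const char* expr, int n) {
+//     if (n > 0) return testing::AssertionSuccess();
+//     return testing::AssertionFailure()
+//            << "Expected: " << expr << " > 0\n  Actual: " << n;
+//   }
+//
+//   EXPECT_PRED1(IsPositive, x);               // Prints x on failure.
+//   EXPECT_PRED_FORMAT1(IsPositiveFormat, x);  // Uses the custom message.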
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ Pred pred,
+ const T1& v1,
+ const T2& v2) {
+ if (pred(v1, v2)) return AssertionSuccess();
+
+ return AssertionFailure()
+ << pred_text << "(" << e1 << ", " << e2
+ << ") evaluates to false, where"
+ << "\n"
+ << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+ << e2 << " evaluates to " << ::testing::PrintToString(v2);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+ #v1, \
+ #v2, \
+ pred, \
+ v1, \
+ v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3) {
+ if (pred(v1, v2, v3)) return AssertionSuccess();
+
+ return AssertionFailure()
+ << pred_text << "(" << e1 << ", " << e2 << ", " << e3
+ << ") evaluates to false, where"
+ << "\n"
+ << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+ << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
+ << e3 << " evaluates to " << ::testing::PrintToString(v3);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ pred, \
+ v1, \
+ v2, \
+ v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4) {
+ if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+ return AssertionFailure()
+ << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
+ << ") evaluates to false, where"
+ << "\n"
+ << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+ << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
+ << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
+ << e4 << " evaluates to " << ::testing::PrintToString(v4);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ const char* e5,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4,
+ const T5& v5) {
+ if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+ return AssertionFailure()
+ << pred_text << "(" << e1 << ", " << e2 << ", " << e3 << ", " << e4
+ << ", " << e5 << ") evaluates to false, where"
+ << "\n"
+ << e1 << " evaluates to " << ::testing::PrintToString(v1) << "\n"
+ << e2 << " evaluates to " << ::testing::PrintToString(v2) << "\n"
+ << e3 << " evaluates to " << ::testing::PrintToString(v3) << "\n"
+ << e4 << " evaluates to " << ::testing::PrintToString(v4) << "\n"
+ << e5 << " evaluates to " << ::testing::PrintToString(v5);
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ #v5, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4, \
+ v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+} // namespace testing
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+namespace testing {
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestSuites, and
+// each TestSuite contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in a TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// void SetUp() override { ... }
+// void TearDown() override { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class TestInfo;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test suite.
+ //
+ // Google Test will call Foo::SetUpTestSuite() before running the first
+ // test in test suite Foo. Hence a sub-class can define its own
+ // SetUpTestSuite() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestSuite() {}
+
+ // Tears down the stuff shared by all tests in this test suite.
+ //
+ // Google Test will call Foo::TearDownTestSuite() after running the last
+ // test in test suite Foo. Hence a sub-class can define its own
+ // TearDownTestSuite() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestSuite() {}
+
+ // Legacy API is deprecated but still available. Use SetUpTestSuite and
+ // TearDownTestSuite instead.
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ static void TearDownTestCase() {}
+ static void SetUpTestCase() {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Returns true if and only if the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true if and only if the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true if and only if the current test was skipped.
+ static bool IsSkipped();
+
+  // Returns true if and only if the current test has a failure (either
+  // fatal or non-fatal).
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
+ // Logs a property for the current test, test suite, or for the entire
+ // invocation of the test program when used outside of the context of a
+ // test suite. Only the last value for a given key is remembered. These
+ // are public static so they can be called from utility functions that are
+ // not members of the test fixture. Calls to RecordProperty made during
+ // lifespan of the test (from the moment its constructor starts to the
+ // moment its destructor finishes) will be output in XML as attributes of
+ // the <testcase> element. Properties recorded from fixture's
+ // SetUpTestSuite or TearDownTestSuite are logged as attributes of the
+ // corresponding <testsuite> element. Calls to RecordProperty made in the
+ // global context (before or after invocation of RUN_ALL_TESTS and from
+ // SetUp/TearDown method of Environment objects registered with Google
+ // Test) will be output as attributes of the <testsuites> element.
+ static void RecordProperty(const std::string& key, const std::string& value);
+ static void RecordProperty(const std::string& key, int value);
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true if and only if the current test has the same fixture class
+ // as the first test in the current test suite.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Deletes self. We deliberately pick an unusual name for this
+ // internal method to avoid clashing with names used in user TESTs.
+ void DeleteSelf_() { delete this; }
+
+ const std::unique_ptr<GTEST_FLAG_SAVER_> gtest_flag_saver_;
+
+ // Often a user misspells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if void Setup() is declared in the user's
+ // test fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if the method is called from the user's test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return nullptr; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
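+
+// For example, a fixture that shares an expensive resource between all the
+// tests of a suite might look like this (QueryTest and Connection are
+// placeholder names):
+//
+//   class QueryTest : public testing::Test {
+//    protected:
+//     static void SetUpTestSuite() { connection_ = new Connection; }
+//     static void TearDownTestSuite() {
+//       delete connection_;
+//       connection_ = nullptr;
+//     }
+//     static Connection* connection_;
+//   };
+//
+//   Connection* QueryTest::connection_ = nullptr;
+//
+//   TEST_F(QueryTest, IsConnected) { ASSERT_NE(nullptr, connection_); }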
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const std::string& a_key, const std::string& a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const std::string& new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ std::string key_;
+ // The value supplied by the user.
+ std::string value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true if and only if the test passed (i.e. no test part failed).
+ bool Passed() const { return !Skipped() && !Failed(); }
+
+ // Returns true if and only if the test was skipped.
+ bool Skipped() const;
+
+ // Returns true if and only if the test failed.
+ bool Failed() const;
+
+ // Returns true if and only if the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true if and only if the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Gets the time of the test case start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+ // Returns the i-th test part result among all the results. i can range from 0
+ // to total_part_count() - 1. If i is not in that range, aborts the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class TestSuite;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+ friend class internal::FuchsiaDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the start time.
+ void set_start_timestamp(TimeInMillis start) { start_timestamp_ = start; }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key. xml_element specifies the element for which the property is being
+ // recorded and is used for validation.
+ void RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testsuite tags. Returns true if the property is valid.
+ // FIXME: Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properties_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The start time, in milliseconds since UNIX Epoch.
+ TimeInMillis start_timestamp_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+// Test suite name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test suite name.
+ const char* test_suite_name() const { return test_suite_name_.c_str(); }
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ const char* test_case_name() const { return test_suite_name(); }
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a typed
+ // or a type-parameterized test.
+ const char* type_param() const {
+ if (type_param_.get() != nullptr) return type_param_->c_str();
+ return nullptr;
+ }
+
+ // Returns the text representation of the value parameter, or NULL if this
+ // is not a value-parameterized test.
+ const char* value_param() const {
+ if (value_param_.get() != nullptr) return value_param_->c_str();
+ return nullptr;
+ }
+
+ // Returns the file name where this test is defined.
+ const char* file() const { return location_.file.c_str(); }
+
+ // Returns the line where this test is defined.
+ int line() const { return location_.line; }
+
+  // Returns true if this test should not be run because it's in another shard.
+ bool is_in_another_shard() const { return is_in_another_shard_; }
+
+ // Returns true if this test should run, that is if the test is not
+ // disabled (or it is disabled but the also_run_disabled_tests flag has
+ // been specified) and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test suite Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const { return should_run_; }
+
+ // Returns true if and only if this test will appear in the XML report.
+ bool is_reportable() const {
+ // The XML report includes tests matching the filter, excluding those
+ // run in other shards.
+ return matches_filter_ && !is_in_another_shard_;
+ }
+
+ // Returns the result of the test.
+ const TestResult* result() const { return &result_; }
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestSuite;
+ friend class internal::UnitTestImpl;
+ friend class internal::StreamingListenerTest;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_suite_name, const char* name, const char* type_param,
+ const char* value_param, internal::CodeLocation code_location,
+ internal::TypeId fixture_class_id, internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const std::string& test_suite_name, const std::string& name,
+ const char* a_type_param, // NULL if not a type-parameterized test
+ const char* a_value_param, // NULL if not a value-parameterized test
+ internal::CodeLocation a_code_location,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count() {
+ return result_.increment_death_test_count();
+ }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+  // Skips and records the test result for this object.
+ void Skip();
+
+ static void ClearTestResult(TestInfo* test_info) {
+ test_info->result_.Clear();
+ }
+
+ // These fields are immutable properties of the test.
+ const std::string test_suite_name_; // test suite name
+ const std::string name_; // Test name
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const std::unique_ptr<const ::std::string> type_param_;
+ // Text representation of the value parameter, or NULL if this is not a
+ // value-parameterized test.
+ const std::unique_ptr<const ::std::string> value_param_;
+ internal::CodeLocation location_;
+ const internal::TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True if and only if this test should run
+ bool is_disabled_; // True if and only if this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ bool is_in_another_shard_; // Will be run in another shard.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
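+
+// For example, a running test can inspect its own TestInfo (MetaTest is a
+// placeholder name; assumes <cstdio> for printf):
+//
+//   TEST(MetaTest, KnowsItsOwnName) {
+//     const testing::TestInfo* const info =
+//         testing::UnitTest::GetInstance()->current_test_info();
+//     printf("Running %s.%s\n", info->test_suite_name(), info->name());
+//   }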
+
+// A test suite, which consists of a vector of TestInfos.
+//
+// TestSuite is not copyable.
+class GTEST_API_ TestSuite {
+ public:
+ // Creates a TestSuite with the given name.
+ //
+ // TestSuite does NOT have a default constructor. Always use this
+ // constructor to create a TestSuite object.
+ //
+ // Arguments:
+ //
+ // name: name of the test suite
+ // a_type_param: the name of the test's type parameter, or NULL if
+ // this is not a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test suite
+ // tear_down_tc: pointer to the function that tears down the test suite
+ TestSuite(const char* name, const char* a_type_param,
+ internal::SetUpTestSuiteFunc set_up_tc,
+ internal::TearDownTestSuiteFunc tear_down_tc);
+
+ // Destructor of TestSuite.
+ virtual ~TestSuite();
+
+ // Gets the name of the TestSuite.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a
+ // type-parameterized test suite.
+ const char* type_param() const {
+ if (type_param_.get() != nullptr) return type_param_->c_str();
+ return nullptr;
+ }
+
+ // Returns true if any test in this test suite should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test suite.
+ int successful_test_count() const;
+
+ // Gets the number of skipped tests in this test suite.
+ int skipped_test_count() const;
+
+ // Gets the number of failed tests in this test suite.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests in this test suite.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Get the number of tests in this test suite that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test suite.
+ int total_test_count() const;
+
+ // Returns true if and only if the test suite passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true if and only if the test suite failed.
+ bool Failed() const {
+ return failed_test_count() > 0 || ad_hoc_test_result().Failed();
+ }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Gets the time of the test suite start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ // Returns the TestResult that holds test properties recorded during
+ // execution of SetUpTestSuite and TearDownTestSuite.
+ const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestSuite.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestSuite.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test suite. Will delete the TestInfo upon
+ // destruction of the TestSuite object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test suite.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test suite.
+ static void ClearTestSuiteResult(TestSuite* test_suite) {
+ test_suite->ClearResult();
+ }
+
+ // Runs every test in this TestSuite.
+ void Run();
+
+ // Skips the execution of tests under this TestSuite
+ void Skip();
+
+ // Runs SetUpTestSuite() for this TestSuite. This wrapper is needed
+ // for catching exceptions thrown from SetUpTestSuite().
+ void RunSetUpTestSuite() {
+ if (set_up_tc_ != nullptr) {
+ (*set_up_tc_)();
+ }
+ }
+
+ // Runs TearDownTestSuite() for this TestSuite. This wrapper is
+ // needed for catching exceptions thrown from TearDownTestSuite().
+ void RunTearDownTestSuite() {
+ if (tear_down_tc_ != nullptr) {
+ (*tear_down_tc_)();
+ }
+ }
+
+ // Returns true if and only if test passed.
+ static bool TestPassed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Passed();
+ }
+
+ // Returns true if and only if test skipped.
+ static bool TestSkipped(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Skipped();
+ }
+
+ // Returns true if and only if test failed.
+ static bool TestFailed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Failed();
+ }
+
+ // Returns true if and only if the test is disabled and will be reported in
+ // the XML report.
+ static bool TestReportableDisabled(const TestInfo* test_info) {
+ return test_info->is_reportable() && test_info->is_disabled_;
+ }
+
+ // Returns true if and only if test is disabled.
+ static bool TestDisabled(const TestInfo* test_info) {
+ return test_info->is_disabled_;
+ }
+
+ // Returns true if and only if this test will appear in the XML report.
+ static bool TestReportable(const TestInfo* test_info) {
+ return test_info->is_reportable();
+ }
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo* test_info) {
+ return test_info->should_run();
+ }
+
+ // Shuffles the tests in this test suite.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test suite.
+ std::string name_;
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const std::unique_ptr<const ::std::string> type_param_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test suite.
+ internal::SetUpTestSuiteFunc set_up_tc_;
+ // Pointer to the function that tears down the test suite.
+ internal::TearDownTestSuiteFunc tear_down_tc_;
+ // True if and only if any test in this test suite should run.
+ bool should_run_;
+ // The start time, in milliseconds since UNIX Epoch.
+ TimeInMillis start_timestamp_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+ // Holds test properties recorded during execution of SetUpTestSuite and
+ // TearDownTestSuite.
+ TestResult ad_hoc_test_result_;
+
+ // We disallow copying TestSuites.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestSuite);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. You should subclass this to define your own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return nullptr; }
+};
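+
+// For example (MyEnvironment is a placeholder name):
+//
+//   class MyEnvironment : public testing::Environment {
+//    public:
+//     void SetUp() override { /* acquire shared resources */ }
+//     void TearDown() override { /* release them */ }
+//   };
+//
+//   // In main(), before RUN_ALL_TESTS(); Google Test takes ownership of the
+//   // registered environment:
+//   testing::AddGlobalTestEnvironment(new MyEnvironment);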
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Exception which can be thrown from TestEventListener::OnTestPartResult.
+class GTEST_API_ AssertionException
+ : public internal::GoogleTestFailureException {
+ public:
+ explicit AssertionException(const TestPartResult& result)
+ : GoogleTestFailureException(result) {}
+};
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test suite starts.
+ virtual void OnTestSuiteStart(const TestSuite& /*test_suite*/) {}
+
+ // Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCEED() invocation.
+ // If you want to throw an exception from this function to skip to the next
+ // TEST, it must be AssertionException defined above, or inherited from it.
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test suite ends.
+ virtual void OnTestSuiteEnd(const TestSuite& /*test_suite*/) {}
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// A convenience class for users who need to override just one or two
+// methods, and who accept that a possible change to a signature of the
+// methods they override will not be caught during the build. For
+// comments about each method please see the definition of TestEventListener
+// above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ void OnTestProgramStart(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) override {}
+ void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) override {}
+ void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {}
+ void OnTestSuiteStart(const TestSuite& /*test_suite*/) override {}
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseStart(const TestCase& /*test_case*/) override {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnTestStart(const TestInfo& /*test_info*/) override {}
+ void OnTestPartResult(const TestPartResult& /*test_part_result*/) override {}
+ void OnTestEnd(const TestInfo& /*test_info*/) override {}
+ void OnTestSuiteEnd(const TestSuite& /*test_suite*/) override {}
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ void OnTestCaseEnd(const TestCase& /*test_case*/) override {}
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) override {}
+ void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {}
+ void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) override {}
+ void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {}
+};
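+
+// For example, a listener that prints only failed assertions could be
+// sketched as follows (FailurePrinter is a placeholder name; assumes
+// <cstdio> for printf):
+//
+//   class FailurePrinter : public testing::EmptyTestEventListener {
+//     void OnTestPartResult(const testing::TestPartResult& result) override {
+//       if (result.failed()) {
+//         printf("%s:%d: failure\n%s\n", result.file_name(),
+//                result.line_number(), result.summary());
+//       }
+//     }
+//   };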
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestSuite;
+ friend class TestInfo;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::UnitTestImpl;
+
+  // Returns the repeater that broadcasts the TestEventListener events to all
+  // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
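+
+// For example, to replace the default console printer with a custom listener
+// such as the FailurePrinter sketched above:
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     testing::TestEventListeners& listeners =
+//         testing::UnitTest::GetInstance()->listeners();
+//     delete listeners.Release(listeners.default_result_printer());
+//     listeners.Append(new FailurePrinter);  // Google Test takes ownership.
+//     return RUN_ALL_TESTS();
+//   }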
+
+// A UnitTest consists of a vector of TestSuites.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_;
+
+ // Returns the working directory when the first TEST() or TEST_F()
+ // was executed. The UnitTest object owns the string.
+ const char* original_working_dir() const;
+
+ // Returns the TestSuite object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestSuite* current_test_suite() const GTEST_LOCK_EXCLUDED_(mutex_);
+
+// Legacy API is still available but deprecated
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ const TestCase* current_test_case() const GTEST_LOCK_EXCLUDED_(mutex_);
+#endif
+
+ // Returns the TestInfo object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestInfo* current_test_info() const
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+ // Returns the ParameterizedTestSuiteRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::ParameterizedTestSuiteRegistry& parameterized_test_registry()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Gets the number of successful test suites.
+ int successful_test_suite_count() const;
+
+ // Gets the number of failed test suites.
+ int failed_test_suite_count() const;
+
+ // Gets the number of all test suites.
+ int total_test_suite_count() const;
+
+ // Gets the number of all test suites that contain at least one test
+ // that should run.
+ int test_suite_to_run_count() const;
+
+ // Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ int successful_test_case_count() const;
+ int failed_test_case_count() const;
+ int total_test_case_count() const;
+ int test_case_to_run_count() const;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of skipped tests.
+ int skipped_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true if and only if the unit test passed (i.e. all test suites
+ // passed).
+ bool Passed() const;
+
+ // Returns true if and only if the unit test failed (i.e. some test suite
+ // failed or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test suite among all the test suites. i can range from 0 to
+ // total_test_suite_count() - 1. If i is not in that range, returns NULL.
+ const TestSuite* GetTestSuite(int i) const;
+
+// Legacy API is deprecated but still available
+#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+ const TestCase* GetTestCase(int i) const;
+#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
+
+ // Returns the TestResult containing information on test failures and
+ // properties logged outside of individual test suites.
+ const TestResult& ad_hoc_test_result() const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Adds a TestProperty to the current TestResult object when invoked from
+ // inside a test, to current TestSuite's ad_hoc_test_result_ when invoked
+ // from SetUpTestSuite or TearDownTestSuite, or to the global property set
+ // when invoked elsewhere. If the result already contains a property with
+ // the same key, the value will be updated.
+ void RecordProperty(const std::string& key, const std::string& value);
+
+ // Gets the i-th test suite among all the test suites. i can range from 0 to
+ // total_test_suite_count() - 1. If i is not in that range, returns NULL.
+ TestSuite* GetMutableTestSuite(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+ // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class ScopedTrace;
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::StreamingListenerTest;
+ friend class internal::UnitTestRecordPropertyTestHelper;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend std::set<std::string>* internal::GetIgnoredParameterizedTestSuites();
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const std::string& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect. For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend you to write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
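+
+// A minimal sketch of the recommended pattern (FooEnvironment is a
+// hypothetical Environment subclass):
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     testing::AddGlobalTestEnvironment(new FooEnvironment);
+//     return RUN_ALL_TESTS();
+//   }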
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function a second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
+
+// This overloaded version can be used on Arduino/embedded platforms where
+// there is no argc/argv.
+GTEST_API_ void InitGoogleTest();
+
+namespace internal {
+
+// Separate the error-generating code from the code path to reduce the stack
+// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_* in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQFailure(const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs, const T2& rhs) {
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ FormatForComparisonFailureMessage(lhs, rhs),
+ FormatForComparisonFailureMessage(rhs, lhs),
+ false);
+}
+
+// This block of code defines operator==/!= for faketype so that ordinary
+// lexical-scope lookup stops here and cannot pick up invalid operator==/!=
+// defined at an outer namespace scope.
+struct faketype {};
+inline bool operator==(faketype, faketype) { return true; }
+inline bool operator!=(faketype, faketype) { return false; }
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs,
+ const T2& rhs) {
+ if (lhs == rhs) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <
+ typename T1, typename T2,
+ // Disable this overload for cases where one argument is a pointer
+ // and the other is the null pointer constant.
+ typename std::enable_if<!std::is_integral<T1>::value ||
+ !std::is_pointer<T2>::value>::type* = nullptr>
+ static AssertionResult Compare(const char* lhs_expression,
+ const char* rhs_expression, const T1& lhs,
+ const T2& rhs) {
+ return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* lhs_expression,
+ const char* rhs_expression,
+ BiggestInt lhs,
+ BiggestInt rhs) {
+ return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+ }
+
+ template <typename T>
+ static AssertionResult Compare(
+ const char* lhs_expression, const char* rhs_expression,
+ // Handle cases where '0' is used as a null pointer literal.
+ std::nullptr_t /* lhs */, T* rhs) {
+ // We already know that 'lhs' is a null pointer.
+ return CmpHelperEQ(lhs_expression, rhs_expression, static_cast<T*>(nullptr),
+ rhs);
+ }
+};
+
+// Separate the error-generating code from the code path to reduce the stack
+// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_OP in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
+ const T1& val1, const T2& val2,
+ const char* op) {
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") " << op << " (" << expr2
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
+ }\
+}
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <)
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >)
+
+#undef GTEST_IMPL_CMP_HELPER_
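+
+// For illustration, the NE instantiation above expands to roughly this
+// (a sketch, not part of the header):
+//
+//   template <typename T1, typename T2>
+//   AssertionResult CmpHelperNE(const char* expr1, const char* expr2,
+//                               const T1& val1, const T2& val2) {
+//     if (val1 != val2) return AssertionSuccess();
+//     return CmpHelperOpFailure(expr1, expr2, val1, val2, "!=");
+//   }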
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
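+
+// For example (a sketch; `haystack` is a hypothetical std::string):
+//
+//   EXPECT_PRED_FORMAT2(testing::IsSubstring, "needle", haystack);
+//   ASSERT_PRED_FORMAT2(testing::IsNotSubstring, "needle", haystack);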
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ RawType lhs_value,
+ RawType rhs_value) {
+ const FloatingPoint<RawType> lhs(lhs_value), rhs(rhs_value);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ ::std::stringstream lhs_ss;
+ lhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << lhs_value;
+
+ ::std::stringstream rhs_ss;
+ rhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << rhs_value;
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ StringStreamToString(&lhs_ss),
+ StringStreamToString(&rhs_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ std::string const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ AssertHelperData* const data_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+} // namespace internal
+
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+// protected:
+// FooTest() {
+// // Can use GetParam() here.
+// }
+// ~FooTest() override {
+// // Can use GetParam() here.
+// }
+// void SetUp() override {
+// // Can use GetParam() here.
+// }
+// void TearDown() override {
+// // Can use GetParam() here.
+// }
+// };
+// TEST_P(FooTest, DoesBar) {
+// // Can use GetParam() method here.
+// Foo foo;
+// ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_SUITE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+ typedef T ParamType;
+ virtual ~WithParamInterface() {}
+
+ // The current parameter value. It is also available in the test fixture's
+ // constructor.
+ static const ParamType& GetParam() {
+ GTEST_CHECK_(parameter_ != nullptr)
+ << "GetParam() can only be called inside a value-parameterized test "
+ << "-- did you intend to write TEST_P instead of TEST_F?";
+ return *parameter_;
+ }
+
+ private:
+ // Sets parameter value. The caller is responsible for making sure the value
+ // remains alive and unchanged throughout the current test.
+ static void SetParam(const ParamType* parameter) {
+ parameter_ = parameter;
+ }
+
+ // Static value used for accessing the parameter during a test's lifetime.
+ static const ParamType* parameter_;
+
+ // TestClass must be a subclass of WithParamInterface<T> and Test.
+ template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = nullptr;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+// Macros for indicating success/failure in test code.
+
+// Skips the test at runtime.
+// Skipping a test aborts the current function.
+// Skipped tests are neither successful nor failed.
+#define GTEST_SKIP() GTEST_SKIP_("")
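+
+// For example (a sketch):
+//
+//   TEST(SkipTest, DoesSkip) {
+//     GTEST_SKIP() << "skipping this test";
+//     EXPECT_EQ(0, 1);  // not executed
+//   }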
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+// EXPECT_TRUE verifies that a Boolean condition is true.
+// EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure. People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a nonfatal failure at the given source file location with
+// a generic message.
+#define ADD_FAILURE_AT(file, line) \
+ GTEST_MESSAGE_AT_(file, line, "Failed", \
+ ::testing::TestPartResult::kNonFatalFailure)
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Like GTEST_FAIL(), but at the given source file location.
+#define GTEST_FAIL_AT(file, line) \
+ GTEST_MESSAGE_AT_(file, line, "Failed", \
+ ::testing::TestPartResult::kFatalFailure)
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+# define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+# define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+// Tests that the statement throws the expected exception.
+// * {ASSERT|EXPECT}_NO_THROW(statement):
+// Tests that the statement doesn't throw any exception.
+// * {ASSERT|EXPECT}_ANY_THROW(statement):
+// Tests that the statement throws an exception.
+
+#define EXPECT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
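+
+// For example (a sketch; ThrowingFunc is a hypothetical function that
+// throws std::out_of_range):
+//
+//   EXPECT_THROW(ThrowingFunc(), std::out_of_range);
+//   EXPECT_NO_THROW(std::string("ok"));
+//   EXPECT_ANY_THROW(ThrowingFunc());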
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define GTEST_EXPECT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_NONFATAL_FAILURE_)
+#define GTEST_EXPECT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_NONFATAL_FAILURE_)
+#define GTEST_ASSERT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_FATAL_FAILURE_)
+#define GTEST_ASSERT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_FATAL_FAILURE_)
+
+// Define these macros to 1 to omit the definition of the corresponding
+// EXPECT or ASSERT, which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_EXPECT_TRUE
+#define EXPECT_TRUE(condition) GTEST_EXPECT_TRUE(condition)
+#endif
+
+#if !GTEST_DONT_DEFINE_EXPECT_FALSE
+#define EXPECT_FALSE(condition) GTEST_EXPECT_FALSE(condition)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_TRUE
+#define ASSERT_TRUE(condition) GTEST_ASSERT_TRUE(condition)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_FALSE
+#define ASSERT_FALSE(condition) GTEST_ASSERT_FALSE(condition)
+#endif
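+
+// For example (a sketch; `list` is a hypothetical container):
+//
+//   EXPECT_TRUE(2 + 2 == 4) << "arithmetic is broken";
+//   ASSERT_FALSE(list.empty());  // aborts the current test on failure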
+
+// Macros for testing equalities and inequalities.
+//
+// * {ASSERT|EXPECT}_EQ(v1, v2): Tests that v1 == v2
+// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values. The values must be compatible built-in types,
+// or you will get a compiler error. By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+// 1. It is possible to make a user-defined type work with
+// {ASSERT|EXPECT}_??(), but that requires overloading the
+// comparison operators and is thus discouraged by the Google C++
+// Usage Guide. Therefore, you are advised to use the
+// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+// equal.
+//
+// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+// pointers (in particular, C strings). Therefore, if you use it
+// with two C strings, you are testing how their locations in memory
+// are related, not how their content is related. To compare two C
+// strings by content, use {ASSERT|EXPECT}_STR*().
+//
+// 3. {ASSERT|EXPECT}_EQ(v1, v2) is preferred to
+// {ASSERT|EXPECT}_TRUE(v1 == v2), as the former tells you
+// what the actual value is when it fails, and similarly for the
+// other comparisons.
+//
+// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+// evaluate their arguments, which is undefined.
+//
+// 5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+// EXPECT_NE(Foo(), 5);
+// EXPECT_EQ(a_pointer, NULL);
+// ASSERT_LT(i, array_size);
+// ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::EqHelper::Compare, val1, val2)
+#define EXPECT_NE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define EXPECT_LE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::EqHelper::Compare, val1, val2)
+#define GTEST_ASSERT_NE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C-string Comparisons. All tests treat NULL and any non-NULL string
+// as different. Two NULLs are equal.
+//
+// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
+// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
+// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
+
+#define EXPECT_STREQ(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define EXPECT_STRNE(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define EXPECT_STRCASENE(s1, s2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define ASSERT_STRNE(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define ASSERT_STRCASENE(s1, s2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
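+
+// For example (a sketch):
+//
+//   EXPECT_STREQ("hello", "hello");      // passes: same content
+//   EXPECT_STRNE("hello", "world");      // passes: different content
+//   EXPECT_STRCASEEQ("Hello", "HELLO");  // passes: case is ignored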
+
+// Macros for comparing floating-point numbers.
+//
+// * {ASSERT|EXPECT}_FLOAT_EQ(val1, val2):
+// Tests that two float values are almost equal.
+// * {ASSERT|EXPECT}_DOUBLE_EQ(val1, val2):
+// Tests that two double values are almost equal.
+// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+// Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands. See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(val1, val2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ val1, val2)
+
+#define EXPECT_DOUBLE_EQ(val1, val2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ val1, val2)
+
+#define ASSERT_FLOAT_EQ(val1, val2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ val1, val2)
+
+#define ASSERT_DOUBLE_EQ(val1, val2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ val1, val2)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+ EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+ ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
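+
+// For example (a sketch):
+//
+//   EXPECT_FLOAT_EQ(1.0f, 1.0f);          // passes: within 4 ULPs
+//   EXPECT_NEAR(3.14159, 3.1416, 0.001);  // passes: |difference| <= 0.001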
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success. These are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+// EXPECT_NO_FATAL_FAILURE(Process());
+// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the given source file path and line number,
+// and the given message) to be included in every test failure message generated
+// by code in the scope of the lifetime of an instance of this class. The effect
+// is undone with the destruction of the instance.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// Example:
+// testing::ScopedTrace trace("file.cc", 123, "message");
+//
+class GTEST_API_ ScopedTrace {
+ public:
+ // The c'tor pushes the given source file location and message onto
+ // a trace stack maintained by Google Test.
+
+ // Template version. Uses Message() to convert the values into strings.
+ // Slow, but flexible.
+ template <typename T>
+ ScopedTrace(const char* file, int line, const T& message) {
+ PushTrace(file, line, (Message() << message).GetString());
+ }
+
+ // Optimize for some known types.
+ ScopedTrace(const char* file, int line, const char* message) {
+ PushTrace(file, line, message ? message : "(null)");
+ }
+
+ ScopedTrace(const char* file, int line, const std::string& message) {
+ PushTrace(file, line, message);
+ }
+
+ // The d'tor pops the info pushed by the c'tor.
+ //
+ // Note that the d'tor is not virtual in order to be efficient.
+ // Don't inherit from ScopedTrace!
+ ~ScopedTrace();
+
+ private:
+ void PushTrace(const char* file, int line, std::string message);
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
+ // c'tor and d'tor. Therefore it doesn't
+ // need to be used otherwise.
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope. The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+//
+// Each thread maintains its own stack of traces; therefore, a
+// SCOPED_TRACE() (correctly) affects only the assertions in its own
+// thread.
+#define SCOPED_TRACE(message) \
+ ::testing::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+ __FILE__, __LINE__, (message))
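+
+// For example (a sketch; Sub1 is a hypothetical helper containing
+// assertions):
+//
+//   void Sub1(int n) { EXPECT_GT(n, 0); }
+//
+//   TEST(FooTest, Bar) {
+//     {
+//       SCOPED_TRACE("first round");  // failures in this scope mention it
+//       Sub1(0);
+//     }
+//     Sub1(0);  // a failure here carries no trace
+//   }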
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles if and only if type1 and type2
+// are the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+constexpr bool StaticAssertTypeEq() noexcept {
+ static_assert(std::is_same<T1, T2>::value, "T1 and T2 are not the same type");
+ return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test suite, and the second
+// parameter is the name of the test within the test suite.
+//
+// The convention is to end the test suite name with "Test". For
+// example, a test suite for the Foo class can be named FooTest.
+//
+// Test code should appear between braces after an invocation of
+// this macro. Example:
+//
+// TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test. This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X. The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code. GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_suite_name, test_name) \
+ GTEST_TEST_(test_suite_name, test_name, ::testing::Test, \
+ ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+#define TEST(test_suite_name, test_name) GTEST_TEST(test_suite_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test suite name. The second parameter is the
+// name of the test within the test suite.
+//
+// A test fixture class must be declared earlier. The user should put
+// the test code between braces after using this macro. Example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// void SetUp() override { b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(a_.size(), 0);
+// EXPECT_EQ(b_.size(), 1);
+// }
+//
+// GOOGLETEST_CM0011 DO NOT DELETE
+#if !GTEST_DONT_DEFINE_TEST
+#define TEST_F(test_fixture, test_name)\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
+ ::testing::internal::GetTypeId<test_fixture>())
+#endif // !GTEST_DONT_DEFINE_TEST
+
+// Returns a path to a temporary directory.
+// Tries to determine an appropriate directory for the platform.
+GTEST_API_ std::string TempDir();
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+// Dynamically registers a test with the framework.
+//
+// This is an advanced API only to be used when the `TEST` macros are
+// insufficient. The macros should be preferred when possible, as they avoid
+// most of the complexity of calling this function.
+//
+// The `factory` argument is a factory callable (move-constructible) object or
+// function pointer that creates a new instance of the Test object. It
+// hands ownership of the new instance to the caller. The callable's signature is
+// `Fixture*()`, where `Fixture` is the test fixture class for the test. All
+// tests registered with the same `test_suite_name` must return the same
+// fixture type. This is checked at runtime.
+//
+// The framework will infer the fixture class from the factory and will call
+// the `SetUpTestSuite` and `TearDownTestSuite` for it.
+//
+// Must be called before `RUN_ALL_TESTS()` is invoked, otherwise behavior is
+// undefined.
+//
+// Use case example:
+//
+// class MyFixture : public ::testing::Test {
+// public:
+// // All of these optional, just like in regular macro usage.
+// static void SetUpTestSuite() { ... }
+// static void TearDownTestSuite() { ... }
+// void SetUp() override { ... }
+// void TearDown() override { ... }
+// };
+//
+// class MyTest : public MyFixture {
+// public:
+// explicit MyTest(int data) : data_(data) {}
+// void TestBody() override { ... }
+//
+// private:
+// int data_;
+// };
+//
+// void RegisterMyTests(const std::vector<int>& values) {
+// for (int v : values) {
+// ::testing::RegisterTest(
+// "MyFixture", ("Test" + std::to_string(v)).c_str(), nullptr,
+// std::to_string(v).c_str(),
+// __FILE__, __LINE__,
+// // Important to use the fixture type as the return type here.
+// [=]() -> MyFixture* { return new MyTest(v); });
+// }
+// }
+// ...
+// int main(int argc, char** argv) {
+// std::vector<int> values_to_test = LoadValuesFromConfig();
+// RegisterMyTests(values_to_test);
+// ...
+// return RUN_ALL_TESTS();
+// }
+//
+template <int&... ExplicitParameterBarrier, typename Factory>
+TestInfo* RegisterTest(const char* test_suite_name, const char* test_name,
+ const char* type_param, const char* value_param,
+ const char* file, int line, Factory factory) {
+ using TestT = typename std::remove_pointer<decltype(factory())>::type;
+
+ class FactoryImpl : public internal::TestFactoryBase {
+ public:
+ explicit FactoryImpl(Factory f) : factory_(std::move(f)) {}
+ Test* CreateTest() override { return factory_(); }
+
+ private:
+ Factory factory_;
+ };
+
+ return internal::MakeAndRegisterTestInfo(
+ test_suite_name, test_name, type_param, value_param,
+ internal::CodeLocation(file, line), internal::GetTypeId<TestT>(),
+ internal::SuiteApiResolver<TestT>::GetSetUpCaseOrSuite(file, line),
+ internal::SuiteApiResolver<TestT>::GetTearDownCaseOrSuite(file, line),
+ new FactoryImpl{std::move(factory)});
+}
+
+} // namespace testing
+
+// Use this function in main() to run all tests. It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+ return ::testing::UnitTest::GetInstance()->Run();
+}
+
+GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251
+
+#endif // GOOGLETEST_INCLUDE_GTEST_GTEST_H_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+#include "trait_backports.hpp"
+
+#if !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+# include "no_unique_address.hpp"
+#endif
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+// For the no-unique-address emulation, this is the case taken when neither
+// member is empty. For a real `[[no_unique_address]]`, this case is always
+// taken.
+template <class _T1, class _T2, class _Enable = void> struct __compressed_pair {
+ _MDSPAN_NO_UNIQUE_ADDRESS _T1 __t1_val{};
+ _MDSPAN_NO_UNIQUE_ADDRESS _T2 __t2_val{};
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T1 &__first() noexcept { return __t1_val; }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T1 const &__first() const noexcept {
+ return __t1_val;
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T2 &__second() noexcept { return __t2_val; }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T2 const &__second() const noexcept {
+ return __t2_val;
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair() = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~__compressed_pair() = default;
+ template <class _T1Like, class _T2Like>
+ MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_T1Like &&__t1, _T2Like &&__t2)
+ : __t1_val((_T1Like &&) __t1), __t2_val((_T2Like &&) __t2) {}
+};
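+
+// For illustration (a sketch; Empty is a hypothetical stateless type): when
+// real `[[no_unique_address]]` or the EBO specializations below are active,
+// an empty member takes no storage on typical ABIs, so e.g.
+//
+//   struct Empty {};
+//   static_assert(sizeof(__compressed_pair<char*, Empty>) == sizeof(char*), "");
+//
+// would be expected to hold (it may not under the NVCC fake-attribute
+// workaround in macros.hpp).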
+
+#if !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+
+// First empty.
+template <class _T1, class _T2>
+struct __compressed_pair<
+ _T1, _T2,
+ std::enable_if_t<_MDSPAN_TRAIT(std::is_empty, _T1) && !_MDSPAN_TRAIT(std::is_empty, _T2)>>
+ : private _T1 {
+ _T2 __t2_val{};
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T1 &__first() noexcept {
+ return *static_cast<_T1 *>(this);
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T1 const &__first() const noexcept {
+ return *static_cast<_T1 const *>(this);
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T2 &__second() noexcept { return __t2_val; }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T2 const &__second() const noexcept {
+ return __t2_val;
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair() = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~__compressed_pair() = default;
+ template <class _T1Like, class _T2Like>
+ MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_T1Like &&__t1, _T2Like &&__t2)
+ : _T1((_T1Like &&) __t1), __t2_val((_T2Like &&) __t2) {}
+};
+
+// Second empty.
+template <class _T1, class _T2>
+struct __compressed_pair<
+ _T1, _T2,
+ std::enable_if_t<!_MDSPAN_TRAIT(std::is_empty, _T1) && _MDSPAN_TRAIT(std::is_empty, _T2)>>
+ : private _T2 {
+ _T1 __t1_val{};
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T1 &__first() noexcept { return __t1_val; }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T1 const &__first() const noexcept {
+ return __t1_val;
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T2 &__second() noexcept {
+ return *static_cast<_T2 *>(this);
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T2 const &__second() const noexcept {
+ return *static_cast<_T2 const *>(this);
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair() = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~__compressed_pair() = default;
+
+ template <class _T1Like, class _T2Like>
+ MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_T1Like &&__t1, _T2Like &&__t2)
+ : _T2((_T2Like &&) __t2), __t1_val((_T1Like &&) __t1) {}
+};
+
+// Both empty.
+template <class _T1, class _T2>
+struct __compressed_pair<
+ _T1, _T2,
+ std::enable_if_t<_MDSPAN_TRAIT(std::is_empty, _T1) && _MDSPAN_TRAIT(std::is_empty, _T2)>>
+ // We need to use the __no_unique_address_emulation wrapper here to avoid
+ // base class ambiguities.
+#ifdef _MDSPAN_COMPILER_MSVC
+// MSVC doesn't allow you to access public static member functions of a type
+// when you *happen* to privately inherit from that type.
+ : protected __no_unique_address_emulation<_T1, 0>,
+ protected __no_unique_address_emulation<_T2, 1>
+#else
+ : private __no_unique_address_emulation<_T1, 0>,
+ private __no_unique_address_emulation<_T2, 1>
+#endif
+{
+ using __first_base_t = __no_unique_address_emulation<_T1, 0>;
+ using __second_base_t = __no_unique_address_emulation<_T2, 1>;
+
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T1 &__first() noexcept {
+ return this->__first_base_t::__ref();
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T1 const &__first() const noexcept {
+ return this->__first_base_t::__ref();
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T2 &__second() noexcept {
+ return this->__second_base_t::__ref();
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T2 const &__second() const noexcept {
+ return this->__second_base_t::__ref();
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair() = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __compressed_pair(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair const &) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair &
+ operator=(__compressed_pair &&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~__compressed_pair() = default;
+ template <class _T1Like, class _T2Like>
+ MDSPAN_INLINE_FUNCTION constexpr __compressed_pair(_T1Like &&__t1, _T2Like &&__t2) noexcept
+ : __first_base_t(_T1((_T1Like &&) __t1)),
+ __second_base_t(_T2((_T2Like &&) __t2))
+ { }
+};
+
+#endif // !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+
+} // end namespace detail
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#ifndef __has_include
+# define __has_include(x) 0
+#endif
+
+#if __has_include(<version>)
+# include <version>
+#else
+# include <type_traits>
+# include <utility>
+#endif
+
+#ifdef _MSVC_LANG
+#define _MDSPAN_CPLUSPLUS _MSVC_LANG
+#else
+#define _MDSPAN_CPLUSPLUS __cplusplus
+#endif
+
+#define MDSPAN_CXX_STD_14 201402L
+#define MDSPAN_CXX_STD_17 201703L
+#define MDSPAN_CXX_STD_20 202002L
+// Note: GCC (as of version 13) still reports the provisional C++23 value
+// (202100L) for __cplusplus, so only the Clang branch uses the final 202302L.
+#ifdef __clang__
+#define MDSPAN_CXX_STD_23 202302L
+#else
+#define MDSPAN_CXX_STD_23 202100L
+#endif
+
+#define MDSPAN_HAS_CXX_14 (_MDSPAN_CPLUSPLUS >= MDSPAN_CXX_STD_14)
+#define MDSPAN_HAS_CXX_17 (_MDSPAN_CPLUSPLUS >= MDSPAN_CXX_STD_17)
+#define MDSPAN_HAS_CXX_20 (_MDSPAN_CPLUSPLUS >= MDSPAN_CXX_STD_20)
+#define MDSPAN_HAS_CXX_23 (_MDSPAN_CPLUSPLUS >= MDSPAN_CXX_STD_23)
+
+static_assert(_MDSPAN_CPLUSPLUS >= MDSPAN_CXX_STD_14, "mdspan requires C++14 or later.");
+
+#ifndef _MDSPAN_COMPILER_CLANG
+# if defined(__clang__)
+# define _MDSPAN_COMPILER_CLANG __clang__
+# endif
+#endif
+
+#if !defined(_MDSPAN_COMPILER_MSVC) && !defined(_MDSPAN_COMPILER_MSVC_CLANG)
+# if defined(_MSC_VER)
+# if !defined(_MDSPAN_COMPILER_CLANG)
+# define _MDSPAN_COMPILER_MSVC _MSC_VER
+# else
+# define _MDSPAN_COMPILER_MSVC_CLANG _MSC_VER
+# endif
+# endif
+#endif
+
+#ifndef _MDSPAN_COMPILER_INTEL
+# ifdef __INTEL_COMPILER
+# define _MDSPAN_COMPILER_INTEL __INTEL_COMPILER
+# endif
+#endif
+
+#ifndef _MDSPAN_COMPILER_APPLECLANG
+# ifdef __apple_build_version__
+# define _MDSPAN_COMPILER_APPLECLANG __apple_build_version__
+# endif
+#endif
+
+#ifndef _MDSPAN_HAS_CUDA
+# if defined(__CUDACC__)
+# define _MDSPAN_HAS_CUDA __CUDACC__
+# endif
+#endif
+
+#ifndef _MDSPAN_HAS_HIP
+# if defined(__HIPCC__)
+# define _MDSPAN_HAS_HIP __HIPCC__
+# endif
+#endif
+
+#ifndef _MDSPAN_HAS_SYCL
+# if defined(SYCL_LANGUAGE_VERSION)
+# define _MDSPAN_HAS_SYCL SYCL_LANGUAGE_VERSION
+# endif
+#endif
+
+#ifndef __has_cpp_attribute
+# define __has_cpp_attribute(x) 0
+#endif
+
+#ifndef _MDSPAN_PRESERVE_STANDARD_LAYOUT
+// Preserve standard layout by default, but we're not removing the old version
+// that turns this off until we're sure this doesn't have an unreasonable cost
+// to the compiler or optimizer.
+# define _MDSPAN_PRESERVE_STANDARD_LAYOUT 1
+#endif
+
+#if !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+# if ((__has_cpp_attribute(no_unique_address) >= 201803L) && \
+ (!defined(__NVCC__) || MDSPAN_HAS_CXX_20) && \
+ (!defined(_MDSPAN_COMPILER_MSVC) || MDSPAN_HAS_CXX_20))
+# define _MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS 1
+# define _MDSPAN_NO_UNIQUE_ADDRESS [[no_unique_address]]
+# else
+# define _MDSPAN_NO_UNIQUE_ADDRESS
+# endif
+#endif
+
+// NVCC older than 11.6 chokes on the no-unique-address emulation,
+// so just pretend to use it (to avoid the full-blown EBO workaround,
+// which NVCC also doesn't like ...) and leave the macro empty.
+#ifndef _MDSPAN_NO_UNIQUE_ADDRESS
+# if defined(__NVCC__)
+# define _MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS 1
+# define _MDSPAN_USE_FAKE_ATTRIBUTE_NO_UNIQUE_ADDRESS
+# endif
+# define _MDSPAN_NO_UNIQUE_ADDRESS
+#endif
+
+// AMD's HIP compiler seems to have issues with concepts:
+// it pretends concepts exist, but doesn't ship the <concepts> header.
+#ifndef __HIPCC__
+#ifndef _MDSPAN_USE_CONCEPTS
+# if defined(__cpp_concepts) && __cpp_concepts >= 201507L
+# define _MDSPAN_USE_CONCEPTS 1
+# endif
+#endif
+#endif
+
+#ifndef _MDSPAN_USE_FOLD_EXPRESSIONS
+# if (defined(__cpp_fold_expressions) && __cpp_fold_expressions >= 201603L) \
+ || (!defined(__cpp_fold_expressions) && MDSPAN_HAS_CXX_17)
+# define _MDSPAN_USE_FOLD_EXPRESSIONS 1
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_INLINE_VARIABLES
+# if defined(__cpp_inline_variables) && __cpp_inline_variables >= 201606L \
+ || (!defined(__cpp_inline_variables) && MDSPAN_HAS_CXX_17)
+# define _MDSPAN_USE_INLINE_VARIABLES 1
+# endif
+#endif
+
+#ifndef _MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS
+# if (!(defined(__cpp_lib_type_trait_variable_templates) && __cpp_lib_type_trait_variable_templates >= 201510L) \
+ || !MDSPAN_HAS_CXX_17)
+# if !(defined(_MDSPAN_COMPILER_APPLECLANG) && MDSPAN_HAS_CXX_17)
+# define _MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS 1
+# endif
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_VARIABLE_TEMPLATES
+# if (defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && MDSPAN_HAS_CXX_17) \
+ || (!defined(__cpp_variable_templates) && MDSPAN_HAS_CXX_17)
+# define _MDSPAN_USE_VARIABLE_TEMPLATES 1
+# endif
+#endif // _MDSPAN_USE_VARIABLE_TEMPLATES
+
+#ifndef _MDSPAN_USE_CONSTEXPR_14
+# if ((defined(__cpp_constexpr) && __cpp_constexpr >= 201304) \
+ || (!defined(__cpp_constexpr) && MDSPAN_HAS_CXX_14)) \
+ && (!(defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1700))
+# define _MDSPAN_USE_CONSTEXPR_14 1
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_INTEGER_SEQUENCE
+# if defined(_MDSPAN_COMPILER_MSVC)
+# if (defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304)
+# define _MDSPAN_USE_INTEGER_SEQUENCE 1
+# endif
+# endif
+#endif
+#ifndef _MDSPAN_USE_INTEGER_SEQUENCE
+# if (defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304) \
+ || (!defined(__cpp_lib_integer_sequence) && MDSPAN_HAS_CXX_14) \
+ /* as far as I can tell, libc++ seems to think this is a C++11 feature... */ \
+ || (defined(__GLIBCXX__) && __GLIBCXX__ > 20150422 && __GNUC__ < 5 && !defined(__INTEL_CXX11_MODE__))
+ // several compilers lie about integer_sequence working properly unless the C++14 standard is used
+# define _MDSPAN_USE_INTEGER_SEQUENCE 1
+# elif defined(_MDSPAN_COMPILER_APPLECLANG) && MDSPAN_HAS_CXX_14
+ // appleclang seems to be missing the __cpp_lib_... macros, but doesn't seem to lie about C++14 making
+ // integer_sequence work
+# define _MDSPAN_USE_INTEGER_SEQUENCE 1
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_RETURN_TYPE_DEDUCTION
+# if (defined(__cpp_return_type_deduction) && __cpp_return_type_deduction >= 201304) \
+ || (!defined(__cpp_return_type_deduction) && MDSPAN_HAS_CXX_14)
+# define _MDSPAN_USE_RETURN_TYPE_DEDUCTION 1
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+# if (!defined(__NVCC__) || (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__ * 10 >= 1170)) && \
+ ((defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201703) || \
+ (!defined(__cpp_deduction_guides) && MDSPAN_HAS_CXX_17))
+# define _MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+# endif
+#endif
+
+#ifndef _MDSPAN_USE_STANDARD_TRAIT_ALIASES
+# if (defined(__cpp_lib_transformation_trait_aliases) && __cpp_lib_transformation_trait_aliases >= 201304) \
+ || (!defined(__cpp_lib_transformation_trait_aliases) && MDSPAN_HAS_CXX_14)
+# define _MDSPAN_USE_STANDARD_TRAIT_ALIASES 1
+# elif defined(_MDSPAN_COMPILER_APPLECLANG) && MDSPAN_HAS_CXX_14
+  // AppleClang seems to be missing the __cpp_lib_... macros, but doesn't seem to misreport C++14 support.
+# define _MDSPAN_USE_STANDARD_TRAIT_ALIASES 1
+# endif
+#endif
+
+#ifndef _MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND
+# ifdef __GNUC__
+# if __GNUC__ < 9
+# define _MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND 1
+# endif
+# endif
+#endif
+
+#ifndef MDSPAN_CONDITIONAL_EXPLICIT
+# if MDSPAN_HAS_CXX_20
+# define MDSPAN_CONDITIONAL_EXPLICIT(COND) explicit(COND)
+# else
+# define MDSPAN_CONDITIONAL_EXPLICIT(COND)
+# endif
+#endif
+
+#ifndef MDSPAN_USE_BRACKET_OPERATOR
+# if defined(__cpp_multidimensional_subscript)
+// The following if/else is necessary to work around a clang issue with using
+// a parameter pack inside a bracket operator in C++2b/C++23 mode
+# if defined(_MDSPAN_COMPILER_CLANG) && ((__clang_major__ == 15) || (__clang_major__ == 16))
+# define MDSPAN_USE_BRACKET_OPERATOR 0
+# else
+# define MDSPAN_USE_BRACKET_OPERATOR 1
+# endif
+# else
+# define MDSPAN_USE_BRACKET_OPERATOR 0
+# endif
+#endif
+
+#ifndef MDSPAN_USE_PAREN_OPERATOR
+# if !MDSPAN_USE_BRACKET_OPERATOR
+# define MDSPAN_USE_PAREN_OPERATOR 1
+# else
+# define MDSPAN_USE_PAREN_OPERATOR 0
+# endif
+#endif
+
+#if MDSPAN_USE_BRACKET_OPERATOR
+# define __MDSPAN_OP(mds,...) mds[__VA_ARGS__]
+// Corentin's demo compiler for the multidimensional subscript operator
+// chokes on an empty [] call, though the proposal arguably supports it.
+#ifdef MDSPAN_NO_EMPTY_BRACKET_OPERATOR
+# define __MDSPAN_OP0(mds) mds.accessor().access(mds.data_handle(),0)
+#else
+# define __MDSPAN_OP0(mds) mds[]
+#endif
+# define __MDSPAN_OP1(mds, a) mds[a]
+# define __MDSPAN_OP2(mds, a, b) mds[a,b]
+# define __MDSPAN_OP3(mds, a, b, c) mds[a,b,c]
+# define __MDSPAN_OP4(mds, a, b, c, d) mds[a,b,c,d]
+# define __MDSPAN_OP5(mds, a, b, c, d, e) mds[a,b,c,d,e]
+# define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds[a,b,c,d,e,f]
+#else
+# define __MDSPAN_OP(mds,...) mds(__VA_ARGS__)
+# define __MDSPAN_OP0(mds) mds()
+# define __MDSPAN_OP1(mds, a) mds(a)
+# define __MDSPAN_OP2(mds, a, b) mds(a,b)
+# define __MDSPAN_OP3(mds, a, b, c) mds(a,b,c)
+# define __MDSPAN_OP4(mds, a, b, c, d) mds(a,b,c,d)
+# define __MDSPAN_OP5(mds, a, b, c, d, e) mds(a,b,c,d,e)
+# define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds(a,b,c,d,e,f)
+#endif
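+
+// Illustrative sketch (not part of the upstream header): how the dispatch
+// macros above expand. With the C++23 multidimensional subscript operator
+// available, element access goes through operator[]:
+//   __MDSPAN_OP(m, i, j)   ->   m[i, j]
+// while on the fallback path it goes through operator():
+//   __MDSPAN_OP(m, i, j)   ->   m(i, j)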
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+
+#include <cstddef> // size_t
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+template <class ElementType>
+struct default_accessor {
+
+ using offset_policy = default_accessor;
+ using element_type = ElementType;
+ using reference = ElementType&;
+ using data_handle_type = ElementType*;
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr default_accessor() noexcept = default;
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, OtherElementType(*)[], element_type(*)[])
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr default_accessor(default_accessor<OtherElementType>) noexcept {}
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr data_handle_type
+ offset(data_handle_type p, size_t i) const noexcept {
+ return p + i;
+ }
+
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference access(data_handle_type p, size_t i) const noexcept {
+ return p[i];
+ }
+
+};
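+
+// Usage sketch (illustrative only): default_accessor treats the data handle
+// as a raw pointer, so access() is plain pointer indexing and offset() is
+// pointer arithmetic:
+//   double buffer[8] = {};
+//   default_accessor<double> acc;
+//   acc.access(buffer, 3) = 1.5;              // same as buffer[3] = 1.5
+//   double* shifted = acc.offset(buffer, 4);  // same as buffer + 4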
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+
+#if defined(__cpp_lib_span)
+#include <span>
+#endif
+
+#include <cstddef> // size_t
+#include <limits> // numeric_limits
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+#if defined(__cpp_lib_span)
+using std::dynamic_extent;
+#else
+_MDSPAN_INLINE_VARIABLE constexpr auto dynamic_extent = std::numeric_limits<size_t>::max();
+#endif
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
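+
+// Illustrative note: dynamic_extent is the sentinel marking an extent as
+// runtime-determined; whether it comes from <span> or the fallback above,
+//   static_assert(dynamic_extent == std::numeric_limits<size_t>::max(), "");
+// holds, so e.g. extents<int, 2, dynamic_extent> fixes its first extent at
+// compile time and takes the second at construction.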
+
+//==============================================================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+#include "dynamic_extent.hpp"
+#include "utility.hpp"
+
+#ifdef __cpp_lib_span
+#include <span>
+#endif
+#include <array>
+#include <type_traits>
+
+#include <cassert>
+#include <cinttypes>
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+// Function used to check compatibility of extents in the converting
+// constructor; it can't be a private member function for some reason.
+template <size_t... Extents, size_t... OtherExtents>
+MDSPAN_INLINE_FUNCTION
+static constexpr std::integral_constant<bool, false> __check_compatible_extents(
+ std::integral_constant<bool, false>,
+ std::integer_sequence<size_t, Extents...>,
+ std::integer_sequence<size_t, OtherExtents...>) noexcept {
+ return {};
+}
+
+// This helper prevents ICEs (internal compiler errors) on MSVC.
+template <size_t Lhs, size_t Rhs>
+struct __compare_extent_compatible : std::integral_constant<bool,
+ Lhs == dynamic_extent ||
+ Rhs == dynamic_extent ||
+ Lhs == Rhs>
+{};
+
+template <size_t... Extents, size_t... OtherExtents>
+MDSPAN_INLINE_FUNCTION
+static constexpr std::integral_constant<
+ bool, _MDSPAN_FOLD_AND(__compare_extent_compatible<Extents, OtherExtents>::value)>
+__check_compatible_extents(
+ std::integral_constant<bool, true>,
+ std::integer_sequence<size_t, Extents...>,
+ std::integer_sequence<size_t, OtherExtents...>) noexcept {
+ return {};
+}
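+
+// Usage sketch (illustrative only): ranks must match, and each extent pair is
+// compatible when the values are equal or either side is dynamic_extent:
+//   decltype(__check_compatible_extents(
+//       std::integral_constant<bool, true>{},
+//       std::integer_sequence<size_t, 2, dynamic_extent>{},
+//       std::integer_sequence<size_t, 2, 3>{}))::value  // == true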
+
+template<class IndexType, class ... Arguments>
+MDSPAN_INLINE_FUNCTION
+static constexpr bool are_valid_indices() {
+ return
+ _MDSPAN_FOLD_AND(std::is_convertible<Arguments, IndexType>::value) &&
+ _MDSPAN_FOLD_AND(std::is_nothrow_constructible<IndexType, Arguments>::value);
+}
+
+// ------------------------------------------------------------------
+// ------------ static_array ----------------------------------------
+// ------------------------------------------------------------------
+
+// Array-like class that provides an array of static values, accessed
+// through get functions.
+
+// Implementation of static_array with a recursive implementation of get.
+template <size_t R, class T, T... Extents> struct static_array_impl;
+
+template <size_t R, class T, T FirstExt, T... Extents>
+struct static_array_impl<R, T, FirstExt, Extents...> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr static T get(size_t r) {
+ if (r == R)
+ return FirstExt;
+ else
+ return static_array_impl<R + 1, T, Extents...>::get(r);
+ }
+ template <size_t r> MDSPAN_INLINE_FUNCTION constexpr static T get() {
+#if MDSPAN_HAS_CXX_17
+ if constexpr (r == R)
+ return FirstExt;
+ else
+ return static_array_impl<R + 1, T, Extents...>::template get<r>();
+#else
+    return get(r);
+#endif
+ }
+};
+
+// End the recursion
+template <size_t R, class T, T FirstExt>
+struct static_array_impl<R, T, FirstExt> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr static T get(size_t) { return FirstExt; }
+ template <size_t> MDSPAN_INLINE_FUNCTION constexpr static T get() {
+ return FirstExt;
+ }
+};
+
+// Don't start recursion if size 0
+template <class T> struct static_array_impl<0, T> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr static T get(size_t) { return T(); }
+ template <size_t> MDSPAN_INLINE_FUNCTION constexpr static T get() {
+ return T();
+ }
+};
+
+// Static array; provides get<r>() and get(r).
+template <class T, T... Values> struct static_array:
+ public static_array_impl<0, T, Values...> {
+
+public:
+ using value_type = T;
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t size() { return sizeof...(Values); }
+};
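+
+// Usage sketch (illustrative only):
+//   using a = static_array<size_t, 2, 4, 8>;
+//   static_assert(a::size() == 3, "");
+//   static_assert(a::get(1) == 4, "");    // runtime-index form
+//   static_assert(a::get<2>() == 8, "");  // compile-time-index form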
+
+
+// ------------------------------------------------------------------
+// ------------ index_sequence_scan ---------------------------------
+// ------------------------------------------------------------------
+
+// index_sequence_scan takes compile-time values and provides get(r),
+// which returns the sum of the first r values (an exclusive prefix sum).
+
+// Recursive implementation for get
+template <size_t R, size_t... Values> struct index_sequence_scan_impl;
+
+template <size_t R, size_t FirstVal, size_t... Values>
+struct index_sequence_scan_impl<R, FirstVal, Values...> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t get(size_t r) {
+ if (r > R)
+ return FirstVal + index_sequence_scan_impl<R + 1, Values...>::get(r);
+ else
+ return 0;
+ }
+};
+
+template <size_t R, size_t FirstVal>
+struct index_sequence_scan_impl<R, FirstVal> {
+#if defined(__NVCC__) || defined(__NVCOMPILER) || \
+ defined(_MDSPAN_COMPILER_INTEL)
+  // NVCC warns about a pointless comparison with 0 when R == 0 and r is
+  // constant-evaluated and also 0.
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t get(size_t r) {
+ return static_cast<int64_t>(R) > static_cast<int64_t>(r) ? FirstVal : 0;
+ }
+#else
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t get(size_t r) { return R > r ? FirstVal : 0; }
+#endif
+};
+template <> struct index_sequence_scan_impl<0> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t get(size_t) { return 0; }
+};
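+
+// Usage sketch (illustrative only): for the values {1, 0, 2} and a rank
+// index r below the number of values,
+//   using scan = index_sequence_scan_impl<0, 1, 0, 2>;
+//   static_assert(scan::get(0) == 0, "");  // empty prefix
+//   static_assert(scan::get(1) == 1, "");  // 1
+//   static_assert(scan::get(2) == 1, "");  // 1 + 0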
+
+// ------------------------------------------------------------------
+// ------------ possibly_empty_array -------------------------------
+// ------------------------------------------------------------------
+
+// Array-like class that provides operator[] and
+// has a specialization for the size-0 case.
+// This is needed to make maybe_static_array truly empty when
+// all values are static.
+
+template <class T, size_t N> struct possibly_empty_array {
+ T vals[N]{};
+ MDSPAN_INLINE_FUNCTION
+ constexpr T &operator[](size_t r) { return vals[r]; }
+ MDSPAN_INLINE_FUNCTION
+ constexpr const T &operator[](size_t r) const { return vals[r]; }
+};
+
+template <class T> struct possibly_empty_array<T, 0> {
+ MDSPAN_INLINE_FUNCTION
+ constexpr T operator[](size_t) { return T(); }
+ MDSPAN_INLINE_FUNCTION
+ constexpr const T operator[](size_t) const { return T(); }
+};
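+
+// Illustrative note: the size-0 specialization carries no data members, e.g.
+//   static_assert(std::is_empty<possibly_empty_array<int, 0>>::value, "");
+// which is what lets _MDSPAN_NO_UNIQUE_ADDRESS elide its storage below.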
+
+// ------------------------------------------------------------------
+// ------------ maybe_static_array ----------------------------------
+// ------------------------------------------------------------------
+
+// Array-like class that holds a mix of static and runtime values but
+// stores only the runtime values.
+// The static and the runtime values may have different types.
+// The position of a dynamic value is indicated through a tag value.
+template <class TDynamic, class TStatic, TStatic dyn_tag, TStatic... Values>
+struct maybe_static_array {
+
+ static_assert(std::is_convertible<TStatic, TDynamic>::value, "maybe_static_array: TStatic must be convertible to TDynamic");
+ static_assert(std::is_convertible<TDynamic, TStatic>::value, "maybe_static_array: TDynamic must be convertible to TStatic");
+
+private:
+ // Static values member
+ using static_vals_t = static_array<TStatic, Values...>;
+ constexpr static size_t m_size = sizeof...(Values);
+ constexpr static size_t m_size_dynamic =
+ _MDSPAN_FOLD_PLUS_RIGHT((Values == dyn_tag), 0);
+
+ // Dynamic values member
+ _MDSPAN_NO_UNIQUE_ADDRESS possibly_empty_array<TDynamic, m_size_dynamic>
+ m_dyn_vals;
+
+ // static mapping of indices to the position in the dynamic values array
+ using dyn_map_t = index_sequence_scan_impl<0, static_cast<size_t>(Values == dyn_tag)...>;
+public:
+
+ // two types for static and dynamic values
+ using value_type = TDynamic;
+ using static_value_type = TStatic;
+ // tag value indicating dynamic value
+ constexpr static static_value_type tag_value = dyn_tag;
+
+ constexpr maybe_static_array() = default;
+
+ // constructor for all static values
+ // TODO: add precondition check?
+ MDSPAN_TEMPLATE_REQUIRES(class... Vals,
+ /* requires */ ((m_size_dynamic == 0) &&
+ (sizeof...(Vals) > 0)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(Vals...) : m_dyn_vals{} {}
+
+ // constructors from dynamic values only
+ MDSPAN_TEMPLATE_REQUIRES(class... DynVals,
+ /* requires */ (sizeof...(DynVals) ==
+ m_size_dynamic &&
+ m_size_dynamic > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(DynVals... vals)
+ : m_dyn_vals{static_cast<TDynamic>(vals)...} {}
+
+
+ MDSPAN_TEMPLATE_REQUIRES(class T, size_t N,
+ /* requires */ (N == m_size_dynamic && N > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::array<T, N> &vals) {
+ for (size_t r = 0; r < N; r++)
+ m_dyn_vals[r] = static_cast<TDynamic>(vals[r]);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(class T, size_t N,
+ /* requires */ (N == m_size_dynamic && N == 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::array<T, N> &) : m_dyn_vals{} {}
+
+#ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(class T, size_t N,
+ /* requires */ (N == m_size_dynamic && N > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::span<T, N> &vals) {
+ for (size_t r = 0; r < N; r++)
+ m_dyn_vals[r] = static_cast<TDynamic>(vals[r]);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(class T, size_t N,
+ /* requires */ (N == m_size_dynamic && N == 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::span<T, N> &) : m_dyn_vals{} {}
+#endif
+
+ // constructors from all values
+ MDSPAN_TEMPLATE_REQUIRES(class... DynVals,
+ /* requires */ (sizeof...(DynVals) !=
+ m_size_dynamic &&
+ m_size_dynamic > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(DynVals... vals)
+ : m_dyn_vals{} {
+ static_assert((sizeof...(DynVals) == m_size), "Invalid number of values.");
+ TDynamic values[m_size]{static_cast<TDynamic>(vals)...};
+ for (size_t r = 0; r < m_size; r++) {
+ TStatic static_val = static_vals_t::get(r);
+ if (static_val == dyn_tag) {
+ m_dyn_vals[dyn_map_t::get(r)] = values[r];
+ }
+// Precondition check
+#ifdef _MDSPAN_DEBUG
+ else {
+ assert(values[r] == static_cast<TDynamic>(static_val));
+ }
+#endif
+ }
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class T, size_t N,
+ /* requires */ (N != m_size_dynamic && m_size_dynamic > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::array<T, N> &vals) {
+ static_assert((N == m_size), "Invalid number of values.");
+// Precondition check
+#ifdef _MDSPAN_DEBUG
+ assert(N == m_size);
+#endif
+ for (size_t r = 0; r < m_size; r++) {
+ TStatic static_val = static_vals_t::get(r);
+ if (static_val == dyn_tag) {
+ m_dyn_vals[dyn_map_t::get(r)] = static_cast<TDynamic>(vals[r]);
+ }
+// Precondition check
+#ifdef _MDSPAN_DEBUG
+ else {
+ assert(static_cast<TDynamic>(vals[r]) ==
+ static_cast<TDynamic>(static_val));
+ }
+#endif
+ }
+ }
+
+#ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class T, size_t N,
+ /* requires */ (N != m_size_dynamic && m_size_dynamic > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr maybe_static_array(const std::span<T, N> &vals) {
+ static_assert((N == m_size) || (m_size == dynamic_extent));
+#ifdef _MDSPAN_DEBUG
+ assert(N == m_size);
+#endif
+ for (size_t r = 0; r < m_size; r++) {
+ TStatic static_val = static_vals_t::get(r);
+ if (static_val == dyn_tag) {
+ m_dyn_vals[dyn_map_t::get(r)] = static_cast<TDynamic>(vals[r]);
+ }
+#ifdef _MDSPAN_DEBUG
+ else {
+ assert(static_cast<TDynamic>(vals[r]) ==
+ static_cast<TDynamic>(static_val));
+ }
+#endif
+ }
+ }
+#endif
+
+ // access functions
+ MDSPAN_INLINE_FUNCTION
+ constexpr static TStatic static_value(size_t r) { return static_vals_t::get(r); }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr TDynamic value(size_t r) const {
+ TStatic static_val = static_vals_t::get(r);
+ return static_val == dyn_tag ? m_dyn_vals[dyn_map_t::get(r)]
+ : static_cast<TDynamic>(static_val);
+ }
+ MDSPAN_INLINE_FUNCTION
+ constexpr TDynamic operator[](size_t r) const { return value(r); }
+
+
+ // observers
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t size() { return m_size; }
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t size_dynamic() { return m_size_dynamic; }
+};
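+
+// Usage sketch (illustrative only): one dynamic slot among static values,
+// tagged with dynamic_extent:
+//   using arr = maybe_static_array<int, size_t, dynamic_extent,
+//                                  3, dynamic_extent, 5>;
+//   constexpr arr a(4);  // supplies the single dynamic value
+//   // a[0] == 3 (static), a[1] == 4 (dynamic), a[2] == 5 (static)
+//   // arr::size() == 3, arr::size_dynamic() == 1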
+
+} // namespace detail
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+// ------------------------------------------------------------------
+// ------------ extents ---------------------------------------------
+// ------------------------------------------------------------------
+
+// Class describing the extents of a multidimensional array.
+// Used by mdspan, mdarray, and layout mappings.
+// See the ISO C++ standard, [mdspan.extents].
+
+template <class IndexType, size_t... Extents> class extents {
+public:
+ // typedefs for integral types used
+ using index_type = IndexType;
+ using size_type = std::make_unsigned_t<index_type>;
+ using rank_type = size_t;
+
+ static_assert(std::is_integral<index_type>::value && !std::is_same<index_type, bool>::value,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents::index_type must be a signed or unsigned integer type");
+private:
+ constexpr static rank_type m_rank = sizeof...(Extents);
+ constexpr static rank_type m_rank_dynamic =
+ _MDSPAN_FOLD_PLUS_RIGHT((Extents == dynamic_extent), /* + ... + */ 0);
+
+ // internal storage type using maybe_static_array
+ using vals_t =
+ detail::maybe_static_array<IndexType, size_t, dynamic_extent, Extents...>;
+ _MDSPAN_NO_UNIQUE_ADDRESS vals_t m_vals;
+
+public:
+ // [mdspan.extents.obs], observers of multidimensional index space
+ MDSPAN_INLINE_FUNCTION
+ constexpr static rank_type rank() noexcept { return m_rank; }
+ MDSPAN_INLINE_FUNCTION
+ constexpr static rank_type rank_dynamic() noexcept { return m_rank_dynamic; }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type extent(rank_type r) const noexcept { return m_vals.value(r); }
+ MDSPAN_INLINE_FUNCTION
+ constexpr static size_t static_extent(rank_type r) noexcept {
+ return vals_t::static_value(r);
+ }
+
+ // [mdspan.extents.cons], constructors
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr extents() noexcept = default;
+
+ // Construction from just dynamic or all values.
+ // Precondition check is deferred to maybe_static_array constructor
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... OtherIndexTypes,
+ /* requires */ (
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT(std::is_convertible, OtherIndexTypes,
+ index_type) /* && ... */) &&
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT(std::is_nothrow_constructible, index_type,
+ OtherIndexTypes) /* && ... */) &&
+ (sizeof...(OtherIndexTypes) == m_rank ||
+ sizeof...(OtherIndexTypes) == m_rank_dynamic)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr explicit extents(OtherIndexTypes... dynvals) noexcept
+ : m_vals(static_cast<index_type>(dynvals)...) {}
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherIndexType, size_t N,
+ /* requires */
+ (
+ _MDSPAN_TRAIT(std::is_convertible, const OtherIndexType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type,
+ const OtherIndexType&) &&
+ (N == m_rank || N == m_rank_dynamic)))
+ MDSPAN_INLINE_FUNCTION
+ MDSPAN_CONDITIONAL_EXPLICIT(N != m_rank_dynamic)
+ constexpr extents(const std::array<OtherIndexType, N> &exts) noexcept
+ : m_vals(std::move(exts)) {}
+
+#ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherIndexType, size_t N,
+ /* requires */
+ (_MDSPAN_TRAIT(std::is_convertible, const OtherIndexType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const OtherIndexType&) &&
+ (N == m_rank || N == m_rank_dynamic)))
+ MDSPAN_INLINE_FUNCTION
+ MDSPAN_CONDITIONAL_EXPLICIT(N != m_rank_dynamic)
+ constexpr extents(const std::span<OtherIndexType, N> &exts) noexcept
+ : m_vals(std::move(exts)) {}
+#endif
+
+private:
+  // Function to construct the extents storage from another extents object.
+  // With C++17 the first two variants could be collapsed using if constexpr,
+  // in which case not all the requires clauses would be needed;
+  // in C++14 mode that doesn't work due to infinite recursion.
+ MDSPAN_TEMPLATE_REQUIRES(
+ size_t DynCount, size_t R, class OtherExtents, class... DynamicValues,
+ /* requires */ ((R < m_rank) && (static_extent(R) == dynamic_extent)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr
+ vals_t __construct_vals_from_extents(std::integral_constant<size_t, DynCount>,
+ std::integral_constant<size_t, R>,
+ const OtherExtents &exts,
+ DynamicValues... dynamic_values) noexcept {
+ return __construct_vals_from_extents(
+ std::integral_constant<size_t, DynCount + 1>(),
+ std::integral_constant<size_t, R + 1>(), exts, dynamic_values...,
+ exts.extent(R));
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ size_t DynCount, size_t R, class OtherExtents, class... DynamicValues,
+ /* requires */ ((R < m_rank) && (static_extent(R) != dynamic_extent)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr
+ vals_t __construct_vals_from_extents(std::integral_constant<size_t, DynCount>,
+ std::integral_constant<size_t, R>,
+ const OtherExtents &exts,
+ DynamicValues... dynamic_values) noexcept {
+ return __construct_vals_from_extents(
+ std::integral_constant<size_t, DynCount>(),
+ std::integral_constant<size_t, R + 1>(), exts, dynamic_values...);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ size_t DynCount, size_t R, class OtherExtents, class... DynamicValues,
+ /* requires */ ((R == m_rank) && (DynCount == m_rank_dynamic)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr
+ vals_t __construct_vals_from_extents(std::integral_constant<size_t, DynCount>,
+ std::integral_constant<size_t, R>,
+ const OtherExtents &,
+ DynamicValues... dynamic_values) noexcept {
+ return vals_t{static_cast<index_type>(dynamic_values)...};
+ }
+
+public:
+
+ // Converting constructor from other extents specializations
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherIndexType, size_t... OtherExtents,
+ /* requires */
+ (
+      /* multi-stage check to protect against invalid pack expansion when the
+         sizes don't match */
+ decltype(detail::__check_compatible_extents(
+          // Using sizeof...(Extents) == sizeof...(OtherExtents) as the second argument
+          // fails with MSVC+NVCC with some obscure expansion error (MSVC: 19.38.33133, NVCC: 12.0).
+ std::integral_constant<bool, extents<int, Extents...>::rank() == extents<int, OtherExtents...>::rank()>{},
+ std::integer_sequence<size_t, Extents...>{},
+ std::integer_sequence<size_t, OtherExtents...>{}))::value
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ MDSPAN_CONDITIONAL_EXPLICIT((((Extents != dynamic_extent) &&
+ (OtherExtents == dynamic_extent)) ||
+ ...) ||
+ (std::numeric_limits<index_type>::max() <
+ std::numeric_limits<OtherIndexType>::max()))
+ constexpr extents(const extents<OtherIndexType, OtherExtents...> &other) noexcept
+ : m_vals(__construct_vals_from_extents(
+ std::integral_constant<size_t, 0>(),
+ std::integral_constant<size_t, 0>(), other)) {}
+
+ // Comparison operator
+ template <class OtherIndexType, size_t... OtherExtents>
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator==(const extents &lhs,
+ const extents<OtherIndexType, OtherExtents...> &rhs) noexcept {
+ return
+ rank() == extents<OtherIndexType, OtherExtents...>::rank() &&
+ detail::rankwise_equal(detail::with_rank<rank()>{}, rhs, lhs, detail::extent);
+ }
+
+#if !(MDSPAN_HAS_CXX_20)
+ template <class OtherIndexType, size_t... OtherExtents>
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator!=(extents const &lhs,
+ extents<OtherIndexType, OtherExtents...> const &rhs) noexcept {
+ return !(lhs == rhs);
+ }
+#endif
+};
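+
+// Usage sketch (illustrative only):
+//   extents<int, 2, dynamic_extent> e(7);  // one runtime extent
+//   // e.rank() == 2, e.rank_dynamic() == 1
+//   // e.extent(0) == 2, e.extent(1) == 7
+//   // decltype(e)::static_extent(1) == dynamic_extent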
+
+// Recursive helper classes to implement dextents alias for extents
+namespace detail {
+
+template <class IndexType, size_t Rank,
+ class Extents = ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType>>
+struct __make_dextents;
+
+template <class IndexType, size_t Rank, size_t... ExtentsPack>
+struct __make_dextents<
+ IndexType, Rank, ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType, ExtentsPack...>>
+{
+ using type = typename __make_dextents<
+ IndexType, Rank - 1,
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType,
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent,
+ ExtentsPack...>>::type;
+};
+
+template <class IndexType, size_t... ExtentsPack>
+struct __make_dextents<
+ IndexType, 0, ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType, ExtentsPack...>>
+{
+ using type = ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType, ExtentsPack...>;
+};
+
+} // end namespace detail
+
+// [mdspan.extents.dextents], alias template
+template <class IndexType, size_t Rank>
+using dextents = typename detail::__make_dextents<IndexType, Rank>::type;
+
+// Deduction guide for extents
+#if defined(_MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+template <class... IndexTypes>
+extents(IndexTypes...)
+ -> extents<size_t,
+ ((void) sizeof(IndexTypes), ::MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent)...>;
+#endif
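+
+// Usage sketch (illustrative only):
+//   static_assert(std::is_same<
+//       dextents<size_t, 2>,
+//       extents<size_t, dynamic_extent, dynamic_extent>>::value, "");
+//   // With CTAD available, extents{2, 3} deduces the same all-dynamic type.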
+
+// Helper type traits for identifying a class as extents.
+namespace detail {
+
+template <class T> struct __is_extents : ::std::false_type {};
+
+template <class IndexType, size_t... ExtentsPack>
+struct __is_extents<::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<IndexType, ExtentsPack...>>
+ : ::std::true_type {};
+
+template <class T>
+#if MDSPAN_HAS_CXX_17
+inline
+#else
+static
+#endif
+constexpr bool __is_extents_v = __is_extents<T>::value;
+
+template<class InputIndexType, class ExtentsIndexType>
+MDSPAN_INLINE_FUNCTION
+constexpr void
+check_lower_bound(InputIndexType user_index,
+ ExtentsIndexType /* current_extent */,
+ std::true_type /* is_signed */)
+{
+ (void) user_index; // prevent unused variable warning
+#ifdef _MDSPAN_DEBUG
+ assert(static_cast<ExtentsIndexType>(user_index) >= 0);
+#endif
+}
+
+template<class InputIndexType, class ExtentsIndexType>
+MDSPAN_INLINE_FUNCTION
+constexpr void
+check_lower_bound(InputIndexType /* user_index */,
+ ExtentsIndexType /* current_extent */,
+ std::false_type /* is_signed */)
+{}
+
+template<class InputIndexType, class ExtentsIndexType>
+MDSPAN_INLINE_FUNCTION
+constexpr void
+check_upper_bound(InputIndexType user_index,
+ ExtentsIndexType current_extent)
+{
+ (void) user_index; // prevent unused variable warnings
+ (void) current_extent;
+#ifdef _MDSPAN_DEBUG
+ assert(static_cast<ExtentsIndexType>(user_index) < current_extent);
+#endif
+}
+
+// Returning true so an AND fold can be used instead of the comma operator;
+// C++14 mode doesn't like void expressions with the way
+// _MDSPAN_FOLD_AND is set up.
+template<class InputIndex, class ExtentsIndexType>
+MDSPAN_INLINE_FUNCTION
+constexpr bool
+check_one_index(InputIndex user_index,
+ ExtentsIndexType current_extent)
+{
+ check_lower_bound(user_index, current_extent,
+ std::integral_constant<bool, std::is_signed<ExtentsIndexType>::value>{});
+ check_upper_bound(user_index, current_extent);
+ return true;
+}
+
+template<size_t ... RankIndices,
+ class ExtentsIndexType, size_t ... Exts,
+ class ... Indices>
+MDSPAN_INLINE_FUNCTION
+constexpr void
+check_all_indices_helper(std::index_sequence<RankIndices...>,
+ const extents<ExtentsIndexType, Exts...>& exts,
+ Indices... indices)
+{
+  // Suppress warnings that the statement has no effect
+ (void) _MDSPAN_FOLD_AND(
+ (check_one_index(indices, exts.extent(RankIndices)))
+ );
+}
+
+template<class ExtentsIndexType, size_t ... Exts,
+ class ... Indices>
+MDSPAN_INLINE_FUNCTION
+constexpr void
+check_all_indices(const extents<ExtentsIndexType, Exts...>& exts,
+ Indices... indices)
+{
+ check_all_indices_helper(std::make_index_sequence<sizeof...(Indices)>(),
+ exts, indices...);
+}
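+
+// Illustrative note: with _MDSPAN_DEBUG defined, an out-of-range index such as
+//   extents<int, 3> e;
+//   check_all_indices(e, 5);  // assert(5 < e.extent(0)) fires
+// trips the assert in check_upper_bound; without _MDSPAN_DEBUG the checks
+// compile down to no-ops.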
+
+} // namespace detail
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+struct full_extent_t { explicit full_extent_t() = default; };
+
+_MDSPAN_INLINE_VARIABLE constexpr auto full_extent = full_extent_t{ };
+
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+#include "trait_backports.hpp"
+#include "extents.hpp"
+#include "layout_stride.hpp"
+#include "utility.hpp"
+#if MDSPAN_HAS_CXX_17
+#include "../__p2642_bits/layout_padded_fwd.hpp"
+#endif
+#include <type_traits>
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+//==============================================================================
+
+template <class Extents>
+class layout_left::mapping {
+ public:
+ using extents_type = Extents;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using layout_type = layout_left;
+ private:
+
+ static_assert(detail::__is_extents_v<extents_type>,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::layout_left::mapping must be instantiated with a specialization of " MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents.");
+
+ template <class>
+ friend class mapping;
+
+  // i0 + E(0)*(i1 + E(1)*(i2 + E(2)*i3))
+ template <size_t r, size_t Rank>
+ struct __rank_count {};
+
+ template <size_t r, size_t Rank, class I, class... Indices>
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(
+ __rank_count<r,Rank>, const I& i, Indices... idx) const {
+ return __compute_offset(__rank_count<r+1,Rank>(), idx...) *
+ __extents.extent(r) + i;
+ }
+
+ template<class I>
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(
+ __rank_count<extents_type::rank()-1,extents_type::rank()>, const I& i) const {
+ return i;
+ }
+
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(__rank_count<0,0>) const { return 0; }
+
+ public:
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping() noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
+
+ _MDSPAN_HOST_DEVICE
+ constexpr mapping(extents_type const& __exts) noexcept
+ :__extents(__exts)
+ { }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible<OtherExtents, extents_type>::value)) // needs two () due to comma
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents) &&
+ (extents_type::rank() <= 1)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible<OtherExtents, extents_type>::value)) // needs two () due to comma
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(layout_right::mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ }
+
+#if MDSPAN_HAS_CXX_17
+ /**
+ * Converting constructor from `layout_left_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if _Mapping is a layout_left_padded mapping and
+ * extents_type is constructible from _Mapping::extents_type.
+ *
+ * \note There is currently a difference from p2642r2, where this function is specified as taking
+ * `layout_left_padded< padding_value >::mapping< Extents>`. However, this makes `padding_value` non-deducible.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::is_layout_left_padded_mapping<_Mapping>::value
+ && std::is_constructible_v<extents_type, typename _Mapping::extents_type>
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible_v<typename _Mapping::extents_type, extents_type>))
+ mapping(const _Mapping& __other) noexcept
+ : __extents(__other.extents())
+ {
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::
+ check_padded_layout_converting_constructor_mandates<
+ extents_type, _Mapping>(detail::with_rank<extents_type::rank()>{});
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::
+ check_padded_layout_converting_constructor_preconditions<
+ extents_type>(detail::with_rank<extents_type::rank()>{}, __other);
+ }
+#endif
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(layout_stride::mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ detail::validate_strides(detail::with_rank<extents_type::rank()>{}, layout_left{}, __extents, other);
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED _MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default;
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr const extents_type& extents() const noexcept {
+ return __extents;
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type required_span_size() const noexcept {
+ index_type value = 1;
+ for(rank_type r=0; r<extents_type::rank(); r++) value*=__extents.extent(r);
+ return value;
+ }
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... Indices,
+ /* requires */ (
+ (sizeof...(Indices) == extents_type::rank()) &&
+ (detail::are_valid_indices<index_type, Indices...>())
+ )
+ )
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type operator()(Indices... idxs) const noexcept {
+#if ! defined(NDEBUG)
+ detail::check_all_indices(this->extents(), idxs...);
+#endif // ! NDEBUG
+ return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast<index_type>(idxs)...);
+ }
+
+
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_exhaustive() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept { return true; }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type stride(rank_type i) const noexcept
+#if MDSPAN_HAS_CXX_20
+ requires ( Extents::rank() > 0 )
+#endif
+ {
+ index_type value = 1;
+ for(rank_type r=0; r<i; r++) value*=__extents.extent(r);
+ return value;
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ ( Extents::rank() == OtherExtents::rank())
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator==(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return lhs.extents() == rhs.extents();
+ }
+
+  // In C++20, operator!= is synthesized from operator==.
+#if !(MDSPAN_HAS_CXX_20)
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ ( Extents::rank() == OtherExtents::rank())
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator!=(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return lhs.extents() != rhs.extents();
+ }
+#endif
+
+  // Not really public, but currently needed to implement a fully constexpr-usable submdspan:
+ template<size_t N, class SizeType, size_t ... E, size_t ... Idx>
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type __get_stride(MDSPAN_IMPL_STANDARD_NAMESPACE::extents<SizeType, E...>,std::integer_sequence<size_t, Idx...>) const {
+ return _MDSPAN_FOLD_TIMES_RIGHT((Idx<N? __extents.template __extent<Idx>():1),1);
+ }
+ template<size_t N>
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type __stride() const noexcept {
+ return __get_stride<N>(__extents, std::make_index_sequence<extents_type::rank()>());
+ }
+
+private:
+ _MDSPAN_NO_UNIQUE_ADDRESS extents_type __extents{};
+
+ // [mdspan.submdspan.mapping], submdspan mapping specialization
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto submdspan_mapping_impl(
+ SliceSpecifiers... slices) const;
+
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr auto submdspan_mapping(
+ const mapping& src, SliceSpecifiers... slices) {
+ return src.submdspan_mapping_impl(slices...);
+ }
+};
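+
+// Usage sketch (illustrative only): layout_left is column-major, i.e. the
+// leftmost index varies fastest:
+//   layout_left::mapping<extents<int, 3, 4>> m{extents<int, 3, 4>{}};
+//   // m(i, j) == i + 3 * j
+//   // m.stride(0) == 1, m.stride(1) == 3
+//   // m.required_span_size() == 12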
+
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+#include "trait_backports.hpp"
+#include "extents.hpp"
+#include "layout_stride.hpp"
+#include "utility.hpp"
+#if MDSPAN_HAS_CXX_17
+#include "../__p2642_bits/layout_padded_fwd.hpp"
+#endif
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+//==============================================================================
+template <class Extents>
+class layout_right::mapping {
+ public:
+ using extents_type = Extents;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using layout_type = layout_right;
+ private:
+
+ static_assert(detail::__is_extents_v<extents_type>,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::layout_right::mapping must be instantiated with a specialization of " MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents.");
+
+ template <class>
+ friend class mapping;
+
+  // ((i0*E(1) + i1)*E(2) + i2)*E(3) + i3
+ template <size_t r, size_t Rank>
+ struct __rank_count {};
+
+ template <size_t r, size_t Rank, class I, class... Indices>
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(
+ index_type offset, __rank_count<r,Rank>, const I& i, Indices... idx) const {
+ return __compute_offset(offset * __extents.extent(r) + i,__rank_count<r+1,Rank>(), idx...);
+ }
+
+ template<class I, class ... Indices>
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(
+ __rank_count<0,extents_type::rank()>, const I& i, Indices... idx) const {
+ return __compute_offset(i,__rank_count<1,extents_type::rank()>(),idx...);
+ }
+
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(size_t offset, __rank_count<extents_type::rank(), extents_type::rank()>) const {
+ return static_cast<index_type>(offset);
+ }
+
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __compute_offset(__rank_count<0,0>) const { return 0; }
+
+ public:
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping() noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
+
+ _MDSPAN_HOST_DEVICE
+ constexpr mapping(extents_type const& __exts) noexcept
+ :__extents(__exts)
+ { }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible<OtherExtents, extents_type>::value)) // needs two () due to comma
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents) &&
+ (extents_type::rank() <= 1)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible<OtherExtents, extents_type>::value)) // needs two () due to comma
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(layout_left::mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ }
+
+ /**
+ * Converting constructor from `layout_right_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if _Mapping is a layout_right_padded mapping and
+ * extents_type is constructible from _Mapping::extents_type.
+ *
+ * \note There is currently a difference from p2642r2, where this function is specified as taking
+ * `layout_right_padded< padding_value >::mapping< Extents>`. However, this makes `padding_value` non-deducible.
+ */
+#if MDSPAN_HAS_CXX_17
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::is_layout_right_padded_mapping<_Mapping>::value
+ && std::is_constructible_v<extents_type, typename _Mapping::extents_type>))
+ MDSPAN_CONDITIONAL_EXPLICIT((!std::is_convertible_v<typename _Mapping::extents_type, extents_type>))
+ mapping(const _Mapping &__other) noexcept
+ : __extents(__other.extents())
+ {
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::
+ check_padded_layout_converting_constructor_mandates<
+ extents_type, _Mapping>(detail::with_rank<extents_type::rank()>{});
+ MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::
+ check_padded_layout_converting_constructor_preconditions<
+ extents_type>(detail::with_rank<extents_type::rank()>{}, __other);
+ }
+#endif
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(layout_stride::mapping<OtherExtents> const& other) noexcept // NOLINT(google-explicit-constructor)
+ :__extents(other.extents())
+ {
+ /*
+ * TODO: check precondition
+ * other.required_span_size() is a representable value of type index_type
+ */
+ detail::validate_strides(detail::with_rank<extents_type::rank()>{}, layout_right{}, __extents, other);
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED _MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default;
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr const extents_type& extents() const noexcept {
+ return __extents;
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type required_span_size() const noexcept {
+ index_type value = 1;
+ for(rank_type r=0; r != extents_type::rank(); ++r) value*=__extents.extent(r);
+ return value;
+ }
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class ... Indices,
+ /* requires */ (
+ (sizeof...(Indices) == extents_type::rank()) &&
+ (detail::are_valid_indices<index_type, Indices...>())
+ )
+ )
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type operator()(Indices... idxs) const noexcept {
+#if ! defined(NDEBUG)
+ detail::check_all_indices(this->extents(), idxs...);
+#endif // ! NDEBUG
+ return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast<index_type>(idxs)...);
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_exhaustive() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept { return true; }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type stride(rank_type i) const noexcept
+#if MDSPAN_HAS_CXX_20
+ requires ( Extents::rank() > 0 )
+#endif
+ {
+ index_type value = 1;
+ for(rank_type r=extents_type::rank()-1; r>i; r--) value*=__extents.extent(r);
+ return value;
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ ( Extents::rank() == OtherExtents::rank())
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator==(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return lhs.extents() == rhs.extents();
+ }
+
+  // In C++20, operator!= is synthesized from operator==.
+#if !(MDSPAN_HAS_CXX_20)
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (Extents::rank() == OtherExtents::rank())
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator!=(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return lhs.extents() != rhs.extents();
+ }
+#endif
+
+  // Not really public, but currently needed to implement a fully constexpr-usable submdspan:
+ template<size_t N, class SizeType, size_t ... E, size_t ... Idx>
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type __get_stride(MDSPAN_IMPL_STANDARD_NAMESPACE::extents<SizeType, E...>,std::integer_sequence<size_t, Idx...>) const {
+ return _MDSPAN_FOLD_TIMES_RIGHT((Idx>N? __extents.template __extent<Idx>():1),1);
+ }
+ template<size_t N>
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type __stride() const noexcept {
+ return __get_stride<N>(__extents, std::make_index_sequence<extents_type::rank()>());
+ }
+
+private:
+ _MDSPAN_NO_UNIQUE_ADDRESS extents_type __extents{};
+
+ // [mdspan.submdspan.mapping], submdspan mapping specialization
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto submdspan_mapping_impl(
+ SliceSpecifiers... slices) const;
+
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr auto submdspan_mapping(
+ const mapping& src, SliceSpecifiers... slices) {
+ return src.submdspan_mapping_impl(slices...);
+ }
+};
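+
+// Usage sketch (illustrative only): layout_right is row-major, i.e. the
+// rightmost index varies fastest:
+//   layout_right::mapping<extents<int, 3, 4>> m{extents<int, 3, 4>{}};
+//   // m(i, j) == i * 4 + j
+//   // m.stride(0) == 4, m.stride(1) == 1
+//   // m.required_span_size() == 12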
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+#include "extents.hpp"
+#include "trait_backports.hpp"
+#include "compressed_pair.hpp"
+#include "utility.hpp"
+
+#if !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+# include "no_unique_address.hpp"
+#endif
+
+#include <array>
+#include <type_traits>
+#include <utility>
+
+#ifdef __cpp_lib_span
+#include <span>
+#endif
+#if defined(_MDSPAN_USE_CONCEPTS) && MDSPAN_HAS_CXX_20 && defined(__cpp_lib_concepts)
+# include <concepts>
+#endif
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+struct layout_left {
+ template<class Extents>
+ class mapping;
+};
+struct layout_right {
+ template<class Extents>
+ class mapping;
+};
+
+namespace detail {
+ template<class Layout, class Mapping>
+ constexpr bool __is_mapping_of =
+ std::is_same<typename Layout::template mapping<typename Mapping::extents_type>, Mapping>::value;
+
+#if defined(_MDSPAN_USE_CONCEPTS) && MDSPAN_HAS_CXX_20
+# if !defined(__cpp_lib_concepts)
+ namespace internal {
+ namespace detail {
+ template <typename _Tp, typename _Up>
+ concept __same_as = std::is_same_v<_Tp, _Up>;
+ } // namespace detail
+ template <class T, class U>
+ concept __same_as = detail::__same_as<T, U> && detail::__same_as<U, T>;
+ } // namespace internal
+# endif
+
+ template<class M>
+ concept __layout_mapping_alike = requires {
+ requires __is_extents<typename M::extents_type>::value;
+#if defined(__cpp_lib_concepts)
+ { M::is_always_strided() } -> std::same_as<bool>;
+ { M::is_always_exhaustive() } -> std::same_as<bool>;
+ { M::is_always_unique() } -> std::same_as<bool>;
+#else
+ { M::is_always_strided() } -> internal::__same_as<bool>;
+ { M::is_always_exhaustive() } -> internal::__same_as<bool>;
+ { M::is_always_unique() } -> internal::__same_as<bool>;
+#endif
+ std::bool_constant<M::is_always_strided()>::value;
+ std::bool_constant<M::is_always_exhaustive()>::value;
+ std::bool_constant<M::is_always_unique()>::value;
+ };
+#endif
+
+} // namespace detail
+
+struct layout_stride {
+ template <class Extents>
+ class mapping
+#if !defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : private detail::__no_unique_address_emulation<
+ detail::__compressed_pair<
+ Extents,
+ detail::possibly_empty_array<typename Extents::index_type, Extents::rank()>
+ >
+ >
+#endif
+ {
+ public:
+ using extents_type = Extents;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using layout_type = layout_stride;
+
+ // This could be a `requires`, but I think it's better and clearer as a `static_assert`.
+ static_assert(detail::__is_extents_v<Extents>,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::layout_stride::mapping must be instantiated with a specialization of " MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents.");
+
+
+ private:
+
+ //----------------------------------------------------------------------------
+
+ using __strides_storage_t = detail::possibly_empty_array<index_type, extents_type::rank()>;
+ using __member_pair_t = detail::__compressed_pair<extents_type, __strides_storage_t>;
+
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ _MDSPAN_NO_UNIQUE_ADDRESS __member_pair_t __members;
+#else
+ using __base_t = detail::__no_unique_address_emulation<__member_pair_t>;
+#endif
+
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr __strides_storage_t const&
+ __strides_storage() const noexcept {
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ return __members.__second();
+#else
+ return this->__base_t::__ref().__second();
+#endif
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 __strides_storage_t&
+ __strides_storage() noexcept {
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ return __members.__second();
+#else
+ return this->__base_t::__ref().__second();
+#endif
+ }
+
+ template<class SizeType, size_t ... Ep, size_t ... Idx>
+ _MDSPAN_HOST_DEVICE
+ constexpr index_type __get_size(::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<SizeType, Ep...>,std::integer_sequence<size_t, Idx...>) const {
+ return _MDSPAN_FOLD_TIMES_RIGHT( static_cast<index_type>(extents().extent(Idx)), 1 );
+ }
+
+ //----------------------------------------------------------------------------
+
+ template <class>
+ friend class mapping;
+
+ //----------------------------------------------------------------------------
+
+ // Workaround for non-deducibility of the index sequence template parameter if it's given at the top level
+ template <class>
+ struct __deduction_workaround;
+
+ template <size_t... Idxs>
+ struct __deduction_workaround<std::index_sequence<Idxs...>>
+ {
+ template <class OtherExtents>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr bool _eq_impl(mapping const& self, mapping<OtherExtents> const& other) noexcept {
+ using common_t = std::common_type_t<index_type, typename OtherExtents::index_type>;
+ return _MDSPAN_FOLD_AND((static_cast<common_t>(self.stride(Idxs)) == static_cast<common_t>(other.stride(Idxs))) /* && ... */)
+          && _MDSPAN_FOLD_AND((static_cast<common_t>(self.extents().extent(Idxs)) == static_cast<common_t>(other.extents().extent(Idxs))) /* && ... */);
+ }
+ template <class OtherExtents>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr bool _not_eq_impl(mapping const& self, mapping<OtherExtents> const& other) noexcept {
+ using common_t = std::common_type_t<index_type, typename OtherExtents::index_type>;
+ return _MDSPAN_FOLD_OR((static_cast<common_t>(self.stride(Idxs)) != static_cast<common_t>(other.stride(Idxs))) /* || ... */)
+ || _MDSPAN_FOLD_OR((static_cast<common_t>(self.extents().extent(Idxs)) != static_cast<common_t>(other.extents().extent(Idxs))) /* || ... */);
+ }
+
+ template <class... Integral>
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr size_t _call_op_impl(mapping const& self, Integral... idxs) noexcept {
+ return _MDSPAN_FOLD_PLUS_RIGHT((idxs * self.stride(Idxs)), /* + ... + */ 0);
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ static constexpr size_t _req_span_size_impl(mapping const& self) noexcept {
+ // assumes no negative strides; not sure if I'm allowed to assume that or not
+ return __impl::_call_op_impl(self, (self.extents().template __extent<Idxs>() - 1)...) + 1;
+ }
+
+ template<class OtherMapping>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr const __strides_storage_t fill_strides(const OtherMapping& map) {
+ return __strides_storage_t{static_cast<index_type>(map.stride(Idxs))...};
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ static constexpr const __strides_storage_t& fill_strides(const __strides_storage_t& s) {
+ return s;
+ }
+
+ template<class IntegralType>
+ static constexpr const __strides_storage_t fill_strides(const std::array<IntegralType,extents_type::rank()>& s) {
+ return __strides_storage_t{static_cast<index_type>(s[Idxs])...};
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class IntegralType,
+ (std::is_convertible<IntegralType, typename extents_type::index_type>::value)
+ )
+ MDSPAN_INLINE_FUNCTION
+    // Need to avoid a zero-length C array
+ static constexpr const __strides_storage_t fill_strides(mdspan_non_standard_tag, const IntegralType (&s)[extents_type::rank()>0?extents_type::rank():1]) {
+ return __strides_storage_t{static_cast<index_type>(s[Idxs])...};
+ }
+
+#ifdef __cpp_lib_span
+ template<class IntegralType>
+ static constexpr const __strides_storage_t fill_strides(const std::span<IntegralType,extents_type::rank()>& s) {
+ return __strides_storage_t{static_cast<index_type>(s[Idxs])...};
+ }
+#endif
+
+ MDSPAN_INLINE_FUNCTION
+ static constexpr std::array<index_type, extents_type::rank()> return_strides(const __strides_storage_t& s) {
+ return std::array<index_type, extents_type::rank()>{s[Idxs]...};
+ }
+
+ template<size_t K>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr size_t __return_zero() { return 0; }
+
+ template<class Mapping>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr typename Mapping::index_type
+ __OFFSET(const Mapping& m) { return m(__return_zero<Idxs>()...); }
+ };
+
+  // Can't use a defaulted parameter in the __deduction_workaround template because of an MSVC bug that triggers warning C4348.
+ using __impl = __deduction_workaround<std::make_index_sequence<Extents::rank()>>;
+
+ MDSPAN_FUNCTION
+ static constexpr __strides_storage_t strides_storage(detail::with_rank<0>) {
+ return {};
+ }
+
+ template <std::size_t N>
+ MDSPAN_FUNCTION
+ static constexpr __strides_storage_t strides_storage(detail::with_rank<N>) {
+ __strides_storage_t s{};
+
+ extents_type e;
+ index_type stride = 1;
+ for(int r = static_cast<int>(extents_type::rank() - 1); r >= 0; r--) {
+ s[r] = stride;
+ stride *= e.extent(r);
+ }
+
+ return s;
+ }
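+  // For example (illustrative): with static extents 2 x 3 x 4 the loop above
+  // fills right to left and produces the layout_right-style strides {12, 4, 1}.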
+
+ //----------------------------------------------------------------------------
+
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ MDSPAN_INLINE_FUNCTION constexpr explicit
+ mapping(__member_pair_t&& __m) : __members(::std::move(__m)) {}
+#else
+ MDSPAN_INLINE_FUNCTION constexpr explicit
+ mapping(__base_t&& __b) : __base_t(::std::move(__b)) {}
+#endif
+
+ public:
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_INLINE_FUNCTION constexpr mapping() noexcept
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : __members{
+#else
+ : __base_t(__base_t{__member_pair_t(
+#endif
+ extents_type(),
+ __strides_storage_t(strides_storage(detail::with_rank<extents_type::rank()>{}))
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ }
+#else
+ )})
+#endif
+ {}
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(mapping const&) noexcept = default;
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class IntegralTypes,
+ /* requires */ (
+ // MSVC 19.32 does not like using index_type here, requires the typename Extents::index_type
+ // error C2641: cannot deduce template arguments for 'MDSPAN_IMPL_STANDARD_NAMESPACE::layout_stride::mapping'
+ _MDSPAN_TRAIT(std::is_convertible, const std::remove_const_t<IntegralTypes>&, typename Extents::index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, typename Extents::index_type, const std::remove_const_t<IntegralTypes>&)
+ )
+ )
+ constexpr
+ mapping(
+ extents_type const& e,
+ std::array<IntegralTypes, extents_type::rank()> const& s
+ ) noexcept
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : __members{
+#else
+ : __base_t(__base_t{__member_pair_t(
+#endif
+ e, __strides_storage_t(__impl::fill_strides(s))
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ }
+#else
+ )})
+#endif
+ {
+ /*
+ * TODO: check preconditions
+ * - s[i] > 0 is true for all i in the range [0, rank_ ).
+ * - REQUIRED-SPAN-SIZE(e, s) is a representable value of type index_type ([basic.fundamental]).
+ * - If rank_ is greater than 0, then there exists a permutation P of the integers in the
+ * range [0, rank_), such that s[ pi ] >= s[ pi − 1 ] * e.extent( pi − 1 ) is true for
+ * all i in the range [1, rank_ ), where pi is the ith element of P.
+ */
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class IntegralTypes,
+ /* requires */ (
+ // MSVC 19.32 does not like using index_type here, requires the typename Extents::index_type
+ // error C2641: cannot deduce template arguments for 'MDSPAN_IMPL_STANDARD_NAMESPACE::layout_stride::mapping'
+ _MDSPAN_TRAIT(std::is_convertible, const std::remove_const_t<IntegralTypes>&, typename Extents::index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, typename Extents::index_type, const std::remove_const_t<IntegralTypes>&)
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr
+ mapping(
+ mdspan_non_standard_tag,
+ extents_type const& e,
+ // Need to avoid zero-length c-array
+ const IntegralTypes (&s)[extents_type::rank()>0?extents_type::rank():1]
+ ) noexcept
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : __members{
+#else
+ : __base_t(__base_t{__member_pair_t(
+#endif
+ e, __strides_storage_t(__impl::fill_strides(mdspan_non_standard, s))
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ }
+#else
+ )})
+#endif
+ {
+ /*
+ * TODO: check preconditions
+ * - s[i] > 0 is true for all i in the range [0, rank_ ).
+ * - REQUIRED-SPAN-SIZE(e, s) is a representable value of type index_type ([basic.fundamental]).
+ * - If rank_ is greater than 0, then there exists a permutation P of the integers in the
+ * range [0, rank_), such that s[ pi ] >= s[ pi − 1 ] * e.extent( pi − 1 ) is true for
+ * all i in the range [1, rank_ ), where pi is the ith element of P.
+ */
+ }
+
+#ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class IntegralTypes,
+ /* requires */ (
+ // MSVC 19.32 does not like using index_type here, requires the typename Extents::index_type
+ // error C2641: cannot deduce template arguments for 'MDSPAN_IMPL_STANDARD_NAMESPACE::layout_stride::mapping'
+ _MDSPAN_TRAIT(std::is_convertible, const std::remove_const_t<IntegralTypes>&, typename Extents::index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, typename Extents::index_type, const std::remove_const_t<IntegralTypes>&)
+ )
+ )
+ constexpr
+ mapping(
+ extents_type const& e,
+ std::span<IntegralTypes, extents_type::rank()> const& s
+ ) noexcept
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : __members{
+#else
+ : __base_t(__base_t{__member_pair_t(
+#endif
+ e, __strides_storage_t(__impl::fill_strides(s))
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ }
+#else
+ )})
+#endif
+ {
+ /*
+ * TODO: check preconditions
+ * - s[i] > 0 is true for all i in the range [0, rank_ ).
+ * - REQUIRED-SPAN-SIZE(e, s) is a representable value of type index_type ([basic.fundamental]).
+ * - If rank_ is greater than 0, then there exists a permutation P of the integers in the
+ * range [0, rank_), such that s[ pi ] >= s[ pi − 1 ] * e.extent( pi − 1 ) is true for
+ * all i in the range [1, rank_ ), where pi is the ith element of P.
+ */
+ }
+#endif // __cpp_lib_span
+
+#if !(defined(_MDSPAN_USE_CONCEPTS) && MDSPAN_HAS_CXX_20)
+ MDSPAN_TEMPLATE_REQUIRES(
+ class StridedLayoutMapping,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, typename StridedLayoutMapping::extents_type) &&
+ detail::__is_mapping_of<typename StridedLayoutMapping::layout_type, StridedLayoutMapping> &&
+ StridedLayoutMapping::is_always_unique() &&
+ StridedLayoutMapping::is_always_strided()
+ )
+ )
+#else
+ template<class StridedLayoutMapping>
+ requires(
+ detail::__layout_mapping_alike<StridedLayoutMapping> &&
+ _MDSPAN_TRAIT(std::is_constructible, extents_type, typename StridedLayoutMapping::extents_type) &&
+ StridedLayoutMapping::is_always_unique() &&
+ StridedLayoutMapping::is_always_strided()
+ )
+#endif
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ !(std::is_convertible<typename StridedLayoutMapping::extents_type, extents_type>::value &&
+ (detail::__is_mapping_of<layout_left, StridedLayoutMapping> ||
+ detail::__is_mapping_of<layout_right, StridedLayoutMapping> ||
+ detail::__is_mapping_of<layout_stride, StridedLayoutMapping>))
+ ) // needs two () due to comma
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14
+ mapping(StridedLayoutMapping const& other) noexcept // NOLINT(google-explicit-constructor)
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ : __members{
+#else
+ : __base_t(__base_t{__member_pair_t(
+#endif
+ other.extents(), __strides_storage_t(__impl::fill_strides(other))
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ }
+#else
+ )})
+#endif
+ {
+ /*
+ * TODO: check preconditions
+ * - other.stride(i) > 0 is true for all i in the range [0, rank_ ).
+ * - other.required_span_size() is a representable value of type index_type ([basic.fundamental]).
+ * - OFFSET(other) == 0
+ */
+ }
+
+ //--------------------------------------------------------------------------------
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED _MDSPAN_CONSTEXPR_14_DEFAULTED
+ mapping& operator=(mapping const&) noexcept = default;
+
+ MDSPAN_INLINE_FUNCTION constexpr const extents_type& extents() const noexcept {
+#if defined(_MDSPAN_USE_ATTRIBUTE_NO_UNIQUE_ADDRESS)
+ return __members.__first();
+#else
+ return this->__base_t::__ref().__first();
+#endif
+ };
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr std::array< index_type, extents_type::rank() > strides() const noexcept {
+ return __impl::return_strides(__strides_storage());
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type required_span_size() const noexcept {
+ index_type span_size = 1;
+ // using int here to avoid warning about pointless comparison to 0
+ for(int r = 0; r < static_cast<int>(extents_type::rank()); r++) {
+ // Return early if any of the extents are zero
+ if(extents().extent(r)==0) return 0;
+ span_size += ( static_cast<index_type>(extents().extent(r) - 1 ) * __strides_storage()[r]);
+ }
+ return span_size;
+ }
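+  // Example (illustrative): extents 3 x 4 with strides {4, 1} give
+  // 1 + (3-1)*4 + (4-1)*1 = 12, while any zero extent returns 0 early.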
+
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... Indices,
+ /* requires */ (
+ sizeof...(Indices) == Extents::rank() &&
+ (detail::are_valid_indices<index_type, Indices...>())
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr index_type operator()(Indices... idxs) const noexcept {
+#if ! defined(NDEBUG)
+ detail::check_all_indices(this->extents(), idxs...);
+#endif // ! NDEBUG
+ return static_cast<index_type>(__impl::_call_op_impl(*this, static_cast<index_type>(idxs)...));
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return true; }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept {
+ return false;
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return true; }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept { return true; }
+
+ private:
+ MDSPAN_INLINE_FUNCTION
+ constexpr bool exhaustive_for_nonzero_span_size() const
+ {
+ return required_span_size() == __get_size(extents(), std::make_index_sequence<extents_type::rank()>());
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr bool is_exhaustive_impl(detail::with_rank<0>) const
+ {
+ return true;
+ }
+ MDSPAN_INLINE_FUNCTION
+ constexpr bool is_exhaustive_impl(detail::with_rank<1>) const
+ {
+ if (required_span_size() != static_cast<index_type>(0)) {
+ return exhaustive_for_nonzero_span_size();
+ }
+ return stride(0) == 1;
+ }
+ template <std::size_t N>
+ MDSPAN_INLINE_FUNCTION
+ constexpr bool is_exhaustive_impl(detail::with_rank<N>) const
+ {
+ if (required_span_size() != static_cast<index_type>(0)) {
+ return exhaustive_for_nonzero_span_size();
+ }
+
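+    // Zero-size case: report the mapping as exhaustive only if exactly one
+    // extent is zero and it is the extent with the largest stride.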
+ rank_type r_largest = 0;
+ for (rank_type r = 1; r < extents_type::rank(); r++) {
+ if (stride(r) > stride(r_largest)) {
+ r_largest = r;
+ }
+ }
+ for (rank_type r = 0; r < extents_type::rank(); r++) {
+ if (extents().extent(r) == 0 && r != r_largest) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public:
+ MDSPAN_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 bool is_exhaustive() const noexcept {
+ return is_exhaustive_impl(detail::with_rank<extents_type::rank()>{});
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept { return true; }
+
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type stride(rank_type r) const noexcept {
+ return __strides_storage()[r];
+ }
+
+#if !(defined(_MDSPAN_USE_CONCEPTS) && MDSPAN_HAS_CXX_20)
+ MDSPAN_TEMPLATE_REQUIRES(
+ class StridedLayoutMapping,
+ /* requires */ (
+ detail::__is_mapping_of<typename StridedLayoutMapping::layout_type, StridedLayoutMapping> &&
+ (extents_type::rank() == StridedLayoutMapping::extents_type::rank()) &&
+ StridedLayoutMapping::is_always_strided()
+ )
+ )
+#else
+ template<class StridedLayoutMapping>
+ requires(
+ detail::__layout_mapping_alike<StridedLayoutMapping> &&
+ (extents_type::rank() == StridedLayoutMapping::extents_type::rank()) &&
+ StridedLayoutMapping::is_always_strided()
+ )
+#endif
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator==(const mapping& x, const StridedLayoutMapping& y) noexcept {
+ return (x.extents() == y.extents()) &&
+ (__impl::__OFFSET(y) == static_cast<typename StridedLayoutMapping::index_type>(0)) &&
+ detail::rankwise_equal(detail::with_rank<extents_type::rank()>{}, x, y, detail::stride);
+ }
+
+  // This overload is not technically part of the proposal; it is provided so that comparing two layout_stride mappings can be a bit more efficient than the generic path above.
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ (extents_type::rank() == OtherExtents::rank())
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator==(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return __impl::_eq_impl(lhs, rhs);
+ }
+
+#if !MDSPAN_HAS_CXX_20
+ MDSPAN_TEMPLATE_REQUIRES(
+ class StridedLayoutMapping,
+ /* requires */ (
+ detail::__is_mapping_of<typename StridedLayoutMapping::layout_type, StridedLayoutMapping> &&
+ (extents_type::rank() == StridedLayoutMapping::extents_type::rank()) &&
+ StridedLayoutMapping::is_always_strided()
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator!=(const mapping& x, const StridedLayoutMapping& y) noexcept {
+ return !(x == y);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherExtents,
+ /* requires */ (
+ (extents_type::rank() == OtherExtents::rank())
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr bool operator!=(mapping const& lhs, mapping<OtherExtents> const& rhs) noexcept {
+ return __impl::_not_eq_impl(lhs, rhs);
+ }
+#endif
+
+ // [mdspan.submdspan.mapping], submdspan mapping specialization
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto submdspan_mapping_impl(
+ SliceSpecifiers... slices) const;
+
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr auto submdspan_mapping(
+ const mapping& src, SliceSpecifiers... slices) {
+ return src.submdspan_mapping_impl(slices...);
+ }
+ };
+};
+
+namespace detail {
+
+template <class Layout, class Extents, class Mapping>
+MDSPAN_INLINE_FUNCTION
+constexpr void validate_strides(with_rank<0>, Layout, const Extents&, const Mapping&)
+{}
+
+template <std::size_t N, class Layout, class Extents, class Mapping>
+MDSPAN_INLINE_FUNCTION
+constexpr void validate_strides(with_rank<N>, Layout, const Extents& ext, const Mapping& other)
+{
+ static_assert(std::is_same<typename Mapping::layout_type, layout_stride>::value &&
+ (std::is_same<Layout, layout_left>::value ||
+ std::is_same<Layout, layout_right>::value)
+ , "This function is only intended to validate construction of "
+ "a layout_left or layout_right mapping from a layout_stride mapping.");
+
+ constexpr auto is_left = std::is_same<Layout, layout_left>::value;
+
+ typename Extents::index_type expected_stride = 1;
+
+ for (std::size_t r = 0; r < N; r++) {
+ const std::size_t s = is_left ? r : N - 1 - r;
+
+ MDSPAN_IMPL_PRECONDITION(common_integral_compare(expected_stride, other.stride(s))
+ && "invalid strides for layout_{left,right}");
+
+ expected_stride *= ext.extent(s);
+ }
+}
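+// Example (illustrative): for layout_right with extents {3, 4} the loop walks
+// right to left and accepts exactly the strides {4, 1}: stride(1) == 1, then
+// stride(0) == 1 * 4.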
+
+} // namespace detail
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include "config.hpp"
+
+#include <cstdio>
+#include <cstdlib>
+#include <type_traits> // std::is_void
+#if defined(_MDSPAN_HAS_CUDA) || defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_SYCL)
+#include "assert.h"
+#endif
+
+#ifndef _MDSPAN_HOST_DEVICE
+# if defined(_MDSPAN_HAS_CUDA) || defined(_MDSPAN_HAS_HIP)
+# define _MDSPAN_HOST_DEVICE __host__ __device__
+# else
+# define _MDSPAN_HOST_DEVICE
+# endif
+#endif
+
+#ifndef MDSPAN_FORCE_INLINE_FUNCTION
+# ifdef _MDSPAN_COMPILER_MSVC // Microsoft compilers
+# define MDSPAN_FORCE_INLINE_FUNCTION __forceinline _MDSPAN_HOST_DEVICE
+# else
+# define MDSPAN_FORCE_INLINE_FUNCTION __attribute__((always_inline)) _MDSPAN_HOST_DEVICE
+# endif
+#endif
+
+#ifndef MDSPAN_INLINE_FUNCTION
+# define MDSPAN_INLINE_FUNCTION inline _MDSPAN_HOST_DEVICE
+#endif
+
+#ifndef MDSPAN_FUNCTION
+# define MDSPAN_FUNCTION _MDSPAN_HOST_DEVICE
+#endif
+
+#ifdef _MDSPAN_HAS_HIP
+# define MDSPAN_DEDUCTION_GUIDE _MDSPAN_HOST_DEVICE
+#else
+# define MDSPAN_DEDUCTION_GUIDE
+#endif
+
+// In CUDA, defaulted functions do not need host/device markup
+#ifndef MDSPAN_INLINE_FUNCTION_DEFAULTED
+# define MDSPAN_INLINE_FUNCTION_DEFAULTED
+#endif
+
+//==============================================================================
+// <editor-fold desc="Preprocessor helpers"> {{{1
+
+#define MDSPAN_PP_COUNT(...) \
+ _MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE( \
+ _MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(__VA_ARGS__) \
+ )
+
+#define _MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(...) unused, __VA_ARGS__
+#define _MDSPAN_PP_INTERNAL_EXPAND(x) x
+#define _MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE(...) \
+ _MDSPAN_PP_INTERNAL_EXPAND( \
+ _MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \
+ __VA_ARGS__, 69, 68, 67, 66, 65, 64, 63, 62, 61, \
+ 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, \
+ 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, \
+ 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, \
+ 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, \
+ 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 \
+ ) \
+ )
+# define _MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \
+ _1_, _2_, _3_, _4_, _5_, _6_, _7_, _8_, _9_, \
+ _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, \
+ _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, \
+ _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, \
+ _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, \
+ _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, \
+ _60, _61, _62, _63, _64, _65, _66, _67, _68, _69, \
+ _70, count, ...) count \
+ /**/
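+// For example, MDSPAN_PP_COUNT(a, b, c) expands to 3: the augmenter prepends
+// a dummy argument and the descending number list above shifts so that the
+// `count` parameter lands exactly on the number of original arguments.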
+
+#define MDSPAN_PP_STRINGIFY_IMPL(x) #x
+#define MDSPAN_PP_STRINGIFY(x) MDSPAN_PP_STRINGIFY_IMPL(x)
+
+#define MDSPAN_PP_CAT_IMPL(x, y) x ## y
+#define MDSPAN_PP_CAT(x, y) MDSPAN_PP_CAT_IMPL(x, y)
+
+#define MDSPAN_PP_EVAL(X, ...) X(__VA_ARGS__)
+
+#define MDSPAN_PP_REMOVE_PARENS_IMPL(...) __VA_ARGS__
+#define MDSPAN_PP_REMOVE_PARENS(...) MDSPAN_PP_REMOVE_PARENS_IMPL __VA_ARGS__
+
+#define MDSPAN_IMPL_STANDARD_NAMESPACE_STRING MDSPAN_PP_STRINGIFY(MDSPAN_IMPL_STANDARD_NAMESPACE)
+#define MDSPAN_IMPL_PROPOSED_NAMESPACE_STRING MDSPAN_PP_STRINGIFY(MDSPAN_IMPL_STANDARD_NAMESPACE) "::" MDSPAN_PP_STRINGIFY(MDSPAN_IMPL_PROPOSED_NAMESPACE)
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+#if defined(_MDSPAN_HAS_CUDA) || defined(_MDSPAN_HAS_HIP)
+MDSPAN_FUNCTION inline void default_precondition_violation_handler(const char* cond, const char* file, unsigned line)
+{
+ printf("%s:%u: precondition failure: `%s`\n", file, line, cond);
+ assert(0);
+}
+#elif defined(_MDSPAN_HAS_SYCL)
+MDSPAN_FUNCTION inline void default_precondition_violation_handler(const char* cond, const char* file, unsigned line)
+{
+ sycl::ext::oneapi::experimental::printf("%s:%u: precondition failure: `%s`\n", file, line, cond);
+ assert(0);
+}
+#else
+MDSPAN_FUNCTION inline void default_precondition_violation_handler(const char* cond, const char* file, unsigned line)
+{
+ std::fprintf(stderr, "%s:%u: precondition failure: `%s`\n", file, line, cond);
+ std::abort();
+}
+#endif
+
+} // namespace detail
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#ifndef MDSPAN_IMPL_PRECONDITION_VIOLATION_HANDLER
+#define MDSPAN_IMPL_PRECONDITION_VIOLATION_HANDLER(cond, file, line) \
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::default_precondition_violation_handler(cond, file, line)
+#endif
+
+// Precondition checks are enabled by default in debug builds (NDEBUG not
+// defined); define MDSPAN_IMPL_CHECK_PRECONDITION yourself to override.
+#ifndef MDSPAN_IMPL_CHECK_PRECONDITION
+  #ifndef NDEBUG
+  #define MDSPAN_IMPL_CHECK_PRECONDITION 1
+  #else
+  #define MDSPAN_IMPL_CHECK_PRECONDITION 0
+  #endif
+#endif
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+template <bool check = MDSPAN_IMPL_CHECK_PRECONDITION>
+MDSPAN_FUNCTION constexpr void precondition(const char* cond, const char* file, unsigned line)
+{
+ if (!check) { return; }
+ // in case the macro doesn't use the arguments for custom macros
+ (void) cond;
+ (void) file;
+ (void) line;
+ MDSPAN_IMPL_PRECONDITION_VIOLATION_HANDLER(cond, file, line);
+}
+
+} // namespace detail
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#define MDSPAN_IMPL_PRECONDITION(...) \
+ do { \
+ if (!(__VA_ARGS__)) { \
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::precondition(#__VA_ARGS__, __FILE__, __LINE__); \
+ } \
+ } while (0)
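+
+// Usage sketch (illustrative; `idx` and `bound` stand for any expressions):
+//
+//   MDSPAN_IMPL_PRECONDITION(idx < bound && "index out of range");
+//
+// The condition is always evaluated; the handler call collapses to a no-op
+// unless MDSPAN_IMPL_CHECK_PRECONDITION is nonzero. A client may install its
+// own handler by defining MDSPAN_IMPL_PRECONDITION_VIOLATION_HANDLER(cond,
+// file, line) before including this header.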
+
+// </editor-fold> end Preprocessor helpers }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Concept emulation"> {{{1
+
+// These compatibility macros don't help with partial ordering, but they should do the trick
+// for what we need to do with concepts in mdspan
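+//
+// A sketch of the two expansion paths (illustrative):
+//
+//   MDSPAN_TEMPLATE_REQUIRES(class T, (std::is_void<T>::value))
+//
+// expands roughly to `template<class T> requires (std::is_void<T>::value)`
+// when concepts are available, and to `template<class T, typename
+// ::std::enable_if<(std::is_void<T>::value), int>::type = 0>` under the
+// enable_if fallback below.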
+#ifdef _MDSPAN_USE_CONCEPTS
+# define MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) > requires REQ
+# define MDSPAN_FUNCTION_REQUIRES(PAREN_PREQUALS, FNAME, PAREN_PARAMS, QUALS, REQ) \
+ MDSPAN_PP_REMOVE_PARENS(PAREN_PREQUALS) FNAME PAREN_PARAMS QUALS requires REQ \
+ /**/
+#else
+# define MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) , typename ::std::enable_if<(REQ), int>::type = 0>
+# define MDSPAN_FUNCTION_REQUIRES(PAREN_PREQUALS, FNAME, PAREN_PARAMS, QUALS, REQ) \
+ MDSPAN_TEMPLATE_REQUIRES( \
+ class __function_requires_ignored=void, \
+ (std::is_void<__function_requires_ignored>::value && REQ) \
+ ) MDSPAN_PP_REMOVE_PARENS(PAREN_PREQUALS) FNAME PAREN_PARAMS QUALS \
+ /**/
+#endif
+
+#if defined(_MDSPAN_COMPILER_MSVC) && (!defined(_MSVC_TRADITIONAL) || _MSVC_TRADITIONAL)
+# define MDSPAN_TEMPLATE_REQUIRES(...) \
+ MDSPAN_PP_CAT( \
+ MDSPAN_PP_CAT(MDSPAN_TEMPLATE_REQUIRES_, MDSPAN_PP_COUNT(__VA_ARGS__))\
+ (__VA_ARGS__), \
+ ) \
+ /**/
+#else
+# define MDSPAN_TEMPLATE_REQUIRES(...) \
+ MDSPAN_PP_EVAL( \
+ MDSPAN_PP_CAT(MDSPAN_TEMPLATE_REQUIRES_, MDSPAN_PP_COUNT(__VA_ARGS__)), \
+ __VA_ARGS__ \
+ ) \
+ /**/
+#endif
+
+#define MDSPAN_TEMPLATE_REQUIRES_2(TP1, REQ) \
+ template<TP1 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_3(TP1, TP2, REQ) \
+ template<TP1, TP2 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_4(TP1, TP2, TP3, REQ) \
+ template<TP1, TP2, TP3 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_5(TP1, TP2, TP3, TP4, REQ) \
+ template<TP1, TP2, TP3, TP4 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_6(TP1, TP2, TP3, TP4, TP5, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_7(TP1, TP2, TP3, TP4, TP5, TP6, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_8(TP1, TP2, TP3, TP4, TP5, TP6, TP7, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_9(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_10(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_11(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_12(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_13(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_14(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_15(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_16(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_17(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_18(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_19(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+#define MDSPAN_TEMPLATE_REQUIRES_20(TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, TP19, REQ) \
+ template<TP1, TP2, TP3, TP4, TP5, TP6, TP7, TP8, TP9, TP10, TP11, TP12, TP13, TP14, TP15, TP16, TP17, TP18, TP19 \
+ MDSPAN_CLOSE_ANGLE_REQUIRES(REQ) \
+ /**/
+
+#define MDSPAN_INSTANTIATE_ONLY_IF_USED \
+ MDSPAN_TEMPLATE_REQUIRES( \
+ class __instantiate_only_if_used_tparam=void, \
+ ( _MDSPAN_TRAIT(std::is_void, __instantiate_only_if_used_tparam) ) \
+ ) \
+ /**/
+
+// </editor-fold> end Concept emulation }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="inline variables"> {{{1
+
+#ifdef _MDSPAN_USE_INLINE_VARIABLES
+# define _MDSPAN_INLINE_VARIABLE inline
+#else
+# define _MDSPAN_INLINE_VARIABLE
+#endif
+
+// </editor-fold> end inline variables }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Return type deduction"> {{{1
+
+#if _MDSPAN_USE_RETURN_TYPE_DEDUCTION
+# define _MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
+ auto MDSPAN_PP_REMOVE_PARENS(SIGNATURE) { return MDSPAN_PP_REMOVE_PARENS(BODY); }
+# define _MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
+ decltype(auto) MDSPAN_PP_REMOVE_PARENS(SIGNATURE) { return MDSPAN_PP_REMOVE_PARENS(BODY); }
+#else
+# define _MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
+ auto MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \
+ -> std::remove_cv_t<std::remove_reference_t<decltype(BODY)>> \
+ { return MDSPAN_PP_REMOVE_PARENS(BODY); }
+# define _MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \
+ auto MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \
+ -> decltype(BODY) \
+ { return MDSPAN_PP_REMOVE_PARENS(BODY); }
+
+#endif
+
+// </editor-fold> end Return type deduction }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="fold expressions"> {{{1
+
+struct __mdspan_enable_fold_comma { };
+
+#ifdef _MDSPAN_USE_FOLD_EXPRESSIONS
+# define _MDSPAN_FOLD_AND(...) ((__VA_ARGS__) && ...)
+# define _MDSPAN_FOLD_AND_TEMPLATE(...) ((__VA_ARGS__) && ...)
+# define _MDSPAN_FOLD_OR(...) ((__VA_ARGS__) || ...)
+# define _MDSPAN_FOLD_ASSIGN_LEFT(INIT, ...) (INIT = ... = (__VA_ARGS__))
+# define _MDSPAN_FOLD_ASSIGN_RIGHT(PACK, ...) (PACK = ... = (__VA_ARGS__))
+# define _MDSPAN_FOLD_TIMES_RIGHT(PACK, ...) (PACK * ... * (__VA_ARGS__))
+# define _MDSPAN_FOLD_PLUS_RIGHT(PACK, ...) (PACK + ... + (__VA_ARGS__))
+# define _MDSPAN_FOLD_COMMA(...) ((__VA_ARGS__), ...)
+#else
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+namespace __fold_compatibility_impl {
+
+// We could probably be more clever here, but at the (small) risk of compilers
+// no longer seeing through the code. For the few operations we need, it's not
+// worth generalizing over the operation.
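+//
+// For example (illustrative): used in a pack-expansion context,
+// _MDSPAN_FOLD_AND(pred(Idxs)) becomes
+// __fold_right_and_impl(pred(0), pred(1), ..., pred(N-1)) via the macros at
+// the end of this block, recursing pairwise instead of using a C++17 fold.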
+
+#if _MDSPAN_USE_RETURN_TYPE_DEDUCTION
+
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr decltype(auto) __fold_right_and_impl() {
+ return true;
+}
+
+template <class Arg, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr decltype(auto) __fold_right_and_impl(Arg&& arg, Args&&... args) {
+ return ((Arg&&)arg) && __fold_compatibility_impl::__fold_right_and_impl((Args&&)args...);
+}
+
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr decltype(auto) __fold_right_or_impl() {
+ return false;
+}
+
+template <class Arg, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_or_impl(Arg&& arg, Args&&... args) {
+ return ((Arg&&)arg) || __fold_compatibility_impl::__fold_right_or_impl((Args&&)args...);
+}
+
+template <class Arg1>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_left_assign_impl(Arg1&& arg1) {
+ return (Arg1&&)arg1;
+}
+
+template <class Arg1, class Arg2, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_left_assign_impl(Arg1&& arg1, Arg2&& arg2, Args&&... args) {
+ return __fold_compatibility_impl::__fold_left_assign_impl((((Arg1&&)arg1) = ((Arg2&&)arg2)), (Args&&)args...);
+}
+
+template <class Arg1>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_assign_impl(Arg1&& arg1) {
+ return (Arg1&&)arg1;
+}
+
+template <class Arg1, class Arg2, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_assign_impl(Arg1&& arg1, Arg2&& arg2, Args&&... args) {
+ return ((Arg1&&)arg1) = __fold_compatibility_impl::__fold_right_assign_impl((Arg2&&)arg2, (Args&&)args...);
+}
+
+template <class Arg1>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_plus_impl(Arg1&& arg1) {
+ return (Arg1&&)arg1;
+}
+
+template <class Arg1, class Arg2, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_plus_impl(Arg1&& arg1, Arg2&& arg2, Args&&... args) {
+ return ((Arg1&&)arg1) + __fold_compatibility_impl::__fold_right_plus_impl((Arg2&&)arg2, (Args&&)args...);
+}
+
+template <class Arg1>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_times_impl(Arg1&& arg1) {
+ return (Arg1&&)arg1;
+}
+
+template <class Arg1, class Arg2, class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr auto __fold_right_times_impl(Arg1&& arg1, Arg2&& arg2, Args&&... args) {
+ return ((Arg1&&)arg1) * __fold_compatibility_impl::__fold_right_times_impl((Arg2&&)arg2, (Args&&)args...);
+}
+
+#else
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="right and"> {{{2
+
+template <class... Args>
+struct __fold_right_and_impl_;
+template <>
+struct __fold_right_and_impl_<> {
+ using __rv = bool;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl() noexcept {
+ return true;
+ }
+};
+template <class Arg, class... Args>
+struct __fold_right_and_impl_<Arg, Args...> {
+ using __next_t = __fold_right_and_impl_<Args...>;
+ using __rv = decltype(std::declval<Arg>() && std::declval<typename __next_t::__rv>());
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg, Args&&... args) noexcept {
+ return ((Arg&&)arg) && __next_t::__impl((Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_right_and_impl_<Args...>::__rv
+__fold_right_and_impl(Args&&... args) {
+ return __fold_right_and_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end right and }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="right or"> {{{2
+
+template <class... Args>
+struct __fold_right_or_impl_;
+template <>
+struct __fold_right_or_impl_<> {
+ using __rv = bool;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl() noexcept {
+ return false;
+ }
+};
+template <class Arg, class... Args>
+struct __fold_right_or_impl_<Arg, Args...> {
+ using __next_t = __fold_right_or_impl_<Args...>;
+ using __rv = decltype(std::declval<Arg>() || std::declval<typename __next_t::__rv>());
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg, Args&&... args) noexcept {
+ return ((Arg&&)arg) || __next_t::__impl((Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_right_or_impl_<Args...>::__rv
+__fold_right_or_impl(Args&&... args) {
+ return __fold_right_or_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end right or }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="right plus"> {{{2
+
+template <class... Args>
+struct __fold_right_plus_impl_;
+template <class Arg>
+struct __fold_right_plus_impl_<Arg> {
+ using __rv = Arg&&;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg) noexcept {
+ return (Arg&&)arg;
+ }
+};
+template <class Arg1, class Arg2, class... Args>
+struct __fold_right_plus_impl_<Arg1, Arg2, Args...> {
+ using __next_t = __fold_right_plus_impl_<Arg2, Args...>;
+ using __rv = decltype(std::declval<Arg1>() + std::declval<typename __next_t::__rv>());
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg1&& arg, Arg2&& arg2, Args&&... args) noexcept {
+ return ((Arg1&&)arg) + __next_t::__impl((Arg2&&)arg2, (Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_right_plus_impl_<Args...>::__rv
+__fold_right_plus_impl(Args&&... args) {
+ return __fold_right_plus_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end right plus }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="right times"> {{{2
+
+template <class... Args>
+struct __fold_right_times_impl_;
+template <class Arg>
+struct __fold_right_times_impl_<Arg> {
+ using __rv = Arg&&;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg) noexcept {
+ return (Arg&&)arg;
+ }
+};
+template <class Arg1, class Arg2, class... Args>
+struct __fold_right_times_impl_<Arg1, Arg2, Args...> {
+ using __next_t = __fold_right_times_impl_<Arg2, Args...>;
+ using __rv = decltype(std::declval<Arg1>() * std::declval<typename __next_t::__rv>());
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg1&& arg, Arg2&& arg2, Args&&... args) noexcept {
+ return ((Arg1&&)arg) * __next_t::__impl((Arg2&&)arg2, (Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_right_times_impl_<Args...>::__rv
+__fold_right_times_impl(Args&&... args) {
+ return __fold_right_times_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end right times }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="right assign"> {{{2
+
+template <class... Args>
+struct __fold_right_assign_impl_;
+template <class Arg>
+struct __fold_right_assign_impl_<Arg> {
+ using __rv = Arg&&;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg) noexcept {
+ return (Arg&&)arg;
+ }
+};
+template <class Arg1, class Arg2, class... Args>
+struct __fold_right_assign_impl_<Arg1, Arg2, Args...> {
+ using __next_t = __fold_right_assign_impl_<Arg2, Args...>;
+ using __rv = decltype(std::declval<Arg1>() = std::declval<typename __next_t::__rv>());
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg1&& arg, Arg2&& arg2, Args&&... args) noexcept {
+ return ((Arg1&&)arg) = __next_t::__impl((Arg2&&)arg2, (Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_right_assign_impl_<Args...>::__rv
+__fold_right_assign_impl(Args&&... args) {
+ return __fold_right_assign_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end right assign }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="left assign"> {{{2
+
+template <class... Args>
+struct __fold_left_assign_impl_;
+template <class Arg>
+struct __fold_left_assign_impl_<Arg> {
+ using __rv = Arg&&;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg&& arg) noexcept {
+ return (Arg&&)arg;
+ }
+};
+template <class Arg1, class Arg2, class... Args>
+struct __fold_left_assign_impl_<Arg1, Arg2, Args...> {
+ using __assign_result_t = decltype(std::declval<Arg1>() = std::declval<Arg2>());
+ using __next_t = __fold_left_assign_impl_<__assign_result_t, Args...>;
+ using __rv = typename __next_t::__rv;
+ MDSPAN_FORCE_INLINE_FUNCTION
+ static constexpr __rv
+ __impl(Arg1&& arg, Arg2&& arg2, Args&&... args) noexcept {
+ return __next_t::__impl(((Arg1&&)arg) = (Arg2&&)arg2, (Args&&)args...);
+ }
+};
+
+template <class... Args>
+MDSPAN_FORCE_INLINE_FUNCTION
+constexpr typename __fold_left_assign_impl_<Args...>::__rv
+__fold_left_assign_impl(Args&&... args) {
+ return __fold_left_assign_impl_<Args...>::__impl((Args&&)args...);
+}
+
+// </editor-fold> end left assign }}}2
+//------------------------------------------------------------------------------
+
+#endif
+
+
+template <class... Args>
+constexpr __mdspan_enable_fold_comma __fold_comma_impl(Args&&...) noexcept { return { }; }
+
+template <bool... Bs>
+struct __bools;
+
+} // __fold_compatibility_impl
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+# define _MDSPAN_FOLD_AND(...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_right_and_impl((__VA_ARGS__)...)
+# define _MDSPAN_FOLD_OR(...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_right_or_impl((__VA_ARGS__)...)
+# define _MDSPAN_FOLD_ASSIGN_LEFT(INIT, ...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_left_assign_impl(INIT, (__VA_ARGS__)...)
+# define _MDSPAN_FOLD_ASSIGN_RIGHT(PACK, ...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_right_assign_impl((PACK)..., __VA_ARGS__)
+# define _MDSPAN_FOLD_TIMES_RIGHT(PACK, ...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_right_times_impl((PACK)..., __VA_ARGS__)
+# define _MDSPAN_FOLD_PLUS_RIGHT(PACK, ...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_right_plus_impl((PACK)..., __VA_ARGS__)
+# define _MDSPAN_FOLD_COMMA(...) MDSPAN_IMPL_STANDARD_NAMESPACE::__fold_compatibility_impl::__fold_comma_impl((__VA_ARGS__)...)
+
+# define _MDSPAN_FOLD_AND_TEMPLATE(...) \
+ _MDSPAN_TRAIT(std::is_same, __fold_compatibility_impl::__bools<(__VA_ARGS__)..., true>, __fold_compatibility_impl::__bools<true, (__VA_ARGS__)...>)
+
+#endif
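+
+// The __bools trick in the fallback above (illustrative): a pack Bs... is
+// all-true exactly when appending and prepending `true` produce the same type:
+//   __bools<true, true, true>  vs  __bools<true, true, true>   -> all true
+//   __bools<true, false, true> vs  __bools<true, true, false>  -> mismatch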
+
+// </editor-fold> end fold expressions }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Variable template compatibility"> {{{1
+
+#if _MDSPAN_USE_VARIABLE_TEMPLATES
+# define _MDSPAN_TRAIT(TRAIT, ...) TRAIT##_v<__VA_ARGS__>
+#else
+# define _MDSPAN_TRAIT(TRAIT, ...) TRAIT<__VA_ARGS__>::value
+#endif
+
+// </editor-fold> end Variable template compatibility }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Pre-C++14 constexpr"> {{{1
+
+#if _MDSPAN_USE_CONSTEXPR_14
+# define _MDSPAN_CONSTEXPR_14 constexpr
+// Workaround for a bug (I think?) in EDG frontends
+# ifdef __EDG__
+# define _MDSPAN_CONSTEXPR_14_DEFAULTED
+# else
+# define _MDSPAN_CONSTEXPR_14_DEFAULTED constexpr
+# endif
+#else
+# define _MDSPAN_CONSTEXPR_14
+# define _MDSPAN_CONSTEXPR_14_DEFAULTED
+#endif
+
+// </editor-fold> end Pre-C++14 constexpr }}}1
+//==============================================================================
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include "default_accessor.hpp"
+#include "layout_right.hpp"
+#include "extents.hpp"
+#include "trait_backports.hpp"
+#include "compressed_pair.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+template <
+ class ElementType,
+ class Extents,
+ class LayoutPolicy = layout_right,
+ class AccessorPolicy = default_accessor<ElementType>
+>
+class mdspan
+{
+private:
+ static_assert(detail::__is_extents_v<Extents>,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::mdspan's Extents template parameter must be a specialization of " MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents.");
+ static_assert(std::is_same<ElementType, typename AccessorPolicy::element_type>::value,
+ MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::mdspan's ElementType template parameter must be the same as its AccessorPolicy::element_type.");
+
+ // Workaround for non-deducibility of the index sequence template parameter if it's given at the top level
+ template <class>
+ struct __deduction_workaround;
+
+ template <size_t... Idxs>
+ struct __deduction_workaround<std::index_sequence<Idxs...>>
+ {
+ MDSPAN_FORCE_INLINE_FUNCTION static constexpr
+ size_t __size(mdspan const& __self) noexcept {
+ return _MDSPAN_FOLD_TIMES_RIGHT((__self.__mapping_ref().extents().extent(Idxs)), /* * ... * */ size_t(1));
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION static constexpr
+ bool __empty(mdspan const& __self) noexcept {
+ return (__self.rank()>0) && _MDSPAN_FOLD_OR((__self.__mapping_ref().extents().extent(Idxs)==index_type(0)));
+ }
+ template <class ReferenceType, class SizeType, size_t N>
+ MDSPAN_FORCE_INLINE_FUNCTION static constexpr
+ ReferenceType __callop(mdspan const& __self, const std::array<SizeType, N>& indices) noexcept {
+ return __self.__accessor_ref().access(__self.__ptr_ref(), __self.__mapping_ref()(indices[Idxs]...));
+ }
+#ifdef __cpp_lib_span
+ template <class ReferenceType, class SizeType, size_t N>
+ MDSPAN_FORCE_INLINE_FUNCTION static constexpr
+ ReferenceType __callop(mdspan const& __self, const std::span<SizeType, N>& indices) noexcept {
+ return __self.__accessor_ref().access(__self.__ptr_ref(), __self.__mapping_ref()(indices[Idxs]...));
+ }
+#endif
+ };
+
+public:
+
+ //--------------------------------------------------------------------------------
+ // Domain and codomain types
+
+ using extents_type = Extents;
+ using layout_type = LayoutPolicy;
+ using accessor_type = AccessorPolicy;
+ using mapping_type = typename layout_type::template mapping<extents_type>;
+ using element_type = ElementType;
+ using value_type = std::remove_cv_t<element_type>;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using data_handle_type = typename accessor_type::data_handle_type;
+ using reference = typename accessor_type::reference;
+
+ MDSPAN_INLINE_FUNCTION static constexpr size_t rank() noexcept { return extents_type::rank(); }
+ MDSPAN_INLINE_FUNCTION static constexpr size_t rank_dynamic() noexcept { return extents_type::rank_dynamic(); }
+ MDSPAN_INLINE_FUNCTION static constexpr size_t static_extent(size_t r) noexcept { return extents_type::static_extent(r); }
+ MDSPAN_INLINE_FUNCTION constexpr index_type extent(size_t r) const noexcept { return __mapping_ref().extents().extent(r); };
+
+private:
+
+  // Can't use a defaulted parameter in the __deduction_workaround template because of an MSVC bug that triggers warning C4348.
+ using __impl = __deduction_workaround<std::make_index_sequence<extents_type::rank()>>;
+
+ using __map_acc_pair_t = detail::__compressed_pair<mapping_type, accessor_type>;
+
+public:
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.cons], mdspan constructors, assignment, and destructor
+
+#if !MDSPAN_HAS_CXX_20
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdspan() = default;
+#else
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdspan()
+ requires(
+ // nvhpc has a bug where using just rank_dynamic() here doesn't work ...
+ (extents_type::rank_dynamic() > 0) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, data_handle_type) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, mapping_type) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, accessor_type)
+ ) = default;
+#endif
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdspan(const mdspan&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdspan(mdspan&&) = default;
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ ((sizeof...(SizeTypes) == rank()) || (sizeof...(SizeTypes) == rank_dynamic())) &&
+ (detail::are_valid_indices<index_type, SizeTypes...>()) &&
+ _MDSPAN_TRAIT(std::is_constructible, mapping_type, extents_type) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, accessor_type)
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ explicit constexpr mdspan(data_handle_type p, SizeTypes... dynamic_extents)
+ // TODO @proposal-bug shouldn't I be allowed to do `move(p)` here?
+ : __members(std::move(p), __map_acc_pair_t(mapping_type(extents_type(static_cast<index_type>(std::move(dynamic_extents))...)), accessor_type()))
+ { }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&) &&
+ ((N == rank()) || (N == rank_dynamic())) &&
+ _MDSPAN_TRAIT(std::is_constructible, mapping_type, extents_type) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, accessor_type)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT(N != rank_dynamic())
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdspan(data_handle_type p, const std::array<SizeType, N>& dynamic_extents)
+ : __members(std::move(p), __map_acc_pair_t(mapping_type(extents_type(dynamic_extents)), accessor_type()))
+ { }
+
+#ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&) &&
+ ((N == rank()) || (N == rank_dynamic())) &&
+ _MDSPAN_TRAIT(std::is_constructible, mapping_type, extents_type) &&
+ _MDSPAN_TRAIT(std::is_default_constructible, accessor_type)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT(N != rank_dynamic())
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdspan(data_handle_type p, std::span<SizeType, N> dynamic_extents)
+ : __members(std::move(p), __map_acc_pair_t(mapping_type(extents_type(as_const(dynamic_extents))), accessor_type()))
+ { }
+#endif
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdspan, (data_handle_type p, const extents_type& exts), ,
+ /* requires */ (_MDSPAN_TRAIT(std::is_default_constructible, accessor_type) &&
+ _MDSPAN_TRAIT(std::is_constructible, mapping_type, const extents_type&))
+ ) : __members(std::move(p), __map_acc_pair_t(mapping_type(exts), accessor_type()))
+ { }
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdspan, (data_handle_type p, const mapping_type& m), ,
+ /* requires */ (_MDSPAN_TRAIT(std::is_default_constructible, accessor_type))
+ ) : __members(std::move(p), __map_acc_pair_t(m, accessor_type()))
+ { }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdspan(data_handle_type p, const mapping_type& m, const accessor_type& a)
+ : __members(std::move(p), __map_acc_pair_t(m, a))
+ { }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType, class OtherExtents, class OtherLayoutPolicy, class OtherAccessor,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_constructible, mapping_type, const typename OtherLayoutPolicy::template mapping<OtherExtents>&) &&
+ _MDSPAN_TRAIT(std::is_constructible, accessor_type, const OtherAccessor&)
+ )
+ )
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ !_MDSPAN_TRAIT(std::is_convertible, const typename OtherLayoutPolicy::template mapping<OtherExtents>&, mapping_type) ||
+ !_MDSPAN_TRAIT(std::is_convertible, const OtherAccessor&, accessor_type)
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdspan(const mdspan<OtherElementType, OtherExtents, OtherLayoutPolicy, OtherAccessor>& other)
+ : __members(other.__ptr_ref(), __map_acc_pair_t(other.__mapping_ref(), other.__accessor_ref()))
+ {
+ static_assert(_MDSPAN_TRAIT(std::is_constructible, data_handle_type, typename OtherAccessor::data_handle_type),"Incompatible data_handle_type for mdspan construction");
+ static_assert(_MDSPAN_TRAIT(std::is_constructible, extents_type, OtherExtents),"Incompatible extents for mdspan construction");
+ /*
+ * TODO: Check precondition
+ * For each rank index r of extents_type, static_extent(r) == dynamic_extent || static_extent(r) == other.extent(r) is true.
+ */
+ }
+
+ /* Might need this on NVIDIA?
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~mdspan() = default;
+ */
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED _MDSPAN_CONSTEXPR_14_DEFAULTED mdspan& operator=(const mdspan&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED _MDSPAN_CONSTEXPR_14_DEFAULTED mdspan& operator=(mdspan&&) = default;
+
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.mapping], mdspan mapping domain multidimensional index to access codomain element
+
+ #if MDSPAN_USE_BRACKET_OPERATOR
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT(std::is_convertible, SizeTypes, index_type) /* && ... */) &&
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, SizeTypes) /* && ... */) &&
+ (rank() == sizeof...(SizeTypes))
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](SizeTypes... indices) const
+ {
+ return __accessor_ref().access(__ptr_ref(), __mapping_ref()(static_cast<index_type>(std::move(indices))...));
+ }
+ #endif
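+
+  // Access sketch (illustrative): with the multi-argument bracket operator
+  // enabled (C++23), elements are read as `m[i, j]`; otherwise operator(),
+  // enabled via MDSPAN_USE_PAREN_OPERATOR further below, spells it `m(i, j)`.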
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](const std::array< SizeType, rank()>& indices) const
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+
+ #ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](std::span<SizeType, rank()> indices) const
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+ #endif // __cpp_lib_span
+
+ #if !MDSPAN_USE_BRACKET_OPERATOR
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Index,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, Index, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, Index) &&
+ extents_type::rank() == 1
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](Index idx) const
+ {
+ return __accessor_ref().access(__ptr_ref(), __mapping_ref()(static_cast<index_type>(std::move(idx))));
+ }
+ #endif
+
+ #if MDSPAN_USE_PAREN_OPERATOR
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ extents_type::rank() == sizeof...(SizeTypes) &&
+ (detail::are_valid_indices<index_type, SizeTypes...>())
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator()(SizeTypes... indices) const
+ {
+ return __accessor_ref().access(__ptr_ref(), __mapping_ref()(static_cast<index_type>(std::move(indices))...));
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator()(const std::array<SizeType, rank()>& indices) const
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+
+ #ifdef __cpp_lib_span
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_convertible, const SizeType&, index_type) &&
+ _MDSPAN_TRAIT(std::is_nothrow_constructible, index_type, const SizeType&)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator()(std::span<SizeType, rank()> indices) const
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+ #endif // __cpp_lib_span
+ #endif // MDSPAN_USE_PAREN_OPERATOR
+
+ MDSPAN_INLINE_FUNCTION constexpr size_type size() const noexcept {
+ return static_cast<size_type>(__impl::__size(*this));
+ };
+
+ MDSPAN_INLINE_FUNCTION constexpr bool empty() const noexcept {
+ return __impl::__empty(*this);
+ };
+
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr void swap(mdspan& x, mdspan& y) noexcept {
+    // can't call std::swap inside device code on HIP or CUDA
+ #if !defined(_MDSPAN_HAS_HIP) && !defined(_MDSPAN_HAS_CUDA)
+ using std::swap;
+ swap(x.__ptr_ref(), y.__ptr_ref());
+ swap(x.__mapping_ref(), y.__mapping_ref());
+ swap(x.__accessor_ref(), y.__accessor_ref());
+ #else
+ mdspan tmp = y;
+ y = x;
+ x = tmp;
+ #endif
+ }
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.domobs], mdspan observers of the domain multidimensional index space
+
+
+ MDSPAN_INLINE_FUNCTION constexpr const extents_type& extents() const noexcept { return __mapping_ref().extents(); };
+ MDSPAN_INLINE_FUNCTION constexpr const data_handle_type& data_handle() const noexcept { return __ptr_ref(); };
+ MDSPAN_INLINE_FUNCTION constexpr const mapping_type& mapping() const noexcept { return __mapping_ref(); };
+ MDSPAN_INLINE_FUNCTION constexpr const accessor_type& accessor() const noexcept { return __accessor_ref(); };
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.obs], mdspan observers of the mapping
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() { return mapping_type::is_always_unique(); };
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() { return mapping_type::is_always_exhaustive(); };
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() { return mapping_type::is_always_strided(); };
+
+ MDSPAN_INLINE_FUNCTION constexpr bool is_unique() const { return __mapping_ref().is_unique(); };
+ MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const { return __mapping_ref().is_exhaustive(); };
+ MDSPAN_INLINE_FUNCTION constexpr bool is_strided() const { return __mapping_ref().is_strided(); };
+ MDSPAN_INLINE_FUNCTION constexpr index_type stride(size_t r) const { return __mapping_ref().stride(r); };
+
+private:
+
+ detail::__compressed_pair<data_handle_type, __map_acc_pair_t> __members{};
+
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 data_handle_type& __ptr_ref() noexcept { return __members.__first(); }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr data_handle_type const& __ptr_ref() const noexcept { return __members.__first(); }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 mapping_type& __mapping_ref() noexcept { return __members.__second().__first(); }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr mapping_type const& __mapping_ref() const noexcept { return __members.__second().__first(); }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 accessor_type& __accessor_ref() noexcept { return __members.__second().__second(); }
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr accessor_type const& __accessor_ref() const noexcept { return __members.__second().__second(); }
+
+ template <class, class, class, class>
+ friend class mdspan;
+
+};
+
+#if defined(_MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+MDSPAN_TEMPLATE_REQUIRES(
+ class ElementType, class... SizeTypes,
+ /* requires */ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT(std::is_convertible, SizeTypes, size_t) /* && ... */) &&
+ (sizeof...(SizeTypes) > 0)
+)
+MDSPAN_DEDUCTION_GUIDE explicit mdspan(ElementType*, SizeTypes...)
+ -> mdspan<ElementType, ::MDSPAN_IMPL_STANDARD_NAMESPACE::dextents<size_t, sizeof...(SizeTypes)>>;
+
+MDSPAN_TEMPLATE_REQUIRES(
+ class Pointer,
+ (_MDSPAN_TRAIT(std::is_pointer, std::remove_reference_t<Pointer>))
+)
+MDSPAN_DEDUCTION_GUIDE mdspan(Pointer&&) -> mdspan<std::remove_pointer_t<std::remove_reference_t<Pointer>>, extents<size_t>>;
+
+MDSPAN_TEMPLATE_REQUIRES(
+ class CArray,
+ (_MDSPAN_TRAIT(std::is_array, CArray) && (std::rank_v<CArray> == 1))
+)
+MDSPAN_DEDUCTION_GUIDE mdspan(CArray&) -> mdspan<std::remove_all_extents_t<CArray>, extents<size_t, ::std::extent_v<CArray,0>>>;
+
+template <class ElementType, class SizeType, size_t N>
+MDSPAN_DEDUCTION_GUIDE mdspan(ElementType*, const ::std::array<SizeType, N>&)
+ -> mdspan<ElementType, ::MDSPAN_IMPL_STANDARD_NAMESPACE::dextents<size_t, N>>;
+
+#ifdef __cpp_lib_span
+template <class ElementType, class SizeType, size_t N>
+MDSPAN_DEDUCTION_GUIDE mdspan(ElementType*, ::std::span<SizeType, N>)
+ -> mdspan<ElementType, ::MDSPAN_IMPL_STANDARD_NAMESPACE::dextents<size_t, N>>;
+#endif
+
+// This one is necessary because all the constructors take `data_handle_type`s, not
+// `ElementType*`s, and `data_handle_type` is taken from `accessor_type::data_handle_type`, which
+// seems to throw off automatic deduction guides.
+template <class ElementType, class SizeType, size_t... ExtentsPack>
+MDSPAN_DEDUCTION_GUIDE mdspan(ElementType*, const extents<SizeType, ExtentsPack...>&)
+ -> mdspan<ElementType, ::MDSPAN_IMPL_STANDARD_NAMESPACE::extents<SizeType, ExtentsPack...>>;
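+
+// For illustration (hypothetical user code, not part of this header): given
+//   double buf[12];
+//   mdspan m(buf, extents<int, 3, 4>{});
+// the guide above deduces mdspan<double, extents<int, 3, 4>>, where the
+// implicit guides alone would fail for the reason just described.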
+
+template <class ElementType, class MappingType>
+MDSPAN_DEDUCTION_GUIDE mdspan(ElementType*, const MappingType&)
+ -> mdspan<ElementType, typename MappingType::extents_type, typename MappingType::layout_type>;
+
+template <class MappingType, class AccessorType>
+MDSPAN_DEDUCTION_GUIDE mdspan(const typename AccessorType::data_handle_type, const MappingType&, const AccessorType&)
+ -> mdspan<typename AccessorType::element_type, typename MappingType::extents_type, typename MappingType::layout_type, AccessorType>;
+#endif
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include "macros.hpp"
+#include "trait_backports.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+//==============================================================================
+
+template <class _T, size_t _Disambiguator = 0, class _Enable = void>
+struct __no_unique_address_emulation {
+ using __stored_type = _T;
+ _T __v;
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T const &__ref() const noexcept {
+ return __v;
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T &__ref() noexcept {
+ return __v;
+ }
+};
+
+// Empty case
+// This doesn't work if _T is final, of course, but we're not using anything
+// like that currently. That kind of thing could be added pretty easily though
+template <class _T, size_t _Disambiguator>
+struct __no_unique_address_emulation<
+ _T, _Disambiguator,
+ std::enable_if_t<_MDSPAN_TRAIT(std::is_empty, _T) &&
+ // If the type isn't trivially destructible, its destructor
+ // won't be called at the right time, so don't use this
+ // specialization
+ _MDSPAN_TRAIT(std::is_trivially_destructible, _T)>> :
+#ifdef _MDSPAN_COMPILER_MSVC
+ // MSVC doesn't allow you to access public static member functions of a type
+ // when you *happen* to privately inherit from that type.
+ protected
+#else
+ // But we still want this to be private if possible so that we don't accidentally
+ // access members of _T directly rather than calling __ref() first, which wouldn't
+ // work if _T happens to be stateful and thus we're using the unspecialized definition
+ // of __no_unique_address_emulation above.
+ private
+#endif
+ _T {
+ using __stored_type = _T;
+ MDSPAN_FORCE_INLINE_FUNCTION constexpr _T const &__ref() const noexcept {
+ return *static_cast<_T const *>(this);
+ }
+ MDSPAN_FORCE_INLINE_FUNCTION _MDSPAN_CONSTEXPR_14 _T &__ref() noexcept {
+ return *static_cast<_T *>(this);
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __no_unique_address_emulation() noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __no_unique_address_emulation(
+ __no_unique_address_emulation const &) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr __no_unique_address_emulation(
+ __no_unique_address_emulation &&) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __no_unique_address_emulation &
+ operator=(__no_unique_address_emulation const &) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ _MDSPAN_CONSTEXPR_14_DEFAULTED __no_unique_address_emulation &
+ operator=(__no_unique_address_emulation &&) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~__no_unique_address_emulation() noexcept = default;
+
+ // Explicitly make this not a reference so that the copy or move
+ // constructor still gets called.
+ MDSPAN_INLINE_FUNCTION
+ explicit constexpr __no_unique_address_emulation(_T const& __v) noexcept : _T(__v) {}
+ MDSPAN_INLINE_FUNCTION
+ explicit constexpr __no_unique_address_emulation(_T&& __v) noexcept : _T(::std::move(__v)) {}
+};
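+
+// Illustrative sketch (hypothetical type, not used by the library): given
+//   struct empty_accessor {};   // empty and trivially destructible
+// the specialization above is selected and stores empty_accessor as an empty
+// base class, so a holder class that in turn derives from the wrapper pays
+// no storage for it; the primary template would instead hold it as a
+// one-byte member.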
+
+//==============================================================================
+
+} // end namespace detail
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#ifndef MDSPAN_INCLUDE_EXPERIMENTAL_BITS_TRAIT_BACKPORTS_HPP_
+#define MDSPAN_INCLUDE_EXPERIMENTAL_BITS_TRAIT_BACKPORTS_HPP_
+
+#include "macros.hpp"
+#include "config.hpp"
+
+#include <type_traits>
+#include <utility> // integer_sequence
+
+//==============================================================================
+// <editor-fold desc="Variable template trait backports (e.g., is_void_v)"> {{{1
+
+#ifdef _MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS
+
+#if _MDSPAN_USE_VARIABLE_TEMPLATES
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+#define _MDSPAN_BACKPORT_TRAIT(TRAIT) \
+ template <class... Args> _MDSPAN_INLINE_VARIABLE constexpr auto TRAIT##_v = TRAIT<Args...>::value;
+
+_MDSPAN_BACKPORT_TRAIT(is_assignable)
+_MDSPAN_BACKPORT_TRAIT(is_constructible)
+_MDSPAN_BACKPORT_TRAIT(is_convertible)
+_MDSPAN_BACKPORT_TRAIT(is_default_constructible)
+_MDSPAN_BACKPORT_TRAIT(is_trivially_destructible)
+_MDSPAN_BACKPORT_TRAIT(is_same)
+_MDSPAN_BACKPORT_TRAIT(is_empty)
+_MDSPAN_BACKPORT_TRAIT(is_void)
+
+#undef _MDSPAN_BACKPORT_TRAIT
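+
+// For illustration: each expansion defines a C++17-style variable template,
+// e.g. is_void_v<void> above is exactly is_void<void>::value, mirroring the
+// std::is_void_v that this configuration lacks.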
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#endif // _MDSPAN_USE_VARIABLE_TEMPLATES
+
+#endif // _MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS
+
+// </editor-fold> end Variable template trait backports (e.g., is_void_v) }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="integer sequence (ugh...)"> {{{1
+
+#if !defined(_MDSPAN_USE_INTEGER_SEQUENCE) || !_MDSPAN_USE_INTEGER_SEQUENCE
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+template <class T, T... Vals>
+struct integer_sequence {
+ static constexpr size_t size() noexcept { return sizeof...(Vals); }
+ using value_type = T;
+};
+
+// alias the backported integer_sequence defined above, since
+// std::integer_sequence may be unavailable in this configuration
+template <size_t... Vals>
+using index_sequence = integer_sequence<size_t, Vals...>;
+
+namespace __detail {
+
+template <class T, T N, T I, class Result>
+struct __make_int_seq_impl;
+
+template <class T, T N, T... Vals>
+struct __make_int_seq_impl<T, N, N, integer_sequence<T, Vals...>>
+{
+ using type = integer_sequence<T, Vals...>;
+};
+
+template <class T, T N, T I, T... Vals>
+struct __make_int_seq_impl<
+ T, N, I, integer_sequence<T, Vals...>
+> : __make_int_seq_impl<T, N, I+1, integer_sequence<T, Vals..., I>>
+{ };
+
+} // end namespace __detail
+
+template <class T, T N>
+using make_integer_sequence = typename __detail::__make_int_seq_impl<T, N, 0, integer_sequence<T>>::type;
+
+template <size_t N>
+using make_index_sequence = typename __detail::__make_int_seq_impl<size_t, N, 0, integer_sequence<size_t>>::type;
+
+template <class... T>
+using index_sequence_for = make_index_sequence<sizeof...(T)>;
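+
+// Worked example (illustrative): make_index_sequence<3> starts at
+// __make_int_seq_impl<size_t, 3, 0, integer_sequence<size_t>>, appends
+// 0, 1, 2 one step at a time, and stops when I == N, yielding
+// integer_sequence<size_t, 0, 1, 2>, i.e. index_sequence<0, 1, 2>.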
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#endif
+
+// </editor-fold> end integer sequence (ugh...) }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="standard trait aliases"> {{{1
+
+#if !defined(_MDSPAN_USE_STANDARD_TRAIT_ALIASES) || !_MDSPAN_USE_STANDARD_TRAIT_ALIASES
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+#define _MDSPAN_BACKPORT_TRAIT_ALIAS(TRAIT) \
+ template <class... Args> using TRAIT##_t = typename TRAIT<Args...>::type;
+
+_MDSPAN_BACKPORT_TRAIT_ALIAS(remove_cv)
+_MDSPAN_BACKPORT_TRAIT_ALIAS(remove_reference)
+
+template <bool _B, class _T=void>
+using enable_if_t = typename enable_if<_B, _T>::type;
+
+#undef _MDSPAN_BACKPORT_TRAIT_ALIAS
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#endif
+
+// </editor-fold> end standard trait aliases }}}1
+//==============================================================================
+
+#endif //MDSPAN_INCLUDE_EXPERIMENTAL_BITS_TRAIT_BACKPORTS_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#include "macros.hpp"
+
+#include "trait_backports.hpp" // make_index_sequence
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+//==============================================================================
+
+namespace detail {
+
+template <class... _Ts> struct __type_list { static constexpr auto __size = sizeof...(_Ts); };
+
+// Implementation of type_list at() that's heavily optimized for small typelists
+template <size_t, class> struct __type_at;
+template <size_t, class _Seq, class=std::make_index_sequence<_Seq::__size>> struct __type_at_large_impl;
+
+template <size_t _I, size_t _Idx, class _T>
+struct __type_at_entry { };
+
+template <class _Result>
+struct __type_at_assign_op_ignore_rest {
+ template <class _T>
+ __type_at_assign_op_ignore_rest<_Result> operator=(_T&&);
+ using type = _Result;
+};
+
+struct __type_at_assign_op_impl {
+ template <size_t _I, size_t _Idx, class _T>
+ __type_at_assign_op_impl operator=(__type_at_entry<_I, _Idx, _T>&&);
+ template <size_t _I, class _T>
+ __type_at_assign_op_ignore_rest<_T> operator=(__type_at_entry<_I, _I, _T>&&);
+};
+
+template <size_t _I, class... _Ts, size_t... _Idxs>
+struct __type_at_large_impl<_I, __type_list<_Ts...>, std::integer_sequence<size_t, _Idxs...>>
+ : decltype(
+ _MDSPAN_FOLD_ASSIGN_LEFT(__type_at_assign_op_impl{}, /* = ... = */ __type_at_entry<_I, _Idxs, _Ts>{})
+ )
+{ };
+
+template <size_t _I, class... _Ts>
+struct __type_at<_I, __type_list<_Ts...>>
+ : __type_at_large_impl<_I, __type_list<_Ts...>>
+{ };
+
+template <class _T0, class... _Ts>
+struct __type_at<0, __type_list<_T0, _Ts...>> {
+ using type = _T0;
+};
+
+template <class _T0, class _T1, class... _Ts>
+struct __type_at<1, __type_list<_T0, _T1, _Ts...>> {
+ using type = _T1;
+};
+
+template <class _T0, class _T1, class _T2, class... _Ts>
+struct __type_at<2, __type_list<_T0, _T1, _T2, _Ts...>> {
+ using type = _T2;
+};
+
+template <class _T0, class _T1, class _T2, class _T3, class... _Ts>
+struct __type_at<3, __type_list<_T0, _T1, _T2, _T3, _Ts...>> {
+ using type = _T3;
+};
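+
+// Worked example (illustrative): __type_at<5, __type_list<T0, ..., T5, ...>>
+// bypasses the rank-0..3 shortcuts above and lands in __type_at_large_impl,
+// whose decltype folds
+//   __type_at_assign_op_impl{} = entry<5,0,T0> = entry<5,1,T1> = ...
+// left to right; the one assignment with _Idx == _I (here entry<5,5,T5>)
+// returns __type_at_assign_op_ignore_rest<T5>, which swallows the remaining
+// entries and exposes type = T5.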
+
+
+} // namespace detail
+
+//==============================================================================
+
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
--- /dev/null
+#pragma once
+
+#include <cstddef>
+#include <type_traits>
+#include <array>
+#include <utility>
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+// type alias used for rank-based tag dispatch
+//
+// this is used to enable alternatives to constexpr if when building for C++14
+//
+template <std::size_t N>
+using with_rank = std::integral_constant<std::size_t, N>;
+
+template <class I1, class I2>
+MDSPAN_INLINE_FUNCTION
+constexpr bool common_integral_compare(I1 x, I2 y)
+{
+ static_assert(std::is_integral<I1>::value &&
+ std::is_integral<I2>::value, "");
+
+ using I = std::common_type_t<I1, I2>;
+ return static_cast<I>(x) == static_cast<I>(y);
+}
+
+template <class T1, class T2, class F>
+MDSPAN_INLINE_FUNCTION
+constexpr bool rankwise_equal(with_rank<0>, const T1&, const T2&, F)
+{
+ return true;
+}
+
+template <std::size_t N, class T1, class T2, class F>
+MDSPAN_INLINE_FUNCTION
+constexpr bool rankwise_equal(with_rank<N>, const T1& x, const T2& y, F func)
+{
+ bool match = true;
+
+ for (std::size_t r = 0; r < N; r++) {
+ match = match && common_integral_compare(func(x, r), func(y, r));
+ }
+
+ return match;
+}
+
+constexpr struct
+{
+ template <class T, class I>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto operator()(const T& x, I i) const
+ {
+ return x.extent(i);
+ }
+} extent;
+
+constexpr struct
+{
+ template <class T, class I>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto operator()(const T& x, I i) const
+ {
+ return x.stride(i);
+ }
+} stride;
+
+// same as std::integral_constant but with __host__ __device__ annotations on
+// the implicit conversion function and the call operator
+template <class T, T v>
+struct integral_constant {
+ using value_type = T;
+ using type = integral_constant<T, v>;
+
+ static constexpr T value = v;
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr integral_constant() = default;
+
+  // These interop functions work because, aside from the value_type
+  // conversion operator, everything in std::integral_constant works on
+  // device (the remaining functions are defaulted)
+ MDSPAN_FUNCTION
+ constexpr integral_constant(std::integral_constant<T,v>) {};
+
+ MDSPAN_FUNCTION constexpr operator std::integral_constant<T,v>() const noexcept {
+ return std::integral_constant<T,v>{};
+ }
+
+ MDSPAN_FUNCTION constexpr operator value_type() const noexcept {
+ return value;
+ }
+
+ MDSPAN_FUNCTION constexpr value_type operator()() const noexcept {
+ return value;
+ }
+};
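+
+// Illustrative use (hypothetical values): the interop above allows
+//   integral_constant<size_t, 3> c = std::integral_constant<size_t, 3>{};
+//   std::integral_constant<size_t, 3> s = c;   // and back again
+// so device code can traffic in this type while code written against
+// std::integral_constant keeps working; both c() and size_t(c) yield 3.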
+
+// The tuple implementation only comes into play when using capabilities
+// such as submdspan, which require C++17 anyway
+#if MDSPAN_HAS_CXX_17
+template<class T, size_t Idx>
+struct tuple_member {
+ using type = T;
+ static constexpr size_t idx = Idx;
+ T val;
+ MDSPAN_FUNCTION constexpr T& get() { return val; }
+ MDSPAN_FUNCTION constexpr const T& get() const { return val; }
+};
+
+// A helper class which will be used via a fold expression to
+// select the type with the correct Idx in a pack of tuple_member
+template<size_t SearchIdx, size_t Idx, class T>
+struct tuple_idx_matcher {
+ using type = tuple_member<T, Idx>;
+ template<class Other>
+ MDSPAN_FUNCTION
+ constexpr auto operator | (Other v) const {
+ if constexpr (Idx == SearchIdx) { return *this; }
+ else { return v; }
+ }
+};
+
+template<class IdxSeq, class ... Elements>
+struct tuple_impl;
+
+template<size_t ... Idx, class ... Elements>
+struct tuple_impl<std::index_sequence<Idx...>, Elements...>: public tuple_member<Elements, Idx> ... {
+
+ MDSPAN_FUNCTION
+ constexpr tuple_impl(Elements ... vals):tuple_member<Elements, Idx>{vals}... {}
+
+ template<size_t N>
+ MDSPAN_FUNCTION
+ constexpr auto& get() {
+ using base_t = decltype((tuple_idx_matcher<N, Idx, Elements>() | ...) );
+ return base_t::type::get();
+ }
+ template<size_t N>
+ MDSPAN_FUNCTION
+ constexpr const auto& get() const {
+ using base_t = decltype((tuple_idx_matcher<N, Idx, Elements>() | ...) );
+ return base_t::type::get();
+ }
+};
+
+// A simple tuple-like class used internally to represent slices; unlike
+// std::tuple it is compatible with device code.
+// It doesn't support access by type, since we don't need that here.
+// This is not meant as an external API.
+template<class ... Elements>
+struct tuple: public tuple_impl<decltype(std::make_index_sequence<sizeof...(Elements)>()), Elements...> {
+ MDSPAN_FUNCTION
+ constexpr tuple(Elements ... vals):tuple_impl<decltype(std::make_index_sequence<sizeof...(Elements)>()), Elements ...>(vals ...) {}
+};
+
+template<size_t Idx, class ... Args>
+MDSPAN_FUNCTION
+constexpr auto& get(tuple<Args...>& vals) { return vals.template get<Idx>(); }
+
+template<size_t Idx, class ... Args>
+MDSPAN_FUNCTION
+constexpr const auto& get(const tuple<Args...>& vals) { return vals.template get<Idx>(); }
+
+template<class ... Elements>
+tuple(Elements ...) -> tuple<Elements...>;
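+
+// Illustrative use (hypothetical values): get<N> resolves its base class via
+// the fold over operator| above, e.g.
+//   tuple t{size_t(1), 2, 3.5};   // deduced as tuple<size_t, int, double>
+//   get<1>(t) = 5;                // picks tuple_member<int, 1>
+// and, unlike std::tuple, stays usable inside device functions.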
+#endif
+} // namespace detail
+
+constexpr struct mdspan_non_standard_tag {
+} mdspan_non_standard;
+
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include "../mdspan"
+#include <cassert>
+#include <vector>
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace MDSPAN_IMPL_PROPOSED_NAMESPACE {
+
+namespace {
+ template<class Extents>
+ struct size_of_extents;
+
+ template<class IndexType, size_t ... Extents>
+ struct size_of_extents<extents<IndexType, Extents...>> {
+ constexpr static size_t value() {
+ size_t size = 1;
+ for(size_t r=0; r<extents<IndexType, Extents...>::rank(); r++)
+ size *= extents<IndexType, Extents...>::static_extent(r);
+ return size;
+ }
+ };
+}
+
+namespace {
+ template<class C>
+ struct container_is_array : std::false_type {
+ template<class M>
+ static constexpr C construct(const M& m) { return C(m.required_span_size()); }
+ };
+ template<class T, size_t N>
+ struct container_is_array<std::array<T,N>> : std::true_type {
+ template<class M>
+ static constexpr std::array<T,N> construct(const M&) { return std::array<T,N>(); }
+ };
+}
+
+template <
+ class ElementType,
+ class Extents,
+ class LayoutPolicy = layout_right,
+ class Container = std::vector<ElementType>
+>
+class mdarray {
+private:
+  static_assert(::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::__is_extents_v<Extents>,
+                MDSPAN_IMPL_PROPOSED_NAMESPACE_STRING "::mdarray's Extents template parameter must be a specialization of " MDSPAN_IMPL_STANDARD_NAMESPACE_STRING "::extents.");
+
+public:
+
+ //--------------------------------------------------------------------------------
+ // Domain and codomain types
+
+ using extents_type = Extents;
+ using layout_type = LayoutPolicy;
+ using container_type = Container;
+ using mapping_type = typename layout_type::template mapping<extents_type>;
+ using element_type = ElementType;
+ using mdspan_type = mdspan<element_type, extents_type, layout_type>;
+ using const_mdspan_type = mdspan<const element_type, extents_type, layout_type>;
+ using value_type = std::remove_cv_t<element_type>;
+ using index_type = typename Extents::index_type;
+ using size_type = typename Extents::size_type;
+ using rank_type = typename Extents::rank_type;
+ using pointer = typename container_type::pointer;
+ using reference = typename container_type::reference;
+ using const_pointer = typename container_type::const_pointer;
+ using const_reference = typename container_type::const_reference;
+
+public:
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.cons], mdspan constructors, assignment, and destructor
+
+#if !(MDSPAN_HAS_CXX_20)
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr),
+ mdarray, (), ,
+ /* requires */ (extents_type::rank_dynamic()!=0)) {}
+#else
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdarray() requires(extents_type::rank_dynamic()!=0) = default;
+#endif
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdarray(const mdarray&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdarray(mdarray&&) = default;
+
+ // Constructors for container types constructible from a size
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ (::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::are_valid_indices<index_type, SizeTypes...>()) &&
+ _MDSPAN_TRAIT( std::is_constructible, extents_type, SizeTypes...) &&
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type) &&
+ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t) ||
+ container_is_array<container_type>::value) &&
+ (extents_type::rank()>0 || extents_type::rank_dynamic()==0)
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ explicit constexpr mdarray(SizeTypes... dynamic_extents)
+ : map_(extents_type(dynamic_extents...)), ctr_(container_is_array<container_type>::construct(map_))
+ { }
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdarray, (const extents_type& exts), ,
+ /* requires */ ((_MDSPAN_TRAIT( std::is_constructible, container_type, size_t) ||
+ container_is_array<container_type>::value) &&
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ ) : map_(exts), ctr_(container_is_array<container_type>::construct(map_))
+ { }
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdarray, (const mapping_type& m), ,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t) ||
+ container_is_array<container_type>::value)
+ ) : map_(m), ctr_(container_is_array<container_type>::construct(map_))
+ { }
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdarray, (const extents_type& exts, const container_type& ctr), ,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ ) : map_(exts), ctr_(ctr)
+ { assert(ctr.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ constexpr mdarray(const mapping_type& m, const container_type& ctr)
+ : map_(m), ctr_(ctr)
+ { assert(ctr.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ MDSPAN_FUNCTION_REQUIRES(
+ (MDSPAN_INLINE_FUNCTION constexpr),
+ mdarray, (const extents_type& exts, container_type&& ctr), ,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ ) : map_(exts), ctr_(std::move(ctr))
+ { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ constexpr mdarray(const mapping_type& m, container_type&& ctr)
+ : map_(m), ctr_(std::move(ctr))
+ { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType, class OtherExtents, class OtherLayoutPolicy, class OtherContainer,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, typename OtherLayoutPolicy::template mapping<OtherExtents>) &&
+ _MDSPAN_TRAIT( std::is_constructible, container_type, OtherContainer)
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const mdarray<OtherElementType, OtherExtents, OtherLayoutPolicy, OtherContainer>& other)
+ : map_(other.mapping()), ctr_(other.container())
+ {
+ static_assert( std::is_constructible<extents_type, OtherExtents>::value, "");
+ }
+
+ // Constructors for container types constructible from a size and allocator
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t, Alloc) &&
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const extents_type& exts, const Alloc& a)
+ : map_(exts), ctr_(map_.required_span_size(), a)
+ { }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t, Alloc))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const mapping_type& map, const Alloc& a)
+ : map_(map), ctr_(map_.required_span_size(), a)
+ { }
+
+ // Constructors for container types constructible from a container and allocator
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, container_type, Alloc) &&
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const extents_type& exts, const container_type& ctr, const Alloc& a)
+ : map_(exts), ctr_(ctr, a)
+ { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t, Alloc))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const mapping_type& map, const container_type& ctr, const Alloc& a)
+ : map_(map), ctr_(ctr, a)
+ { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, container_type, Alloc) &&
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, extents_type))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const extents_type& exts, container_type&& ctr, const Alloc& a)
+ : map_(exts), ctr_(std::move(ctr), a)
+ { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Alloc,
+ /* requires */ (_MDSPAN_TRAIT( std::is_constructible, container_type, size_t, Alloc))
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const mapping_type& map, container_type&& ctr, const Alloc& a)
+ : map_(map), ctr_(std::move(ctr), a)
+  { assert(ctr_.size() >= static_cast<size_t>(map_.required_span_size())); }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType, class OtherExtents, class OtherLayoutPolicy, class OtherContainer, class Alloc,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_constructible, mapping_type, typename OtherLayoutPolicy::template mapping<OtherExtents>) &&
+ _MDSPAN_TRAIT( std::is_constructible, container_type, OtherContainer, Alloc)
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mdarray(const mdarray<OtherElementType, OtherExtents, OtherLayoutPolicy, OtherContainer>& other, const Alloc& a)
+ : map_(other.mapping()), ctr_(other.container(), a)
+ {
+ static_assert( std::is_constructible<extents_type, OtherExtents>::value, "");
+ }
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdarray& operator= (const mdarray&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mdarray& operator= (mdarray&&) = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ ~mdarray() = default;
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.mapping], mdspan mapping domain multidimensional index to access codomain element
+
+ #if MDSPAN_USE_BRACKET_OPERATOR
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT( std::is_convertible, SizeTypes, index_type) /* && ... */) &&
+ extents_type::rank() == sizeof...(SizeTypes)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr const_reference operator[](SizeTypes... indices) const noexcept
+ {
+ return ctr_[map_(static_cast<index_type>(std::move(indices))...)];
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ _MDSPAN_FOLD_AND(_MDSPAN_TRAIT( std::is_convertible, SizeTypes, index_type) /* && ... */) &&
+ extents_type::rank() == sizeof...(SizeTypes)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](SizeTypes... indices) noexcept
+ {
+ return ctr_[map_(static_cast<index_type>(std::move(indices))...)];
+ }
+ #endif
+
+#if 0
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_convertible, SizeType, index_type) &&
+ N == extents_type::rank()
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr const_reference operator[](const std::array<SizeType, N>& indices) const noexcept
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_convertible, SizeType, index_type) &&
+ N == extents_type::rank()
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator[](const std::array<SizeType, N>& indices) noexcept
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+#endif
+
+
+ #if MDSPAN_USE_PAREN_OPERATOR
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ (::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::are_valid_indices<index_type, SizeTypes...>()) &&
+ extents_type::rank() == sizeof...(SizeTypes)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr const_reference operator()(SizeTypes... indices) const noexcept
+ {
+ return ctr_[map_(static_cast<index_type>(std::move(indices))...)];
+ }
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... SizeTypes,
+ /* requires */ (
+ (::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::are_valid_indices<index_type, SizeTypes...>()) &&
+ extents_type::rank() == sizeof...(SizeTypes)
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator()(SizeTypes... indices) noexcept
+ {
+ return ctr_[map_(static_cast<index_type>(std::move(indices))...)];
+ }
+
+#if 0
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_convertible, SizeType, index_type) &&
+ N == extents_type::rank()
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr const_reference operator()(const std::array<SizeType, N>& indices) const noexcept
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class SizeType, size_t N,
+ /* requires */ (
+ _MDSPAN_TRAIT( std::is_convertible, SizeType, index_type) &&
+ N == extents_type::rank()
+ )
+ )
+ MDSPAN_FORCE_INLINE_FUNCTION
+ constexpr reference operator()(const std::array<SizeType, N>& indices) noexcept
+ {
+ return __impl::template __callop<reference>(*this, indices);
+ }
+#endif
+ #endif
+
+ MDSPAN_INLINE_FUNCTION constexpr pointer data() noexcept { return ctr_.data(); };
+ MDSPAN_INLINE_FUNCTION constexpr const_pointer data() const noexcept { return ctr_.data(); };
+ MDSPAN_INLINE_FUNCTION constexpr container_type& container() noexcept { return ctr_; };
+ MDSPAN_INLINE_FUNCTION constexpr const container_type& container() const noexcept { return ctr_; };
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.domobs], mdspan observers of the domain multidimensional index space
+
+ MDSPAN_INLINE_FUNCTION static constexpr rank_type rank() noexcept { return extents_type::rank(); }
+ MDSPAN_INLINE_FUNCTION static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); }
+ MDSPAN_INLINE_FUNCTION static constexpr size_t static_extent(size_t r) noexcept { return extents_type::static_extent(r); }
+
+ MDSPAN_INLINE_FUNCTION constexpr const extents_type& extents() const noexcept { return map_.extents(); };
+ MDSPAN_INLINE_FUNCTION constexpr index_type extent(size_t r) const noexcept { return map_.extents().extent(r); };
+ MDSPAN_INLINE_FUNCTION constexpr index_type size() const noexcept {
+// return __impl::__size(*this);
+ return ctr_.size();
+ };
+
+
+ //--------------------------------------------------------------------------------
+ // [mdspan.basic.obs], mdspan observers of the mapping
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept { return mapping_type::is_always_unique(); };
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept { return mapping_type::is_always_exhaustive(); };
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept { return mapping_type::is_always_strided(); };
+
+ MDSPAN_INLINE_FUNCTION constexpr const mapping_type& mapping() const noexcept { return map_; };
+ MDSPAN_INLINE_FUNCTION constexpr bool is_unique() const noexcept { return map_.is_unique(); };
+ MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept { return map_.is_exhaustive(); };
+ MDSPAN_INLINE_FUNCTION constexpr bool is_strided() const noexcept { return map_.is_strided(); };
+ MDSPAN_INLINE_FUNCTION constexpr index_type stride(size_t r) const { return map_.stride(r); };
+
+  // Conversion to mdspan
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType, class OtherExtents,
+ class OtherLayoutType, class OtherAccessorType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_assignable,
+ mdspan<OtherElementType, OtherExtents, OtherLayoutType, OtherAccessorType>,
+ mdspan_type)
+ )
+ )
+ constexpr operator mdspan<OtherElementType, OtherExtents, OtherLayoutType, OtherAccessorType> () {
+ return mdspan_type(data(), map_);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherElementType, class OtherExtents,
+ class OtherLayoutType, class OtherAccessorType,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_assignable,
+ mdspan<OtherElementType, OtherExtents, OtherLayoutType, OtherAccessorType>,
+ const_mdspan_type)
+ )
+ )
+ constexpr operator mdspan<OtherElementType, OtherExtents, OtherLayoutType, OtherAccessorType> () const {
+ return const_mdspan_type(data(), map_);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherAccessorType = default_accessor<element_type>,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_assignable, mdspan_type,
+ mdspan<element_type, extents_type, layout_type, OtherAccessorType>)
+ )
+ )
+ constexpr mdspan<element_type, extents_type, layout_type, OtherAccessorType>
+ to_mdspan(const OtherAccessorType& a = default_accessor<element_type>()) {
+ return mdspan<element_type, extents_type, layout_type, OtherAccessorType>(data(), map_, a);
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class OtherAccessorType = default_accessor<const element_type>,
+ /* requires */ (
+ _MDSPAN_TRAIT(std::is_assignable, const_mdspan_type,
+ mdspan<const element_type, extents_type, layout_type, OtherAccessorType>)
+ )
+ )
+ constexpr mdspan<const element_type, extents_type, layout_type, OtherAccessorType>
+ to_mdspan(const OtherAccessorType& a = default_accessor<const element_type>()) const {
+ return mdspan<const element_type, extents_type, layout_type, OtherAccessorType>(data(), map_, a);
+ }
+
+private:
+ mapping_type map_;
+ container_type ctr_;
+
+ template <class, class, class, class>
+ friend class mdarray;
+};
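+
+// Illustrative use (hypothetical values, assuming MDSPAN_USE_PAREN_OPERATOR
+// is enabled): an owning rank-2 array and a non-owning view of it:
+//   mdarray<double, dextents<size_t, 2>> a(3, 4);   // backed by std::vector
+//   a(1, 2) = 42.0;
+//   auto v = a.to_mdspan();   // mdspan over data() with the stored mapping
+// after which v(1, 2) == 42.0.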
+
+
+} // end namespace MDSPAN_IMPL_PROPOSED_NAMESPACE
+} // end namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+// backward compatibility import into experimental
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace MDSPAN_IMPL_PROPOSED_NAMESPACE {
+
+template< ::std::size_t Rank, class IndexType = std::size_t>
+using dims =
+ :: MDSPAN_IMPL_STANDARD_NAMESPACE :: dextents<IndexType, Rank>;
+
+} // namespace MDSPAN_IMPL_PROPOSED_NAMESPACE
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include <type_traits>
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+
+namespace {
+ template<class T>
+ struct __mdspan_is_integral_constant: std::false_type {};
+
+ template<class T, T val>
+ struct __mdspan_is_integral_constant<std::integral_constant<T,val>>: std::true_type {};
+}
+
+// Slice Specifier allowing for strides and compile time extent
+template <class OffsetType, class ExtentType, class StrideType>
+struct strided_slice {
+ using offset_type = OffsetType;
+ using extent_type = ExtentType;
+ using stride_type = StrideType;
+
+ _MDSPAN_NO_UNIQUE_ADDRESS OffsetType offset{};
+ _MDSPAN_NO_UNIQUE_ADDRESS ExtentType extent{};
+ _MDSPAN_NO_UNIQUE_ADDRESS StrideType stride{};
+
+ static_assert(std::is_integral_v<OffsetType> || __mdspan_is_integral_constant<OffsetType>::value);
+ static_assert(std::is_integral_v<ExtentType> || __mdspan_is_integral_constant<ExtentType>::value);
+ static_assert(std::is_integral_v<StrideType> || __mdspan_is_integral_constant<StrideType>::value);
+};
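+
+// Illustrative example (hypothetical values): used as a slice specifier,
+//   strided_slice<size_t, size_t, size_t>{1, 10, 3}
+// covers the source index range [1, 11) in steps of 3, i.e. indices
+// 1, 4, 7, 10, giving a submdspan extent of 1 + (10 - 1) / 3 = 4. Passing
+// std::integral_constant values instead keeps offset, extent, and stride in
+// the type, so the resulting extent can stay static.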
+
+} // MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include "submdspan_extents.hpp"
+#include "submdspan_mapping.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+template <class ElementType, class Extents, class LayoutPolicy,
+ class AccessorPolicy, class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION
+constexpr auto
+submdspan(const mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy> &src,
+ SliceSpecifiers... slices) {
+ const auto sub_submdspan_mapping_result = submdspan_mapping(src.mapping(), slices...);
+  // NVCC has a problem with the deduction, so let's figure out the types explicitly
+ using sub_mapping_t = std::remove_cv_t<decltype(sub_submdspan_mapping_result.mapping)>;
+ using sub_extents_t = typename sub_mapping_t::extents_type;
+ using sub_layout_t = typename sub_mapping_t::layout_type;
+ using sub_accessor_t = typename AccessorPolicy::offset_policy;
+ return mdspan<ElementType, sub_extents_t, sub_layout_t, sub_accessor_t>(
+ src.accessor().offset(src.data_handle(), sub_submdspan_mapping_result.offset),
+ sub_submdspan_mapping_result.mapping,
+ sub_accessor_t(src.accessor()));
+}
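+
+// Illustrative use (hypothetical user code, assuming CTAD is available):
+// taking row 1 of a 3 x 4 view,
+//   double buf[12];
+//   mdspan m(buf, extents<size_t, 3, 4>{});
+//   auto row = submdspan(m, 1, full_extent);
+// yields a rank-1 mdspan with extent 4 whose data handle is offset to the
+// start of row 1, with the accessor rebuilt through the offset_policy above.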
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include <complex>
+
+#include "strided_slice.hpp"
+#include "../__p0009_bits/utility.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace detail {
+
+// Mapping from submapping ranks to source-mapping ranks.
+// InvMapRank is an index_sequence, which we build recursively
+// to contain the mapped indices.
+// End of recursion: this specialization returns the final index_sequence.
+template <size_t Counter, size_t... MapIdxs>
+MDSPAN_INLINE_FUNCTION
+constexpr auto inv_map_rank(std::integral_constant<size_t, Counter>, std::index_sequence<MapIdxs...>) {
+ return std::index_sequence<MapIdxs...>();
+}
+
+// specialization reducing rank by one (i.e., integral slice specifier)
+template<size_t Counter, class Slice, class... SliceSpecifiers, size_t... MapIdxs>
+MDSPAN_INLINE_FUNCTION
+constexpr auto inv_map_rank(std::integral_constant<size_t, Counter>, std::index_sequence<MapIdxs...>, Slice,
+ SliceSpecifiers... slices) {
+ using next_idx_seq_t = std::conditional_t<std::is_convertible_v<Slice, size_t>,
+ std::index_sequence<MapIdxs...>,
+ std::index_sequence<MapIdxs..., Counter>>;
+
+ return inv_map_rank(std::integral_constant<size_t,Counter + 1>(), next_idx_seq_t(),
+ slices...);
+}
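+
+// Worked example (illustrative): for slices (int, std::pair, full_extent_t),
+// the int is convertible to size_t and contributes nothing, while the pair
+// and full_extent_t append their source ranks, so the recursion above ends
+// in std::index_sequence<1, 2>: submapping rank 0 came from source rank 1,
+// submapping rank 1 from source rank 2.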
+
+// Helper for identifying strided_slice
+template <class T> struct is_strided_slice : std::false_type {};
+
+template <class OffsetType, class ExtentType, class StrideType>
+struct is_strided_slice<
+ strided_slice<OffsetType, ExtentType, StrideType>> : std::true_type {};
+
+// Helper for identifying valid pair like things
+template <class T, class IndexType> struct index_pair_like : std::false_type {};
+
+template <class IdxT1, class IdxT2, class IndexType>
+struct index_pair_like<std::pair<IdxT1, IdxT2>, IndexType> {
+ static constexpr bool value = std::is_convertible_v<IdxT1, IndexType> &&
+ std::is_convertible_v<IdxT2, IndexType>;
+};
+
+template <class IdxT1, class IdxT2, class IndexType>
+struct index_pair_like<std::tuple<IdxT1, IdxT2>, IndexType> {
+ static constexpr bool value = std::is_convertible_v<IdxT1, IndexType> &&
+ std::is_convertible_v<IdxT2, IndexType>;
+};
+
+template <class IdxT1, class IdxT2, class IndexType>
+struct index_pair_like<tuple<IdxT1, IdxT2>, IndexType> {
+ static constexpr bool value = std::is_convertible_v<IdxT1, IndexType> &&
+ std::is_convertible_v<IdxT2, IndexType>;
+};
+
+template <class IdxT, class IndexType>
+struct index_pair_like<std::complex<IdxT>, IndexType> {
+ static constexpr bool value = std::is_convertible_v<IdxT, IndexType>;
+};
+
+template <class IdxT, class IndexType>
+struct index_pair_like<std::array<IdxT, 2>, IndexType> {
+ static constexpr bool value = std::is_convertible_v<IdxT, IndexType>;
+};
+
+// first_of(slice): getting the beginning of the slice specifier's range
+MDSPAN_TEMPLATE_REQUIRES(
+ class Integral,
+ /* requires */(std::is_convertible_v<Integral, size_t>)
+)
+MDSPAN_INLINE_FUNCTION
+constexpr Integral first_of(const Integral &i) {
+ return i;
+}
+
+template<class Integral, Integral v>
+MDSPAN_INLINE_FUNCTION
+constexpr auto first_of(const std::integral_constant<Integral, v>&) {
+ return integral_constant<Integral, v>();
+}
+
+MDSPAN_INLINE_FUNCTION
+constexpr integral_constant<size_t, 0>
+first_of(const ::MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent_t &) {
+ return integral_constant<size_t, 0>();
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ class Slice,
+ /* requires */(index_pair_like<Slice, size_t>::value)
+)
+MDSPAN_INLINE_FUNCTION
+constexpr auto first_of(const Slice &i) {
+ return get<0>(i);
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ class IdxT1, class IdxT2,
+ /* requires */ (index_pair_like<std::tuple<IdxT1, IdxT2>, size_t>::value)
+ )
+constexpr auto first_of(const std::tuple<IdxT1, IdxT2>& i) {
+ return get<0>(i);
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ class IdxT1, class IdxT2,
+ /* requires */ (index_pair_like<std::pair<IdxT1, IdxT2>, size_t>::value)
+ )
+MDSPAN_INLINE_FUNCTION
+constexpr auto first_of(const std::pair<IdxT1, IdxT2>& i) {
+ return i.first;
+}
+
+template<class T>
+MDSPAN_INLINE_FUNCTION
+constexpr auto first_of(const std::complex<T> &i) {
+ return i.real();
+}
+
+template <class OffsetType, class ExtentType, class StrideType>
+MDSPAN_INLINE_FUNCTION
+constexpr OffsetType
+first_of(const strided_slice<OffsetType, ExtentType, StrideType> &r) {
+ return r.offset;
+}
+
+// last_of(slice): getting the end of the slice specifier's range.
+// We need not just the slice, however, but also the extents of the
+// original view and the rank being sliced, because a full_extent_t
+// slice takes its end from the corresponding source extent.
+MDSPAN_TEMPLATE_REQUIRES(
+ size_t k, class Extents, class Integral,
+ /* requires */(std::is_convertible_v<Integral, size_t>)
+)
+MDSPAN_INLINE_FUNCTION
+constexpr Integral
+ last_of(std::integral_constant<size_t, k>, const Extents &, const Integral &i) {
+ return i;
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ size_t k, class Extents, class Slice,
+ /* requires */(index_pair_like<Slice, size_t>::value)
+)
+MDSPAN_INLINE_FUNCTION
+constexpr auto last_of(std::integral_constant<size_t, k>, const Extents &,
+ const Slice &i) {
+ return get<1>(i);
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ size_t k, class Extents, class IdxT1, class IdxT2,
+ /* requires */ (index_pair_like<std::tuple<IdxT1, IdxT2>, size_t>::value)
+ )
+constexpr auto last_of(std::integral_constant<size_t, k>, const Extents &, const std::tuple<IdxT1, IdxT2>& i) {
+ return get<1>(i);
+}
+
+MDSPAN_TEMPLATE_REQUIRES(
+ size_t k, class Extents, class IdxT1, class IdxT2,
+ /* requires */ (index_pair_like<std::pair<IdxT1, IdxT2>, size_t>::value)
+ )
+MDSPAN_INLINE_FUNCTION
+constexpr auto last_of(std::integral_constant<size_t, k>, const Extents &, const std::pair<IdxT1, IdxT2>& i) {
+ return i.second;
+}
+
+template<size_t k, class Extents, class T>
+MDSPAN_INLINE_FUNCTION
+constexpr auto last_of(std::integral_constant<size_t, k>, const Extents &, const std::complex<T> &i) {
+ return i.imag();
+}
+
+// Suppress spurious warning with NVCC about no return statement.
+// This is a known issue in NVCC and NVC++
+// Depending on the CUDA and GCC version we need both the builtin
+// and the diagnostic push. I tried really hard to find something shorter
+// but no luck ...
+#if defined __NVCC__
+ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
+ #pragma nv_diagnostic push
+ #pragma nv_diag_suppress = implicit_return_from_non_void_function
+ #else
+ #ifdef __CUDA_ARCH__
+ #pragma diagnostic push
+ #pragma diag_suppress implicit_return_from_non_void_function
+ #endif
+ #endif
+#elif defined __NVCOMPILER
+ #pragma diagnostic push
+ #pragma diag_suppress = implicit_return_from_non_void_function
+#endif
+template <size_t k, class Extents>
+MDSPAN_INLINE_FUNCTION
+constexpr auto last_of(std::integral_constant<size_t, k>, const Extents &ext,
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent_t) {
+ if constexpr (Extents::static_extent(k) == dynamic_extent) {
+ return ext.extent(k);
+ } else {
+ return integral_constant<size_t, Extents::static_extent(k)>();
+ }
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ // Even with CUDA_ARCH protection this thing warns about calling host function
+ __builtin_unreachable();
+#endif
+}
+#if defined __NVCC__
+ #ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
+ #pragma nv_diagnostic pop
+ #else
+ #ifdef __CUDA_ARCH__
+ #pragma diagnostic pop
+ #endif
+ #endif
+#elif defined __NVCOMPILER
+ #pragma diagnostic pop
+#endif
+
+template <size_t k, class Extents, class OffsetType, class ExtentType,
+ class StrideType>
+MDSPAN_INLINE_FUNCTION
+constexpr OffsetType
+last_of(std::integral_constant<size_t, k>, const Extents &,
+ const strided_slice<OffsetType, ExtentType, StrideType> &r) {
+ return r.extent;
+}
+
+// get stride of slices
+template <class T>
+MDSPAN_INLINE_FUNCTION
+constexpr auto stride_of(const T &) {
+ return integral_constant<size_t, 1>();
+}
+
+template <class OffsetType, class ExtentType, class StrideType>
+MDSPAN_INLINE_FUNCTION
+constexpr auto
+stride_of(const strided_slice<OffsetType, ExtentType, StrideType> &r) {
+ return r.stride;
+}
+
+// divide which can deal with integral constant preservation
+template <class IndexT, class T0, class T1>
+MDSPAN_INLINE_FUNCTION
+constexpr auto divide(const T0 &v0, const T1 &v1) {
+ return IndexT(v0) / IndexT(v1);
+}
+
+template <class IndexT, class T0, T0 v0, class T1, T1 v1>
+MDSPAN_INLINE_FUNCTION
+constexpr auto divide(const std::integral_constant<T0, v0> &,
+ const std::integral_constant<T1, v1> &) {
+ // cutting short division by zero
+ // this is used for strided_slice with zero extent/stride
+ return integral_constant<IndexT, v0 == 0 ? 0 : v0 / v1>();
+}
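+
+// Worked example (illustrative):
+//   divide<size_t>(std::integral_constant<int, 6>{},
+//                  std::integral_constant<int, 3>{})
+// yields integral_constant<size_t, 2>, so the quotient stays usable as a
+// static extent; plain integers instead take the runtime overload above.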
+
+// multiply which can deal with integral constant preservation
+template <class IndexT, class T0, class T1>
+MDSPAN_INLINE_FUNCTION
+constexpr auto multiply(const T0 &v0, const T1 &v1) {
+ return IndexT(v0) * IndexT(v1);
+}
+
+template <class IndexT, class T0, T0 v0, class T1, T1 v1>
+MDSPAN_INLINE_FUNCTION
+constexpr auto multiply(const std::integral_constant<T0, v0> &,
+ const std::integral_constant<T1, v1> &) {
+ return integral_constant<IndexT, v0 * v1>();
+}
+
+// compute new static extent from range, preserving static knowledge
+template <class Arg0, class Arg1> struct StaticExtentFromRange {
+ constexpr static size_t value = dynamic_extent;
+};
+
+template <class Integral0, Integral0 val0, class Integral1, Integral1 val1>
+struct StaticExtentFromRange<std::integral_constant<Integral0, val0>,
+ std::integral_constant<Integral1, val1>> {
+ constexpr static size_t value = val1 - val0;
+};
+
+template <class Integral0, Integral0 val0, class Integral1, Integral1 val1>
+struct StaticExtentFromRange<integral_constant<Integral0, val0>,
+ integral_constant<Integral1, val1>> {
+ constexpr static size_t value = val1 - val0;
+};
+
+// compute new static extent from strided_slice, preserving static
+// knowledge
+template <class Arg0, class Arg1> struct StaticExtentFromStridedRange {
+ constexpr static size_t value = dynamic_extent;
+};
+
+template <class Integral0, Integral0 val0, class Integral1, Integral1 val1>
+struct StaticExtentFromStridedRange<std::integral_constant<Integral0, val0>,
+ std::integral_constant<Integral1, val1>> {
+ constexpr static size_t value = val0 > 0 ? 1 + (val0 - 1) / val1 : 0;
+};
+
+template <class Integral0, Integral0 val0, class Integral1, Integral1 val1>
+struct StaticExtentFromStridedRange<integral_constant<Integral0, val0>,
+ integral_constant<Integral1, val1>> {
+ constexpr static size_t value = val0 > 0 ? 1 + (val0 - 1) / val1 : 0;
+};
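+
+// Worked example (illustrative): for a contiguous range,
+//   StaticExtentFromRange<integral_constant<size_t, 2>,
+//                         integral_constant<size_t, 5>>::value
+// is 5 - 2 = 3, and for a strided_slice with static extent 10 and stride 3,
+// StaticExtentFromStridedRange yields 1 + (10 - 1) / 3 = 4; if either
+// argument is a runtime integer, both traits fall back to dynamic_extent.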
+
+// creates new extents through recursive calls to next_extent member function
+// next_extent has different overloads for different types of stride specifiers
+template <size_t K, class Extents, size_t... NewExtents>
+struct extents_constructor {
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Slice, class... SlicesAndExtents,
+ /* requires */(!std::is_convertible_v<Slice, size_t> &&
+ !is_strided_slice<Slice>::value)
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr static auto next_extent(const Extents &ext, const Slice &sl,
+ SlicesAndExtents... slices_and_extents) {
+ constexpr size_t new_static_extent = StaticExtentFromRange<
+ decltype(first_of(std::declval<Slice>())),
+ decltype(last_of(std::integral_constant<size_t, Extents::rank() - K>(),
+ std::declval<Extents>(),
+ std::declval<Slice>()))>::value;
+
+ using next_t =
+ extents_constructor<K - 1, Extents, NewExtents..., new_static_extent>;
+ using index_t = typename Extents::index_type;
+ return next_t::next_extent(
+ ext, slices_and_extents...,
+ index_t(last_of(std::integral_constant<size_t, Extents::rank() - K>(), ext,
+ sl)) -
+ index_t(first_of(sl)));
+ }
+
+ MDSPAN_TEMPLATE_REQUIRES(
+ class Slice, class... SlicesAndExtents,
+ /* requires */ (std::is_convertible_v<Slice, size_t>)
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr static auto next_extent(const Extents &ext, const Slice &,
+ SlicesAndExtents... slices_and_extents) {
+ using next_t = extents_constructor<K - 1, Extents, NewExtents...>;
+ return next_t::next_extent(ext, slices_and_extents...);
+ }
+
+ template <class OffsetType, class ExtentType, class StrideType,
+ class... SlicesAndExtents>
+ MDSPAN_INLINE_FUNCTION
+ constexpr static auto
+ next_extent(const Extents &ext,
+ const strided_slice<OffsetType, ExtentType, StrideType> &r,
+ SlicesAndExtents... slices_and_extents) {
+ using index_t = typename Extents::index_type;
+ using new_static_extent_t =
+ StaticExtentFromStridedRange<ExtentType, StrideType>;
+ if constexpr (new_static_extent_t::value == dynamic_extent) {
+ using next_t =
+ extents_constructor<K - 1, Extents, NewExtents..., dynamic_extent>;
+ return next_t::next_extent(
+ ext, slices_and_extents...,
+ r.extent > 0 ? 1 + divide<index_t>(r.extent - 1, r.stride) : 0);
+ } else {
+ constexpr size_t new_static_extent = new_static_extent_t::value;
+ using next_t =
+ extents_constructor<K - 1, Extents, NewExtents..., new_static_extent>;
+ return next_t::next_extent(
+ ext, slices_and_extents..., index_t(divide<index_t>(ExtentType(), StrideType())));
+ }
+ }
+};
+
+template <class Extents, size_t... NewStaticExtents>
+struct extents_constructor<0, Extents, NewStaticExtents...> {
+
+ template <class... NewExtents>
+ MDSPAN_INLINE_FUNCTION
+ constexpr static auto next_extent(const Extents &, NewExtents... new_exts) {
+ return extents<typename Extents::index_type, NewStaticExtents...>(
+ new_exts...);
+ }
+};
+
+} // namespace detail
+
+// submdspan_extents creates new extents given src extents and submdspan slice
+// specifiers
+template <class IndexType, size_t... Extents, class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION
+constexpr auto submdspan_extents(const extents<IndexType, Extents...> &src_exts,
+ SliceSpecifiers... slices) {
+
+ using ext_t = extents<IndexType, Extents...>;
+ return detail::extents_constructor<ext_t::rank(), ext_t>::next_extent(
+ src_exts, slices...);
+}
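+
+// Worked example (illustrative): for src_exts of type
+// extents<int, 3, dynamic_extent> and slices (1, std::pair<int, int>{1, 3}),
+// the integral slice drops rank 0 and the pair keeps rank 1 with a dynamic
+// extent of 3 - 1 = 2, so the result is extents<int, dynamic_extent> holding
+// the value 2.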
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#pragma once
+
+#include <array>
+#include <type_traits>
+#include <utility> // index_sequence
+#include "../__p0009_bits/utility.hpp"
+
+// Suppress spurious warning with NVCC about no return statement.
+// This is a known issue in NVCC and NVC++
+// Depending on the CUDA and GCC version we need both the builtin
+// and the diagnostic push. I tried really hard to find something shorter
+// but no luck ...
+#if defined __NVCC__
+#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
+#pragma nv_diagnostic push
+#pragma nv_diag_suppress = implicit_return_from_non_void_function
+#else
+#ifdef __CUDA_ARCH__
+#pragma diagnostic push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+#endif
+#elif defined __NVCOMPILER
+#pragma diagnostic push
+#pragma diag_suppress = implicit_return_from_non_void_function
+#endif
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+//******************************************
+// Return type of submdspan_mapping overloads
+//******************************************
+template <class LayoutMapping> struct submdspan_mapping_result {
+ _MDSPAN_NO_UNIQUE_ADDRESS LayoutMapping mapping{};
+ size_t offset;
+};
+
+namespace detail {
+
+// We use const Slice& and not Slice&& because the various
+// submdspan_mapping_impl overloads use their slices arguments
+// multiple times. This makes perfect forwarding not useful, but we
+// still don't want to pass those objects (possibly three 64-bit
+// values each) by value.
+template <class IndexType, class Slice>
+MDSPAN_INLINE_FUNCTION constexpr bool
+one_slice_out_of_bounds(const IndexType &ext, const Slice &slice) {
+ using common_t =
+ std::common_type_t<decltype(detail::first_of(slice)), IndexType>;
+ return static_cast<common_t>(detail::first_of(slice)) ==
+ static_cast<common_t>(ext);
+}
+
+template <size_t... RankIndices, class IndexType, size_t... Exts,
+ class... Slices>
+MDSPAN_INLINE_FUNCTION constexpr bool
+any_slice_out_of_bounds_helper(std::index_sequence<RankIndices...>,
+ const extents<IndexType, Exts...> &exts,
+ const Slices &... slices) {
+ return _MDSPAN_FOLD_OR(
+ (one_slice_out_of_bounds(exts.extent(RankIndices), slices)));
+}
+
+template <class IndexType, size_t... Exts, class... Slices>
+MDSPAN_INLINE_FUNCTION constexpr bool
+any_slice_out_of_bounds(const extents<IndexType, Exts...> &exts,
+ const Slices &... slices) {
+ return any_slice_out_of_bounds_helper(
+ std::make_index_sequence<sizeof...(Slices)>(), exts, slices...);
+}
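+
+// Illustrative example, not part of the library: with exts.extent(0) == 4,
+// the empty slice std::pair{4, 4} starts exactly at the extent, so
+// any_slice_out_of_bounds(exts, std::pair{4, 4}) returns true; the
+// submdspan_mapping_impl overloads below then use required_span_size() as
+// the offset instead of evaluating the mapping (see LWG Issue 4060).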
+
+// constructs sub strides
+template<class T, size_t N>
+struct sub_strides
+{
+ T values[N > 0 ? N : 1];
+};
+
+template <class SrcMapping, class... slice_strides, size_t... InvMapIdxs>
+MDSPAN_INLINE_FUNCTION constexpr auto construct_sub_strides(
+ const SrcMapping &src_mapping, std::index_sequence<InvMapIdxs...>,
+ const MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple<slice_strides...> &slices_stride_factor) {
+ using index_type = typename SrcMapping::index_type;
+ return sub_strides<typename SrcMapping::index_type, sizeof...(InvMapIdxs)>{{
+ (static_cast<index_type>(src_mapping.stride(InvMapIdxs)) *
+ static_cast<index_type>(get<InvMapIdxs>(slices_stride_factor)))...}};
+}
+
+template<class SliceSpecifier, class IndexType>
+struct is_range_slice {
+ constexpr static bool value =
+ std::is_same_v<SliceSpecifier, full_extent_t> ||
+ index_pair_like<SliceSpecifier, IndexType>::value;
+};
+
+template<class SliceSpecifier, class IndexType>
+constexpr bool is_range_slice_v = is_range_slice<SliceSpecifier, IndexType>::value;
+
+template<class SliceSpecifier, class IndexType>
+struct is_index_slice {
+ constexpr static bool value = std::is_convertible_v<SliceSpecifier, IndexType>;
+};
+
+template<class SliceSpecifier, class IndexType>
+constexpr bool is_index_slice_v = is_index_slice<SliceSpecifier, IndexType>::value;
+
+} // namespace detail
+
+//**********************************
+// layout_left submdspan_mapping
+//*********************************
+namespace detail {
+
+// Figure out whether to preserve layout_left
+template <class IndexType, size_t SubRank, class IndexSequence,
+ class... SliceSpecifiers>
+struct deduce_layout_left_submapping;
+
+template <class IndexType, size_t SubRank, size_t... Idx,
+ class... SliceSpecifiers>
+struct deduce_layout_left_submapping<
+ IndexType, SubRank, std::index_sequence<Idx...>, SliceSpecifiers...> {
+
+ using count_range = index_sequence_scan_impl<
+ 0u, (is_index_slice_v<SliceSpecifiers, IndexType> ? 0u : 1u)...>;
+
+ constexpr static int gap_len =
+ (((Idx > 0 && count_range::get(Idx) == 1 &&
+ is_index_slice_v<SliceSpecifiers, IndexType>)
+ ? 1
+ : 0) +
+ ... + 0);
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr static bool layout_left_value() {
+ // Use layout_left for rank 0
+ if constexpr (SubRank == 0) {
+ return true;
+ // Use layout_left for a rank-1 result if the leftmost slice specifier is range-like
+ } else if constexpr (SubRank == 1) {
+ return ((Idx > 0 || is_range_slice_v<SliceSpecifiers, IndexType>)&&...);
+ } else {
+ // Preserve layout_left if the leftmost SubRank - 1 slices are full_extent_t,
+ // the slice at idx SubRank - 1 is a range, and
+ // every slice at idx > SubRank - 1 is an index
+ return ((((Idx < SubRank - 1) && std::is_same_v<SliceSpecifiers, full_extent_t>) ||
+ ((Idx == SubRank - 1) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx > SubRank - 1) && is_index_slice_v<SliceSpecifiers, IndexType>)) && ...);
+ }
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr static bool layout_left_padded_value() {
+ // Technically we could also keep layout_left_padded for SubRank == 0,
+ // and for SubRank == 1 with the leftmost slice specifier being a
+ // contiguous range, but we intercept these cases separately.
+
+ // In all other cases:
+ // leftmost slice must be range
+ // then there can be a gap with index slices
+ // then SubRank - 2 full_extent slices
+ // then another range slice
+ // then more index slices
+ // e.g. R I I I F F F R I I for obtaining a rank-5 from a rank-10
+ return ((((Idx == 0) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx > 0 && Idx <= gap_len) && is_index_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx > gap_len && Idx < gap_len + SubRank - 1) && std::is_same_v<SliceSpecifiers, full_extent_t>) ||
+ ((Idx == gap_len + SubRank - 1) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx > gap_len + SubRank - 1) && is_index_slice_v<SliceSpecifiers, IndexType>)) && ... );
+ }
+};
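+
+// Illustrative examples, not part of the library, for a rank-3 source:
+//   slices (full_extent, std::pair{0, 2}, 1): SubRank == 2 and
+//     layout_left_value() is true, so layout_left is preserved;
+//   slices (std::pair{0, 2}, full_extent, 1): layout_left_value() is false
+//     (the leftmost slice is a range but not full_extent_t), while
+//     layout_left_padded_value() is true, so layout_left_padded is chosen.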
+
+// We reuse the same helper for layout_left and layout_left_padded;
+// for a layout_left source, StaticStride is static_extent(0).
+template<class Extents, size_t NumGaps, size_t StaticStride>
+struct compute_s_static_layout_left {
+ // Neither StaticStride nor any of the extents we look at can be zero:
+ // StaticStride can never be zero, and the static extents in question are
+ // associated with integral slice specifiers, which would not be valid for
+ // a zero extent.
+ template<size_t ... Idx>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr size_t value(std::index_sequence<Idx...>) {
+ size_t val =
+     ((Idx > 0 && Idx <= NumGaps
+           ? (Extents::static_extent(Idx) == dynamic_extent
+                  ? 0
+                  : Extents::static_extent(Idx))
+           : 1) *
+      ... * (StaticStride == dynamic_extent ? 0 : StaticStride));
+ return val == 0 ? dynamic_extent : val;
+ }
+};
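+
+// Illustrative example, not part of the library: for
+// Extents = extents<int, 4, 5, 6>, NumGaps = 1 and StaticStride = 4, the
+// fold multiplies StaticStride with static_extent(1): 4 * 5 == 20. If
+// StaticStride or any extent inside the gap is dynamic_extent, the product
+// is forced to 0 and dynamic_extent is returned instead.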
+
+} // namespace detail
+
+// Actual submdspan mapping call
+template <class Extents>
+template <class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION constexpr auto
+layout_left::mapping<Extents>::submdspan_mapping_impl(
+ SliceSpecifiers... slices) const {
+
+ // compute sub extents
+ using src_ext_t = Extents;
+ auto dst_ext = submdspan_extents(extents(), slices...);
+ using dst_ext_t = decltype(dst_ext);
+
+ // figure out sub layout type
+ using deduce_layout = detail::deduce_layout_left_submapping<
+ typename dst_ext_t::index_type, dst_ext_t::rank(),
+ std::make_index_sequence<src_ext_t::rank()>,
+ SliceSpecifiers...>;
+
+ // Figure out if any slice's lower bound equals the corresponding extent.
+ // If so, bypass evaluating the layout mapping. This fixes LWG Issue 4060.
+ const bool out_of_bounds =
+ detail::any_slice_out_of_bounds(this->extents(), slices...);
+ auto offset = static_cast<size_t>(
+ out_of_bounds ? this->required_span_size()
+ : this->operator()(detail::first_of(slices)...));
+
+ if constexpr (deduce_layout::layout_left_value()) {
+ // layout_left case
+ using dst_mapping_t = typename layout_left::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t(dst_ext),
+ offset};
+ } else if constexpr (deduce_layout::layout_left_padded_value()) {
+ constexpr size_t S_static =
+     MDSPAN_IMPL_STANDARD_NAMESPACE::detail::compute_s_static_layout_left<
+         Extents, deduce_layout::gap_len,
+         Extents::static_extent(0)>::value(std::make_index_sequence<Extents::rank()>());
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::
+     layout_left_padded<S_static>::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{
+ dst_mapping_t(dst_ext, stride(1 + deduce_layout::gap_len)), offset};
+ } else {
+ // layout_stride case
+ using dst_mapping_t = typename layout_stride::mapping<dst_ext_t>;
+ auto inv_map = detail::inv_map_rank(std::integral_constant<size_t, 0>(),
+ std::index_sequence<>(), slices...);
+ return submdspan_mapping_result<dst_mapping_t> {
+ dst_mapping_t(mdspan_non_standard, dst_ext,
+ detail::construct_sub_strides(
+ *this, inv_map,
+// HIP needs deduction guides to have markups, so we need to be explicit.
+// NVCC 11.0 has a bug with the deduction guide here (we verified that 11.2
+// does not), but Clang-CUDA also doesn't accept the use of a deduction
+// guide, so disable it for CUDA altogether.
+#if defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_CUDA)
+ detail::tuple<decltype(detail::stride_of(slices))...>{
+ detail::stride_of(slices)...}).values),
+#else
+ detail::tuple{detail::stride_of(slices)...}).values),
+#endif
+ offset
+ };
+ }
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+}
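+
+// Illustrative usage, not part of the library:
+//   layout_left::mapping<extents<int, 4, 8>> m;
+//   auto r = submdspan_mapping(m, full_extent, 3);
+// r.mapping is a layout_left::mapping<extents<int, 4>> and r.offset is
+// m(0, 3) == 12. A strided_slice in the leftmost rank would instead fall
+// through to the layout_stride branch above.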
+
+template <size_t PaddingValue>
+template <class Extents>
+template <class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION constexpr auto
+MDSPAN_IMPL_PROPOSED_NAMESPACE::layout_left_padded<PaddingValue>::mapping<Extents>::submdspan_mapping_impl(
+ SliceSpecifiers... slices) const {
+
+ // compute sub extents
+ using src_ext_t = Extents;
+ auto dst_ext = submdspan_extents(extents(), slices...);
+ using dst_ext_t = decltype(dst_ext);
+
+ if constexpr (Extents::rank() == 0) { // rank-0 case
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::layout_left_padded<PaddingValue>::template mapping<Extents>;
+ return submdspan_mapping_result<dst_mapping_t>{*this, 0};
+ } else {
+ // Figure out if any slice's lower bound equals the corresponding extent.
+ // If so, bypass evaluating the layout mapping. This fixes LWG Issue 4060.
+ const bool out_of_bounds =
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::any_slice_out_of_bounds(this->extents(), slices...);
+ auto offset = static_cast<size_t>(
+ out_of_bounds ? this->required_span_size()
+ : this->operator()(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::first_of(slices)...));
+ if constexpr (dst_ext_t::rank() == 0) { // result rank-0
+ // The following, for some reason, leads to a compiler error later on, while
+ // spelling the type out without a typedef works:
+ // Compilers: CUDA 11.2 with GCC 9.1
+ //
+ // using dst_mapping_t = typename layout_left::template mapping<dst_ext_t>;
+ // return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t{dst_ext}, offset};
+ //
+ // Error: submdspan_mapping.hpp:299:23: error: 'dst_mapping_t' does not name a type
+ //   299 | using dst_mapping_t = typename layout_left::template mapping<dst_ext_t>;
+ // The same error (about dst_mapping_t not naming a type) is given when a
+ // different name is used on line 299:
+ // using dst_mapping_t2 = typename layout_left::template mapping<dst_ext_t>;
+
+ return submdspan_mapping_result<typename layout_left::template mapping<dst_ext_t>>
+ {typename layout_left::template mapping<dst_ext_t>{dst_ext}, offset};
+ } else { // general case
+ // figure out the sub layout type
+ using deduce_layout = MDSPAN_IMPL_STANDARD_NAMESPACE::detail::deduce_layout_left_submapping<
+ typename dst_ext_t::index_type, dst_ext_t::rank(),
+ decltype(std::make_index_sequence<src_ext_t::rank()>()),
+ SliceSpecifiers...>;
+
+ if constexpr (deduce_layout::layout_left_value() && dst_ext_t::rank() == 1) { // getting rank-1 from leftmost
+ using dst_mapping_t = typename layout_left::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t{dst_ext}, offset};
+ } else if constexpr (deduce_layout::layout_left_padded_value()) { // can keep layout_left_padded
+ constexpr size_t S_static =
+     MDSPAN_IMPL_STANDARD_NAMESPACE::detail::compute_s_static_layout_left<
+         Extents, deduce_layout::gap_len,
+         static_padding_stride>::value(std::make_index_sequence<Extents::rank()>());
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::
+     layout_left_padded<S_static>::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{
+ dst_mapping_t(dst_ext, stride(1 + deduce_layout::gap_len)), offset};
+ } else { // layout_stride
+ auto inv_map = MDSPAN_IMPL_STANDARD_NAMESPACE::detail::inv_map_rank(std::integral_constant<size_t, 0>(),
+ std::index_sequence<>(), slices...);
+ using dst_mapping_t = typename layout_stride::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t> {
+ dst_mapping_t(mdspan_non_standard, dst_ext,
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::construct_sub_strides(
+ *this, inv_map,
+// HIP needs deduction guides to have markups, so we need to be explicit.
+// NVCC 11.0 has a bug with the deduction guide here (we verified that 11.2
+// does not), but Clang-CUDA also doesn't accept the use of a deduction
+// guide, so disable it for CUDA altogether.
+#if defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_CUDA)
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple<decltype(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices))...>{
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices)...}).values),
+#else
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple{MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices)...}).values),
+#endif
+ offset
+ };
+ }
+ }
+ }
+
+
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+}
+
+//**********************************
+// layout_right submdspan_mapping
+//*********************************
+namespace detail {
+
+// Figure out whether to preserve layout_right
+template <class IndexType, size_t SubRank, class IndexSequence,
+ class... SliceSpecifiers>
+struct deduce_layout_right_submapping;
+
+template <class IndexType, size_t SubRank, size_t... Idx,
+ class... SliceSpecifiers>
+struct deduce_layout_right_submapping<
+ IndexType, SubRank, std::index_sequence<Idx...>, SliceSpecifiers...> {
+
+ static constexpr size_t Rank = sizeof...(Idx);
+ using count_range = index_sequence_scan_impl<
+ 0u, (std::is_convertible_v<SliceSpecifiers, IndexType> ? 0u : 1u)...>;
+ constexpr static int gap_len =
+ (((Idx < Rank - 1 && count_range::get(Idx) == SubRank - 1 &&
+ std::is_convertible_v<SliceSpecifiers, IndexType>)
+ ? 1
+ : 0) +
+ ... + 0);
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr static bool layout_right_value() {
+ // Use layout_right for rank 0
+ if constexpr (SubRank == 0) {
+ return true;
+ // Use layout_right for a rank-1 result if the rightmost slice specifier is range-like
+ } else if constexpr (SubRank == 1) {
+ return ((Idx < Rank - 1 || is_range_slice_v<SliceSpecifiers, IndexType>)&&...);
+ } else {
+ // Preserve layout_right if the rightmost SubRank - 1 slices are full_extent_t,
+ // the slice at idx Rank - SubRank is a range, and
+ // every slice at idx < Rank - SubRank is an index
+ return ((((Idx >= Rank - SubRank) && std::is_same_v<SliceSpecifiers, full_extent_t>) ||
+ ((Idx == Rank - SubRank) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx < Rank - SubRank) && is_index_slice_v<SliceSpecifiers, IndexType>)) && ...);
+ }
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr static bool layout_right_padded_value() {
+ // Technically we could also keep layout_right_padded for SubRank == 0,
+ // and for SubRank == 1 with the rightmost slice specifier being a
+ // contiguous range, but we intercept these cases separately.
+
+ // In all other cases:
+ // rightmost slice must be range
+ // then there can be a gap with index slices
+ // then SubRank - 2 full_extent slices
+ // then another range slice
+ // then more index slices
+ // e.g. I I R F F F I I I R for obtaining a rank-5 from a rank-10
+ return ((((Idx == Rank - 1) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx >= Rank - gap_len - 1 && Idx < Rank - 1) && is_index_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx > Rank - gap_len - SubRank && Idx < Rank - gap_len - 1) && std::is_same_v<SliceSpecifiers, full_extent_t>) ||
+ ((Idx == Rank - gap_len - SubRank) && is_range_slice_v<SliceSpecifiers, IndexType>) ||
+ ((Idx < Rank - gap_len - SubRank) && is_index_slice_v<SliceSpecifiers, IndexType>)) && ... );
+ }
+};
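+
+// Illustrative example, not part of the library, for a rank-3 source:
+//   slices (1, std::pair{0, 2}, full_extent): SubRank == 2 and
+//   layout_right_value() is true: the leading index drops a rank, the slice
+//   at Rank - SubRank is a range, and the rightmost slice is full_extent_t.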
+
+// We reuse the same helper for layout_right and layout_right_padded;
+// for a layout_right source, StaticStride is static_extent(Rank - 1).
+template<class Extents, size_t NumGaps, size_t StaticStride>
+struct compute_s_static_layout_right {
+ // Neither StaticStride nor any of the extents we look at can be zero:
+ // StaticStride can never be zero, and the static extents in question are
+ // associated with integral slice specifiers, which would not be valid for
+ // a zero extent.
+ template<size_t ... Idx>
+ MDSPAN_INLINE_FUNCTION
+ static constexpr size_t value(std::index_sequence<Idx...>) {
+ size_t val =
+     ((Idx >= Extents::rank() - 1 - NumGaps && Idx < Extents::rank() - 1
+           ? (Extents::static_extent(Idx) == dynamic_extent
+                  ? 0
+                  : Extents::static_extent(Idx))
+           : 1) *
+      ... * (StaticStride == dynamic_extent ? 0 : StaticStride));
+ return val == 0 ? dynamic_extent : val;
+ }
+};
+
+} // namespace detail
+
+// Actual submdspan mapping call
+template <class Extents>
+template <class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION constexpr auto
+layout_right::mapping<Extents>::submdspan_mapping_impl(
+ SliceSpecifiers... slices) const {
+
+ // compute sub extents
+ using src_ext_t = Extents;
+ auto dst_ext = submdspan_extents(extents(), slices...);
+ using dst_ext_t = decltype(dst_ext);
+
+ // figure out sub layout type
+ using deduce_layout = detail::deduce_layout_right_submapping<
+ typename dst_ext_t::index_type, dst_ext_t::rank(),
+ std::make_index_sequence<src_ext_t::rank()>,
+ SliceSpecifiers...>;
+
+ // Figure out if any slice's lower bound equals the corresponding extent.
+ // If so, bypass evaluating the layout mapping. This fixes LWG Issue 4060.
+ const bool out_of_bounds =
+ detail::any_slice_out_of_bounds(this->extents(), slices...);
+ auto offset = static_cast<size_t>(
+ out_of_bounds ? this->required_span_size()
+ : this->operator()(detail::first_of(slices)...));
+
+ if constexpr (deduce_layout::layout_right_value()) {
+ // layout_right case
+ using dst_mapping_t = typename layout_right::mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t(dst_ext),
+ offset};
+ } else if constexpr (deduce_layout::layout_right_padded_value()) {
+ constexpr size_t S_static =
+     MDSPAN_IMPL_STANDARD_NAMESPACE::detail::compute_s_static_layout_right<
+         Extents, deduce_layout::gap_len,
+         Extents::static_extent(Extents::rank() - 1)>::value(std::make_index_sequence<Extents::rank()>());
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::
+     layout_right_padded<S_static>::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{
+ dst_mapping_t(dst_ext,
+ stride(src_ext_t::rank() - 2 - deduce_layout::gap_len)),
+ offset};
+ } else {
+ // layout_stride case
+ using dst_mapping_t = typename layout_stride::mapping<dst_ext_t>;
+ auto inv_map = detail::inv_map_rank(std::integral_constant<size_t, 0>(),
+ std::index_sequence<>(), slices...);
+ return submdspan_mapping_result<dst_mapping_t> {
+ dst_mapping_t(mdspan_non_standard, dst_ext,
+ detail::construct_sub_strides(
+ *this, inv_map,
+// HIP needs deduction guides to have markups, so we need to be explicit.
+// NVCC 11.0 has a bug with the deduction guide here (we verified that 11.2
+// does not), but Clang-CUDA also doesn't accept the use of a deduction
+// guide, so disable it for CUDA altogether.
+#if defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_CUDA)
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple<decltype(detail::stride_of(slices))...>{
+ detail::stride_of(slices)...}).values),
+#else
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple{detail::stride_of(slices)...}).values),
+#endif
+ offset
+ };
+ }
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+}
+
+template <size_t PaddingValue>
+template <class Extents>
+template <class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION constexpr auto
+MDSPAN_IMPL_PROPOSED_NAMESPACE::layout_right_padded<PaddingValue>::mapping<Extents>::submdspan_mapping_impl(
+ SliceSpecifiers... slices) const {
+
+ // compute sub extents
+ using src_ext_t = Extents;
+ auto dst_ext = submdspan_extents(extents(), slices...);
+ using dst_ext_t = decltype(dst_ext);
+
+ if constexpr (Extents::rank() == 0) { // rank-0 case
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::layout_right_padded<PaddingValue>::template mapping<Extents>;
+ return submdspan_mapping_result<dst_mapping_t>{*this, 0};
+ } else {
+ // Figure out if any slice's lower bound equals the corresponding extent.
+ // If so, bypass evaluating the layout mapping. This fixes LWG Issue 4060.
+ const bool out_of_bounds =
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::any_slice_out_of_bounds(this->extents(), slices...);
+ auto offset = static_cast<size_t>(
+ out_of_bounds ? this->required_span_size()
+ : this->operator()(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::first_of(slices)...));
+ if constexpr (dst_ext_t::rank() == 0) { // result rank-0
+ // Same issue as in layout_left_padded: see comment there
+ // using dst_mapping_t = typename layout_right::template mapping<dst_ext_t>;
+ // return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t{dst_ext}, offset};
+ return submdspan_mapping_result<typename layout_right::template mapping<dst_ext_t>>
+ {typename layout_right::template mapping<dst_ext_t>{dst_ext}, offset};
+ } else { // general case
+ // figure out the sub layout type
+ using deduce_layout = MDSPAN_IMPL_STANDARD_NAMESPACE::detail::deduce_layout_right_submapping<
+ typename dst_ext_t::index_type, dst_ext_t::rank(),
+ decltype(std::make_index_sequence<src_ext_t::rank()>()),
+ SliceSpecifiers...>;
+
+ if constexpr (deduce_layout::layout_right_value() && dst_ext_t::rank() == 1) { // getting rank-1 from rightmost
+ using dst_mapping_t = typename layout_right::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{dst_mapping_t{dst_ext}, offset};
+ } else if constexpr (deduce_layout::layout_right_padded_value()) { // can keep layout_right_padded
+ constexpr size_t S_static =
+     MDSPAN_IMPL_STANDARD_NAMESPACE::detail::compute_s_static_layout_right<
+         Extents, deduce_layout::gap_len,
+         static_padding_stride>::value(std::make_index_sequence<Extents::rank()>());
+ using dst_mapping_t = typename MDSPAN_IMPL_PROPOSED_NAMESPACE::
+     layout_right_padded<S_static>::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t>{
+ dst_mapping_t(dst_ext, stride(Extents::rank() - 2 - deduce_layout::gap_len)), offset};
+ } else { // layout_stride
+ auto inv_map = MDSPAN_IMPL_STANDARD_NAMESPACE::detail::inv_map_rank(std::integral_constant<size_t, 0>(),
+ std::index_sequence<>(), slices...);
+ using dst_mapping_t = typename layout_stride::template mapping<dst_ext_t>;
+ return submdspan_mapping_result<dst_mapping_t> {
+ dst_mapping_t(mdspan_non_standard, dst_ext,
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::construct_sub_strides(
+ *this, inv_map,
+// HIP needs deduction guides to have markups, so we need to be explicit.
+// NVCC 11.0 has a bug with the deduction guide here (we verified that 11.2
+// does not), but Clang-CUDA also doesn't accept the use of a deduction
+// guide, so disable it for CUDA altogether.
+#if defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_CUDA)
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple<decltype(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices))...>{
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices)...}).values),
+#else
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple{MDSPAN_IMPL_STANDARD_NAMESPACE::detail::stride_of(slices)...}).values),
+#endif
+ offset
+ };
+ }
+ }
+ }
+
+
+#if defined(__NVCC__) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
+ __builtin_unreachable();
+#endif
+}
+
+//**********************************
+// layout_stride submdspan_mapping
+//*********************************
+template <class Extents>
+template <class... SliceSpecifiers>
+MDSPAN_INLINE_FUNCTION constexpr auto
+layout_stride::mapping<Extents>::submdspan_mapping_impl(
+ SliceSpecifiers... slices) const {
+ auto dst_ext = submdspan_extents(extents(), slices...);
+ using dst_ext_t = decltype(dst_ext);
+ auto inv_map = detail::inv_map_rank(std::integral_constant<size_t, 0>(),
+ std::index_sequence<>(), slices...);
+ using dst_mapping_t = typename layout_stride::template mapping<dst_ext_t>;
+
+ // Figure out if any slice's lower bound equals the corresponding extent.
+ // If so, bypass evaluating the layout mapping. This fixes LWG Issue 4060.
+ const bool out_of_bounds =
+ detail::any_slice_out_of_bounds(this->extents(), slices...);
+ auto offset = static_cast<size_t>(
+ out_of_bounds ? this->required_span_size()
+ : this->operator()(detail::first_of(slices)...));
+
+ return submdspan_mapping_result<dst_mapping_t> {
+ dst_mapping_t(mdspan_non_standard, dst_ext,
+ detail::construct_sub_strides(
+ *this, inv_map,
+// HIP needs deduction guides to have markups, so we need to be explicit.
+// NVCC 11.0 has a bug with the deduction guide here (we verified that 11.2
+// does not), but Clang-CUDA also doesn't accept the use of a deduction
+// guide, so disable it for CUDA altogether.
+#if defined(_MDSPAN_HAS_HIP) || defined(_MDSPAN_HAS_CUDA)
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple<decltype(detail::stride_of(slices))...>(
+ detail::stride_of(slices)...)).values),
+#else
+ MDSPAN_IMPL_STANDARD_NAMESPACE::detail::tuple(detail::stride_of(slices)...)).values),
+#endif
+ offset
+ };
+}
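+
+// Illustrative note, not part of the library: a layout_stride source always
+// yields a layout_stride submapping. Contiguous specifiers such as
+// full_extent or index pairs have stride_of(...) == 1, so the sub strides
+// equal the corresponding source strides, and the offset is the mapping
+// evaluated at the lower bounds of the slices.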
+
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
+
+#if defined __NVCC__
+#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
+#pragma nv_diagnostic pop
+#else
+#ifdef __CUDA_ARCH__
+#pragma diagnostic pop
+#endif
+#endif
+#elif defined __NVCOMPILER
+#pragma diagnostic pop
+#endif
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+//
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include <cassert>
+#include "layout_padded_fwd.hpp"
+#include "../__p0009_bits/dynamic_extent.hpp"
+#include "../__p0009_bits/extents.hpp"
+#include "../__p0009_bits/mdspan.hpp"
+#include "../__p0009_bits/layout_left.hpp"
+#include "../__p0009_bits/layout_right.hpp"
+#include "../__p0009_bits/layout_stride.hpp"
+#include "../__p0009_bits/utility.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace MDSPAN_IMPL_PROPOSED_NAMESPACE {
+
+namespace detail {
+template<class _T>
+MDSPAN_INLINE_FUNCTION
+constexpr _T
+find_next_multiple(_T alignment, _T offset)
+{
+ if ( alignment == 0 ) {
+ return _T(0);
+ } else {
+ return ( ( offset + alignment - 1 ) / alignment) * alignment;
+ }
+}
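+
+// Illustrative example, not part of the library:
+//   find_next_multiple(4, 10) == 12 (the least multiple of 4 that is >= 10)
+//   find_next_multiple(0, 10) == 0  (guarded by the alignment == 0 branch)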
+
+template <class _ExtentsType, size_t _PaddingValue, size_t _ExtentToPadIdx>
+MDSPAN_INLINE_FUNCTION constexpr size_t get_actual_static_padding_value() {
+ constexpr auto rank = _ExtentsType::rank();
+
+ if constexpr (rank <= typename _ExtentsType::rank_type(1)) {
+ return 0;
+ } else if constexpr (_PaddingValue != dynamic_extent &&
+ _ExtentsType::static_extent(_ExtentToPadIdx) !=
+ dynamic_extent) {
+ static_assert(
+ (_PaddingValue != 0) ||
+ (_ExtentsType::static_extent(_ExtentToPadIdx) == 0),
+ "padding stride can be 0 only if "
+ "extents_type::static_extent(extent-to-pad) is 0 or dynamic_extent");
+ return find_next_multiple(_PaddingValue,
+ _ExtentsType::static_extent(_ExtentToPadIdx));
+ } else {
+ return dynamic_extent;
+ }
+ // Missing return statement warning from NVCC and ICC
+#if (defined(__NVCC__) || defined(__INTEL_COMPILER)) && !defined(__NVCOMPILER)
+ return 0;
+#endif
+}
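+
+// Illustrative example, not part of the library: for
+// _ExtentsType = extents<int, 3, 4>, _PaddingValue = 4 and
+// _ExtentToPadIdx = 0, both values are static, so the result is
+// find_next_multiple(4, 3) == 4. With a dynamic padding value or a dynamic
+// extent the result is dynamic_extent, and for rank <= 1 it is 0.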
+
+template <size_t _PaddingValue, typename _Extents, size_t _ExtentToPadIdx, size_t _Rank, typename Enabled = void>
+struct static_array_type_for_padded_extent
+{
+ static constexpr size_t padding_value = _PaddingValue;
+ using index_type = typename _Extents::index_type;
+ using extents_type = _Extents;
+ using type = ::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::maybe_static_array<
+ index_type, size_t, dynamic_extent,
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::MDSPAN_IMPL_PROPOSED_NAMESPACE::detail::get_actual_static_padding_value<extents_type, _PaddingValue,
+ _ExtentToPadIdx>()>;
+};
+
+template <size_t _PaddingValue, typename _Extents, size_t _ExtentToPadIdx, size_t Rank>
+struct static_array_type_for_padded_extent<_PaddingValue, _Extents,
+ _ExtentToPadIdx, Rank, std::enable_if_t<Rank <= 1>> {
+ using index_type = typename _Extents::index_type;
+ using extents_type = _Extents;
+ using type =
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::maybe_static_array<
+ index_type, size_t, dynamic_extent, 0>;
+};
+
+template <size_t _PaddingValue, typename _Extents, size_t _ExtentToPadIdx>
+struct padded_extent {
+ static constexpr size_t padding_value = _PaddingValue;
+ using index_type = typename _Extents::index_type;
+ using extents_type = _Extents;
+ using static_array_type = typename static_array_type_for_padded_extent<
+ padding_value, _Extents, _ExtentToPadIdx, _Extents::rank()>::type;
+
+ MDSPAN_INLINE_FUNCTION
+ static constexpr auto static_value() { return static_array_type::static_value(0); }
+
+ MDSPAN_INLINE_FUNCTION
+ static constexpr static_array_type
+ init_padding(const _Extents &exts) {
+ if constexpr ((_Extents::rank() > 1) && (padding_value == dynamic_extent)) {
+ return {exts.extent(_ExtentToPadIdx)};
+ } else {
+ return init_padding(exts, padding_value);
+ }
+ // Missing return statement warning from NVCC and ICC
+#if (defined(__NVCC__) || defined(__INTEL_COMPILER)) && !defined(__NVCOMPILER)
+ return {};
+#endif
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr static_array_type
+ init_padding([[maybe_unused]] const _Extents &exts,
+ [[maybe_unused]] index_type pv) {
+ if constexpr (_Extents::rank() > 1) {
+ return {find_next_multiple(pv,
+ exts.extent(_ExtentToPadIdx))};
+ } else {
+ return {};
+ }
+ // Missing return statement warning from NVCC and ICC
+#if (defined(__NVCC__) || defined(__INTEL_COMPILER)) && !defined(__NVCOMPILER)
+ return {};
+#endif
+ }
+
+ template <typename _Mapping, size_t _PaddingStrideIdx>
+ MDSPAN_INLINE_FUNCTION static constexpr static_array_type
+ init_padding([[maybe_unused]] const _Mapping &other_mapping,
+ std::integral_constant<size_t, _PaddingStrideIdx>) {
+ if constexpr (_Extents::rank() > 1) {
+ return {other_mapping.stride(_PaddingStrideIdx)};
+ } else {
+ return {};
+ }
+ // Missing return statement warning from NVCC and ICC
+#if (defined(__NVCC__) || defined(__INTEL_COMPILER)) && !defined(__NVCOMPILER)
+ return {};
+#endif
+ }
+};
+} // namespace detail
+
+template <size_t PaddingValue>
+template <class Extents>
+class layout_left_padded<PaddingValue>::mapping {
+public:
+ static constexpr size_t padding_value = PaddingValue;
+
+ using extents_type = Extents;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using layout_type = layout_left_padded<padding_value>;
+
+#ifndef MDSPAN_INTERNAL_TEST
+private:
+#endif // MDSPAN_INTERNAL_TEST
+
+ static constexpr rank_type padded_stride_idx = detail::layout_padded_constants<layout_type, extents_type>::padded_stride_idx;
+ static constexpr rank_type extent_to_pad_idx = detail::layout_padded_constants<layout_type, extents_type>::extent_to_pad_idx;
+
+ static_assert((padding_value != 0)
+ || (extents_type::static_extent(extent_to_pad_idx) == 0)
+ || (extents_type::static_extent(extent_to_pad_idx) == dynamic_extent),
+ "if padding stride is 0, static_extent(extent-to-pad-rank) must also be 0 or dynamic_extent");
+
+ using padded_stride_type = detail::padded_extent< padding_value, extents_type, extent_to_pad_idx >;
+
+ static constexpr size_t static_padding_stride = padded_stride_type::static_value();
+
+ typename padded_stride_type::static_array_type padded_stride = {};
+ extents_type exts = {};
+
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<>) const {
+ return 0;
+ }
+
+ template <size_t Rank, class IndexOffset>
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<Rank>, IndexOffset index_offset) const {
+ return index_offset;
+ }
+
+ template <size_t... Ranks, class... IndexOffsets>
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<Ranks...>,
+ IndexOffsets... index_offsets) const {
+ index_type indices[] = {static_cast<index_type>(index_offsets)...};
+ // self-recursive fold trick from
+ // https://github.com/llvm/llvm-project/blob/96e1914aa2e6d8966acbfbe2f4d184201f1aa318/libcxx/include/mdspan/layout_left.h#L144
+ index_type res = 0;
+ ((res = indices[extents_type::rank() - 1 - Ranks] +
+ ((extents_type::rank() - 1 - Ranks) == extent_to_pad_idx
+ ? padded_stride.value(0)
+ : exts.extent(extents_type::rank() - 1 - Ranks)) *
+ res),
+ ...);
+ return res;
+ }
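+
+ // Illustrative example, not part of the class: for extents {3, 4} with
+ // padded_stride.value(0) == 4, the fold above expands compute_offset(i, j)
+ // to i + 4 * j, i.e. layout_left with the padded stride substituted for
+ // extent 0.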
+
+public:
+#if !MDSPAN_HAS_CXX_20 || defined(__NVCC__)
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping()
+ : mapping(extents_type{})
+ {}
+#else
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr mapping()
+ requires(static_padding_stride != dynamic_extent) = default;
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping()
+ requires(static_padding_stride == dynamic_extent)
+ : mapping(extents_type{})
+ {}
+#endif
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(const mapping&) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping& operator=(const mapping&) noexcept = default;
+
+ /**
+ * Initializes the mapping with the given extents.
+ *
+ * \param ext the given extents
+ */
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const extents_type& ext)
+ : padded_stride(padded_stride_type::init_padding(ext)), exts(ext)
+ {}
+
+ /**
+ * Initializes the mapping with the given extents and the specified padding value.
+ *
+ * This overload participates in overload resolution only if `is_convertible_v<Size, index_type>`
+ * is `true` and `is_nothrow_constructible_v<index_type, Size>` is `true`
+ *
+ * \param ext the given extents
+ * \param padding_value the padding value
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Size,
+ /* requires */ (
+ std::is_convertible_v<_Size, index_type>
+ && std::is_nothrow_constructible_v<index_type, _Size>
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const extents_type &ext, _Size dynamic_padding_value)
+ : padded_stride(padded_stride_type::init_padding(ext, dynamic_padding_value)), exts(ext)
+ {
+ assert((padding_value == dynamic_extent) || (static_cast<index_type>(padding_value) == static_cast<index_type>(dynamic_padding_value)));
+ }
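+
+ // Illustrative usage, not part of the class:
+ //   layout_left_padded<dynamic_extent>::mapping<extents<int, 3, 4>> m{
+ //       extents<int, 3, 4>{}, 4};
+ // pads extent 0 from 3 up to 4, so m.stride(1) == 4 and
+ // m.required_span_size() == 16.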
+
+ /**
+ * Converting constructor from `layout_left::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `is_constructible_v<extents_type, OtherExtents>` is true. If
+ * `OtherExtents::rank() > 1` then one of `padding_value`, `static_extent(0)`,
+ * or `OtherExtents::static_extent(0)` must be `dynamic_extent`; otherwise,
+ * `OtherExtents::static_extent(0)` must be equal to the least multiple of
+ * `padding_value` greater than or equal to `extents_type::static_extent(0)`
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _OtherExtents,
+ /* requires */ (std::is_constructible_v<extents_type, _OtherExtents>))
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ (!std::is_convertible_v<_OtherExtents, extents_type>))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const layout_left::mapping<_OtherExtents> &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {
+ static_assert(
+ (_OtherExtents::rank() > 1) ||
+ (static_padding_stride != dynamic_extent) ||
+ (_OtherExtents::static_extent(extent_to_pad_idx) != dynamic_extent) ||
+ (static_padding_stride ==
+ _OtherExtents::static_extent(extent_to_pad_idx)));
+ }
+
+ /**
+ * Converting constructor from `layout_stride::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `is_constructible_v<extents_type, OtherExtents>` is true
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _OtherExtents,
+ /* requires */ (std::is_constructible_v<extents_type, _OtherExtents>))
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const layout_stride::mapping<_OtherExtents> &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {}
+
+ /**
+ * Converting constructor from `layout_left_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `is_constructible_v<extents_type, OtherExtents>` is true. Either
+ * `padding_value` or `OtherPaddingStride` must be `std::dynamic_extent`, or
+ * `padding_value == OtherPaddingStride`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_left_padded_mapping<_Mapping>::value
+ &&std::is_constructible_v<
+ extents_type, typename _Mapping::extents_type>))
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 1 &&
+ (padding_value == dynamic_extent ||
+ _Mapping::padding_value == dynamic_extent)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const _Mapping &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {
+ static_assert(padding_value == dynamic_extent ||
+ _Mapping::padding_value == dynamic_extent ||
+ padding_value == _Mapping::padding_value);
+ }
+
+ /**
+ * Converting constructor from `layout_right_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `extents_type::rank()` is 0 or 1 and `is_constructible_v<extents_type,
+ * OtherExtents>` is `true`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_right_padded_mapping<_Mapping>::value
+ &&extents_type::rank() <= 1 &&
+ std::is_constructible_v<extents_type,
+ typename _Mapping::extents_type>))
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ (!std::is_convertible_v<typename _Mapping::extents_type, extents_type>))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const _Mapping &other_mapping) noexcept
+ : padded_stride(padded_stride_type::init_padding(
+ static_cast<extents_type>(other_mapping.extents()),
+ other_mapping.extents().extent(extent_to_pad_idx))),
+ exts(other_mapping.extents()) {}
+
+ MDSPAN_INLINE_FUNCTION constexpr const extents_type &
+ extents() const noexcept {
+ return exts;
+ }
+
+ constexpr std::array<index_type, extents_type::rank()>
+ strides() const noexcept {
+ if constexpr (extents_type::rank() == 0) {
+ return {};
+ } else if constexpr (extents_type::rank() == 1) {
+ return {1};
+ } else {
+ index_type value = 1;
+ std::array<index_type, extents_type::rank()> s{};
+ s[extent_to_pad_idx] = value;
+ value *= padded_stride.value(0);
+ for (rank_type r = extent_to_pad_idx + 1; r < extents_type::rank() - 1;
+ ++r) {
+ s[r] = value;
+ value *= exts.extent(r);
+ }
+ s[extents_type::rank() - 1] = value;
+ return s;
+ }
+ }
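+
+ // Illustrative example, not part of the class: for extents {3, 4, 5} with
+ // padded_stride.value(0) == 4, strides() returns {1, 4, 16}: rank 0 is
+ // contiguous, and each subsequent stride multiplies in the padded extent
+ // first, then the remaining extents.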
+
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ required_span_size() const noexcept {
+ if constexpr (extents_type::rank() == 0) {
+ return 1;
+ } else if constexpr (extents_type::rank() == 1) {
+ return exts.extent(0);
+ } else {
+ index_type value = padded_stride.value(0);
+ for (rank_type r = 1; r < extents_type::rank(); ++r) {
+ value *= exts.extent(r);
+ }
+ return value;
+ }
+ }
+
+ /**
+ * Return the mapping given the provided indices per rank.
+ *
+ * This overload participates in overload resolution only if:
+ * - `sizeof...(Indices) == extents_type::rank()`,
+ * - `(is_convertible_v<Indices, index_type> && ...) is true`, and
+ * - (is_nothrow_constructible_v<index_type, Indices> && ...) is true.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... _Indices,
+ /* requires */ (sizeof...(_Indices) == extents_type::rank() &&
+ (::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::
+ are_valid_indices<index_type, _Indices...>())))
+ MDSPAN_INLINE_FUNCTION constexpr size_t
+ operator()(_Indices... idxs) const noexcept {
+#if !defined(NDEBUG)
+ ::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::check_all_indices(this->extents(),
+ idxs...);
+#endif // ! NDEBUG
+ return compute_offset(std::index_sequence_for<_Indices...>{}, idxs...);
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept {
+ return true;
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept {
+ return (extents_type::rank() <= rank_type(1)) ||
+ (extents_type::static_extent(extent_to_pad_idx) != dynamic_extent &&
+ extents_type::static_extent(extent_to_pad_idx) ==
+ padded_stride_type::static_value());
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept {
+ return true;
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept {
+ return true;
+ }
+ MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept {
+ return (extents_type::rank() < 2) ||
+ (exts.extent(extent_to_pad_idx) == padded_stride.value(0));
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept {
+ return true;
+ }
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr index_type stride(rank_type r) const noexcept {
+ assert(r < extents_type::rank());
+ if (r == 0)
+ return index_type(1);
+
+ index_type value = padded_stride.value(0);
+ for (rank_type k = 1; k < r; k++)
+ value *= exts.extent(k);
+
+ return value;
+ }
+
+ /**
+ * Equality operator between `layout_left_padded`s
+ *
+ * This overload only participates in overload resolution if
+ * `OtherExtents::rank() == extents_type::rank()`.
+ *
+ * \note There is currently a difference from p2642r2, where this function is
+ * specified as taking `layout_left_padded< padding_value >::mapping<
+ * Extents>`. However, this makes `padding_value` non-deducible.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_left_padded_mapping<_Mapping>::value &&
+ (_Mapping::extents_type::rank() == extents_type::rank())))
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator==(const mapping &left, const _Mapping &right) noexcept {
+ // Workaround for some compilers not short-circuiting properly with
+ // compile-time checks, i.e., we can't access stride(padded_stride_idx)
+ // of a rank-0 mapping
+ bool strides_equal = true;
+ if constexpr (extents_type::rank() > rank_type(1)) {
+ strides_equal =
+ left.stride(padded_stride_idx) == right.stride(padded_stride_idx);
+ }
+ return (left.extents() == right.extents()) && strides_equal;
+ }
+
+#if !MDSPAN_HAS_CXX_20
+ /**
+ * Inequality operator between `layout_left_padded`s
+ *
+ * This overload only participates in overload resolution if
+ * `OtherExtents::rank() == extents_type::rank()`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_left_padded_mapping<_Mapping>::value &&
+ (_Mapping::extents_type::rank() == extents_type::rank())))
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator!=(const mapping &left, const _Mapping &right) noexcept {
+ return !(left == right);
+ }
+#endif
+
+ // [mdspan.submdspan.mapping], submdspan mapping specialization
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto submdspan_mapping_impl(
+ SliceSpecifiers... slices) const;
+
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr auto submdspan_mapping(
+ const mapping& src, SliceSpecifiers... slices) {
+ return src.submdspan_mapping_impl(slices...);
+ }
+};
+
+template <size_t PaddingValue>
+template <class Extents>
+class layout_right_padded<PaddingValue>::mapping {
+public:
+ static constexpr size_t padding_value = PaddingValue;
+
+ using extents_type = Extents;
+ using index_type = typename extents_type::index_type;
+ using size_type = typename extents_type::size_type;
+ using rank_type = typename extents_type::rank_type;
+ using layout_type = layout_right_padded<padding_value>;
+
+#ifndef MDSPAN_INTERNAL_TEST
+ private:
+#endif // MDSPAN_INTERNAL_TEST
+
+ static constexpr rank_type padded_stride_idx = detail::layout_padded_constants<layout_type, extents_type>::padded_stride_idx;
+ static constexpr rank_type extent_to_pad_idx = detail::layout_padded_constants<layout_type, extents_type>::extent_to_pad_idx;
+
+ static_assert((padding_value != 0)
+ || (extents_type::static_extent(extent_to_pad_idx) == 0)
+ || (extents_type::static_extent(extent_to_pad_idx) == dynamic_extent),
+ "if padding stride is 0, static_extent(extent-to-pad-rank) must also be 0 or dynamic_extent");
+
+ using padded_stride_type = detail::padded_extent< padding_value, extents_type, extent_to_pad_idx >;
+ static constexpr size_t static_padding_stride = padded_stride_type::static_value();
+
+ typename padded_stride_type::static_array_type padded_stride = {};
+ extents_type exts = {};
+
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<>) const {
+ return 0;
+ }
+
+ template <size_t Rank, class IndexOffset>
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<Rank>, IndexOffset index_offset) const {
+ return index_offset;
+ }
+
+ template <size_t... Ranks, class... IndexOffsets>
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ compute_offset(std::index_sequence<Ranks...>,
+ IndexOffsets... index_offsets) const {
+ // self-recursive fold trick from
+ // https://github.com/llvm/llvm-project/blob/4d9771741d40cc9cfcccb6b033f43689d36b705a/libcxx/include/mdspan/layout_right.h#L141
+ index_type res = 0;
+ ((res = static_cast<index_type>(index_offsets) +
+ (Ranks == extent_to_pad_idx ? padded_stride.value(0)
+ : exts.extent(Ranks)) *
+ res),
+ ...);
+ return res;
+ }
+
+public:
+#if !MDSPAN_HAS_CXX_20 || defined(__NVCC__)
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping()
+ : mapping(extents_type{})
+ {}
+#else
+ MDSPAN_INLINE_FUNCTION_DEFAULTED
+ constexpr mapping()
+ requires(static_padding_stride != dynamic_extent) = default;
+
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping()
+ requires(static_padding_stride == dynamic_extent)
+ : mapping(extents_type{})
+ {}
+#endif
+
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping(const mapping&) noexcept = default;
+ MDSPAN_INLINE_FUNCTION_DEFAULTED constexpr mapping& operator=(const mapping&) noexcept = default;
+
+ /**
+ * Initializes the mapping with the given extents.
+ *
+ * \param ext the given extents
+ */
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const extents_type &ext)
+ : padded_stride(padded_stride_type::init_padding(ext)), exts(ext) {}
+
+ /**
+ * Initializes the mapping with the given extents and the specified padding value.
+ *
+ * This overload participates in overload resolution only if `is_convertible_v<Size, index_type>`
+ * is `true` and `is_nothrow_constructible_v<index_type, Size>` is `true`
+ *
+ * \param ext the given extents
+ * \param padding_value the padding value
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Size,
+ /* requires */ (
+ std::is_convertible_v<_Size, index_type>
+ && std::is_nothrow_constructible_v<index_type, _Size>
+ )
+ )
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const extents_type &ext, _Size dynamic_padding_value)
+ : padded_stride(padded_stride_type::init_padding(ext, static_cast<index_type>(dynamic_padding_value))),
+ exts(ext) {
+ assert((padding_value == dynamic_extent) ||
+ (static_cast<index_type>(padding_value) == static_cast<index_type>(dynamic_padding_value)));
+ }
+
+ /**
+ * Converting constructor from `layout_right::mapping`.
+ *
+ * This overload participates in overload resolution only if `is_constructible_v<extents_type, OtherExtents>` is true.
+ * If `OtherExtents::rank() > 1` then one of `padding_value`, `static_extent(0)`, or `OtherExtents::static_extent(0)` must be `dynamic_extent`;
+ * otherwise, `OtherExtents::static_extent(0)` must be equal to the least multiple of `padding_value` greater than or equal to `extents_type::static_extent(0)`
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _OtherExtents,
+ /* requires */ (std::is_constructible_v<extents_type, _OtherExtents>))
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ (!std::is_convertible_v<_OtherExtents, extents_type>))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const layout_right::mapping<_OtherExtents> &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {
+ static_assert(
+ (_OtherExtents::rank() > 1) ||
+ (padded_stride_type::static_value() != dynamic_extent) ||
+ (_OtherExtents::static_extent(extent_to_pad_idx) != dynamic_extent) ||
+ (padded_stride_type::static_value() ==
+ _OtherExtents::static_extent(extent_to_pad_idx)));
+ }
+
+ /**
+ * Converting constructor from `layout_stride::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `is_constructible_v<extents_type, OtherExtents>` is true
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _OtherExtents,
+ /* requires */ (std::is_constructible_v<extents_type, _OtherExtents>))
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const layout_stride::mapping<_OtherExtents> &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {}
+
+ /**
+ * Converting constructor from `layout_right_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `is_constructible_v<extents_type, OtherExtents>` is true. Either
+ * `padding_value` or `OtherPaddingStride` must be `std::dynamic_extent`, or
+ * `padding_value == OtherPaddingStride`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_right_padded_mapping<_Mapping>::value
+ &&std::is_constructible_v<
+ extents_type, typename _Mapping::extents_type>))
+ MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 1 &&
+ (padding_value == dynamic_extent ||
+ _Mapping::padding_value == dynamic_extent)))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const _Mapping &other_mapping)
+ : padded_stride(padded_stride_type::init_padding(
+ other_mapping,
+ std::integral_constant<size_t, padded_stride_idx>{})),
+ exts(other_mapping.extents()) {
+ static_assert(padding_value == dynamic_extent ||
+ _Mapping::padding_value == dynamic_extent ||
+ padding_value == _Mapping::padding_value);
+ }
+
+ /**
+ * Converting constructor from `layout_left_padded::mapping`.
+ *
+ * This overload participates in overload resolution only if
+ * `extents_type::rank()` is 0 or 1 and `is_constructible_v<extents_type,
+ * OtherExtents>` is `true`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_left_padded_mapping<_Mapping>::value
+ &&extents_type::rank() <= 1 &&
+ std::is_constructible_v<extents_type,
+ typename _Mapping::extents_type>))
+ MDSPAN_CONDITIONAL_EXPLICIT(
+ (!std::is_convertible_v<typename _Mapping::extents_type, extents_type>))
+ MDSPAN_INLINE_FUNCTION
+ constexpr mapping(const _Mapping &other_mapping) noexcept
+ : padded_stride(padded_stride_type::init_padding(
+ static_cast<extents_type>(other_mapping.extents()),
+ other_mapping.extents().extent(extent_to_pad_idx))),
+ exts(other_mapping.extents()) {}
+
+ MDSPAN_INLINE_FUNCTION constexpr const extents_type &
+ extents() const noexcept {
+ return exts;
+ }
+
+ constexpr std::array<index_type, extents_type::rank()>
+ strides() const noexcept {
+ if constexpr (extents_type::rank() == 0) {
+ return {};
+ } else if constexpr (extents_type::rank() == 1) {
+ return {1};
+ } else {
+ index_type value = 1;
+ std::array<index_type, extents_type::rank()> s{};
+ s[extent_to_pad_idx] = value;
+ value *= padded_stride.value(0);
+ for (rank_type r = extent_to_pad_idx - 1; r > 0; --r) {
+ s[r] = value;
+ value *= exts.extent(r);
+ }
+ s[0] = value;
+ return s;
+ }
+ }
+
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ required_span_size() const noexcept {
+ if constexpr (extents_type::rank() == 0) {
+ return 1;
+ } else if constexpr (extents_type::rank() == 1) {
+ return exts.extent(0);
+ } else {
+ index_type value = 1;
+ for (rank_type r = 0; r < extent_to_pad_idx; ++r) {
+ value *= exts.extent(r);
+ }
+ return value * padded_stride.value(0);
+ }
+ }
+
+ /**
+ * Return the mapping given the provided indices per rank.
+ *
+ * This overload participates in overload resolution only if:
+ * - `sizeof...(Indices) == extents_type::rank()`,
+ * - `(is_convertible_v<Indices, index_type> && ...) is true`, and
+ * - (is_nothrow_constructible_v<index_type, Indices> && ...) is true.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class... _Indices,
+ /* requires */ (sizeof...(_Indices) == extents_type::rank() &&
+ (::MDSPAN_IMPL_STANDARD_NAMESPACE::detail::
+ are_valid_indices<index_type, _Indices...>())))
+ MDSPAN_INLINE_FUNCTION constexpr size_t
+ operator()(_Indices... idxs) const noexcept {
+ return compute_offset(std::index_sequence_for<_Indices...>{}, idxs...);
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_unique() noexcept {
+ return true;
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_exhaustive() noexcept {
+ return (extents_type::rank() <= rank_type(1)) ||
+ (extents_type::static_extent(extent_to_pad_idx) != dynamic_extent &&
+ extents_type::static_extent(extent_to_pad_idx) ==
+ padded_stride_type::static_value());
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_always_strided() noexcept {
+ return true;
+ }
+
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_unique() noexcept {
+ return true;
+ }
+ MDSPAN_INLINE_FUNCTION constexpr bool is_exhaustive() const noexcept {
+ return (extents_type::rank() < 2) ||
+ (exts.extent(extent_to_pad_idx) == padded_stride.value(0));
+ }
+ MDSPAN_INLINE_FUNCTION static constexpr bool is_strided() noexcept {
+ return true;
+ }
+
+ MDSPAN_INLINE_FUNCTION constexpr index_type
+ stride(rank_type r) const noexcept {
+ assert(r < extents_type::rank());
+ if (r == extents_type::rank() - 1)
+ return index_type(1);
+
+ index_type value = padded_stride.value(0);
+ for (rank_type k = extents_type::rank() - 2; k > r; k--)
+ value *= exts.extent(k);
+
+ return value;
+ }
+
+ /**
+ * Equality operator between `layout_right_padded`s
+ *
+ * This overload only participates in overload resolution if
+ * `OtherExtents::rank() == extents_type::rank()`.
+ *
+ * \note There is currently a difference from p2642r2, where this function is
+ * specified as taking `layout_right_padded< padding_value >::mapping<
+ * Extents>`. However, this makes `padding_value` non-deducible.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_right_padded_mapping<_Mapping>::value &&
+ (_Mapping::extents_type::rank() == extents_type::rank())))
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator==(const mapping &left, const _Mapping &right) noexcept {
+ // Workaround for some compilers not short-circuiting properly with
+ // compile-time checks, i.e., we can't access stride(padded_stride_idx)
+ // of a rank-0 mapping
+ bool strides_equal = true;
+ if constexpr (extents_type::rank() > rank_type(1)) {
+ strides_equal =
+ left.stride(padded_stride_idx) == right.stride(padded_stride_idx);
+ }
+ return (left.extents() == right.extents()) && strides_equal;
+ }
+
+#if !MDSPAN_HAS_CXX_20
+ /**
+ * Inequality operator between `layout_right_padded`s
+ *
+ * This overload only participates in overload resolution if
+ * `OtherExtents::rank() == extents_type::rank()`.
+ */
+ MDSPAN_TEMPLATE_REQUIRES(
+ class _Mapping,
+ /* requires */ (detail::is_layout_right_padded_mapping<_Mapping>::value &&
+ (_Mapping::extents_type::rank() == extents_type::rank())))
+ MDSPAN_INLINE_FUNCTION friend constexpr bool
+ operator!=(const mapping &left, const _Mapping &right) noexcept {
+ return !(left == right);
+ }
+#endif
+
+ // [mdspan.submdspan.mapping], submdspan mapping specialization
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ constexpr auto submdspan_mapping_impl(
+ SliceSpecifiers... slices) const;
+
+ template<class... SliceSpecifiers>
+ MDSPAN_INLINE_FUNCTION
+ friend constexpr auto submdspan_mapping(
+ const mapping& src, SliceSpecifiers... slices) {
+ return src.submdspan_mapping_impl(slices...);
+ }
+};
+} // namespace MDSPAN_IMPL_PROPOSED_NAMESPACE
+} // namespace MDSPAN_IMPL_STANDARD_NAMESPACE
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+//
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+#pragma once
+
+#include <cassert>
+#include "../__p0009_bits/dynamic_extent.hpp"
+#include "../__p0009_bits/utility.hpp"
+
+namespace MDSPAN_IMPL_STANDARD_NAMESPACE {
+namespace MDSPAN_IMPL_PROPOSED_NAMESPACE {
+
+template <size_t padding_value = dynamic_extent>
+struct layout_left_padded {
+ template <class _Extents>
+ class mapping;
+};
+
+template <size_t padding_value = dynamic_extent>
+struct layout_right_padded {
+ template <class _Extents>
+ class mapping;
+};
+
+namespace detail {
+// The layout_padded_constants structs are only useful if rank > 1;
+// otherwise the rank - 2 / rank - 1 index computations below may wrap around.
+template <class _Layout, class _ExtentsType>
+struct layout_padded_constants;
+
+template <class _ExtentsType, size_t _PaddingStride>
+struct layout_padded_constants<layout_left_padded<_PaddingStride>, _ExtentsType>
+{
+ using rank_type = typename _ExtentsType::rank_type;
+ static constexpr rank_type padded_stride_idx = 1;
+ static constexpr rank_type extent_to_pad_idx = 0;
+};
+
+template <class _ExtentsType, size_t _PaddingStride>
+struct layout_padded_constants<layout_right_padded<_PaddingStride>, _ExtentsType>
+{
+ using rank_type = typename _ExtentsType::rank_type;
+ static constexpr rank_type padded_stride_idx = _ExtentsType::rank() - 2;
+ static constexpr rank_type extent_to_pad_idx = _ExtentsType::rank() - 1;
+};
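+
+// For illustration (not part of the upstream sources): with rank-3 extents,
+// layout_right_padded pads the rightmost extent (extent_to_pad_idx == 2) and
+// stores the padded stride at index 1, while layout_left_padded pads the
+// leftmost extent (extent_to_pad_idx == 0), also with its padded stride at
+// index 1.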
+
+template <class _Layout>
+struct is_layout_left_padded : std::false_type {};
+
+template <size_t _PaddingStride>
+struct is_layout_left_padded<layout_left_padded<_PaddingStride>> : std::true_type {};
+
+template <class _Mapping, class _Enabled = void>
+struct is_layout_left_padded_mapping : std::false_type {};
+
+template <class _Mapping>
+struct is_layout_left_padded_mapping<_Mapping,
+ std::enable_if_t<std::is_same<_Mapping, typename layout_left_padded<_Mapping::padding_value>::template mapping<typename _Mapping::extents_type>>::value>>
+ : std::true_type {};
+
+template <class _Layout>
+struct is_layout_right_padded : std::false_type {};
+
+template <size_t _PaddingStride>
+struct is_layout_right_padded<layout_right_padded<_PaddingStride>> : std::true_type {};
+
+template <class _Mapping, class _Enabled = void>
+struct is_layout_right_padded_mapping : std::false_type {};
+
+template <class _Mapping>
+struct is_layout_right_padded_mapping<_Mapping,
+ std::enable_if_t<std::is_same<_Mapping, typename layout_right_padded<_Mapping::padding_value>::template mapping<typename _Mapping::extents_type>>::value>>
+ : std::true_type {};
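+
+// A minimal sketch of how these detection traits behave (illustrative only,
+// assuming the enclosing namespace's layout types and extents):
+//
+// static_assert(is_layout_right_padded_mapping<
+// layout_right_padded<4>::mapping<extents<int, 2, 3>>>::value, "");
+// static_assert(!is_layout_right_padded_mapping<
+// layout_left_padded<4>::mapping<extents<int, 2, 3>>>::value, "");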
+
+
+template <class _LayoutExtentsType, class _PaddedLayoutMappingType>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_mandates(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<0>) {}
+
+template <class _LayoutExtentsType, class _PaddedLayoutMappingType>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_mandates(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<1>) {}
+
+template <class _LayoutExtentsType, class _PaddedLayoutMappingType, std::size_t N>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_mandates(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<N>)
+{
+ using extents_type = typename _PaddedLayoutMappingType::extents_type;
+ constexpr auto padding_value = _PaddedLayoutMappingType::padding_value;
+ constexpr auto idx = layout_padded_constants<typename _PaddedLayoutMappingType::layout_type, _LayoutExtentsType >::extent_to_pad_idx;
+
+ constexpr auto statically_determinable =
+ (_LayoutExtentsType::static_extent(idx) != dynamic_extent) &&
+ (extents_type::static_extent(idx) != dynamic_extent) &&
+ (padding_value != dynamic_extent);
+
+ static_assert(!statically_determinable ||
+ (padding_value == 0
+ ? _LayoutExtentsType::static_extent(idx) == 0
+ : _LayoutExtentsType::static_extent(idx) % padding_value == 0),
+ "");
+}
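+
+// Worked example (illustrative, not upstream code): when the destination
+// static extent, the source static extent, and padding_value are all known
+// at compile time, a destination static extent of 8 with padding_value == 4
+// passes the check above (8 % 4 == 0), while a static extent of 6 would make
+// the static_assert fire at compile time.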
+
+template <typename _ExtentsType, typename _OtherMapping>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_preconditions(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<0>,
+ const _OtherMapping&) {}
+template <typename _ExtentsType, typename _OtherMapping>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_preconditions(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<1>,
+ const _OtherMapping&) {}
+template <typename _ExtentsType, typename _OtherMapping, std::size_t N>
+MDSPAN_INLINE_FUNCTION
+constexpr void check_padded_layout_converting_constructor_preconditions(MDSPAN_IMPL_STANDARD_NAMESPACE::detail::with_rank<N>,
+ const _OtherMapping &other_mapping) {
+ constexpr auto padded_stride_idx =
+ layout_padded_constants<typename _OtherMapping::layout_type,
+ _ExtentsType>::padded_stride_idx;
+ constexpr auto extent_to_pad_idx = layout_padded_constants<typename _OtherMapping::layout_type, _ExtentsType>::extent_to_pad_idx;
+ MDSPAN_IMPL_PRECONDITION(other_mapping.stride(padded_stride_idx) == other_mapping.extents().extent(extent_to_pad_idx));
+}
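+
+// Illustrative only: converting from a layout_left_padded mapping over
+// extents (3, 5) requires other_mapping.stride(1) == 3, i.e. the source must
+// carry no actual padding; a source mapping whose first extent was padded
+// from 3 up to 4 violates this precondition.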
+
+
+}
+}
+}
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef MDARRAY_HPP_
+#define MDARRAY_HPP_
+
+#ifndef MDSPAN_IMPL_STANDARD_NAMESPACE
+ #define MDSPAN_IMPL_STANDARD_NAMESPACE Kokkos
+#endif
+
+#ifndef MDSPAN_IMPL_PROPOSED_NAMESPACE
+ #define MDSPAN_IMPL_PROPOSED_NAMESPACE Experimental
+#endif
+
+#include "mdspan.hpp"
+#include "../experimental/__p1684_bits/mdarray.hpp"
+
+#endif // MDARRAY_HPP_
--- /dev/null
+//@HEADER
+// ************************************************************************
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
+// See https://kokkos.org/LICENSE for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//@HEADER
+
+#ifndef MDSPAN_HPP_
+#define MDSPAN_HPP_
+
+#ifndef MDSPAN_IMPL_STANDARD_NAMESPACE
+ #define MDSPAN_IMPL_STANDARD_NAMESPACE Kokkos
+#endif
+
+#ifndef MDSPAN_IMPL_PROPOSED_NAMESPACE
+ #define MDSPAN_IMPL_PROPOSED_NAMESPACE Experimental
+#endif
+
+#include "../experimental/__p0009_bits/default_accessor.hpp"
+#include "../experimental/__p0009_bits/full_extent_t.hpp"
+#include "../experimental/__p0009_bits/mdspan.hpp"
+#include "../experimental/__p0009_bits/dynamic_extent.hpp"
+#include "../experimental/__p0009_bits/extents.hpp"
+#include "../experimental/__p0009_bits/layout_stride.hpp"
+#include "../experimental/__p0009_bits/layout_left.hpp"
+#include "../experimental/__p0009_bits/layout_right.hpp"
+#include "../experimental/__p0009_bits/macros.hpp"
+#if MDSPAN_HAS_CXX_17
+#include "../experimental/__p2642_bits/layout_padded.hpp"
+#include "../experimental/__p2630_bits/submdspan.hpp"
+#endif
+#include "../experimental/__p2389_bits/dims.hpp"
+
+#endif // MDSPAN_HPP_
"Always use the bundled Kokkos library instead of an external one."
OFF)
-set(KOKKOS_FOLDER "${CMAKE_SOURCE_DIR}/bundled/kokkos-3.7.00")
+set(KOKKOS_FOLDER "${CMAKE_SOURCE_DIR}/bundled/kokkos-4.5.01")
macro(feature_kokkos_configure_bundled)
- set(Kokkos_VERSION "3.7.0")
- set(KOKKOS_VERSION "3.7.0")
- set(KOKKOS_VERSION_MAJOR "3")
- set(KOKKOS_VERSION_MINOR "7")
- set(KOKKOS_VERSION_SUBMINOR "0")
+ set(Kokkos_VERSION "4.5.1")
+ set(KOKKOS_VERSION "4.5.1")
+ set(KOKKOS_VERSION_MAJOR "4")
+ set(KOKKOS_VERSION_MINOR "5")
+ set(KOKKOS_VERSION_SUBMINOR "1")
set(Kokkos_DEVICES "Serial")
set(Kokkos_ARCH " ")